From 236dfb505d59928a5fe13f7cb01fe04edbbd8c24 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 29 Oct 2014 08:21:03 +0000
Subject: [PATCH 1/8] [Laplace] sum now around argument, instead of call to
 array

---
 GPy/inference/latent_function_inference/laplace.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py
index 3b0ead7f..01d2f997 100644
--- a/GPy/inference/latent_function_inference/laplace.py
+++ b/GPy/inference/latent_function_inference/laplace.py
@@ -152,7 +152,7 @@ class Laplace(LatentFunctionInference):
         Ki_W_i = K - C.T.dot(C)

         #compute the log marginal
-        log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata).sum() - np.sum(np.log(np.diag(L)))
+        log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - np.sum(np.log(np.diag(L)))

         # Compute matrices for derivatives
         dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat

From aa50f1f303de2317692bbf1bba3a493cd782cefc Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 29 Oct 2014 08:21:37 +0000
Subject: [PATCH 2/8] [latent plotting] nicer looking plots: edge colours,
 alpha and marker tweaks for latent and inducing scatters

---
 GPy/plotting/matplot_dep/dim_reduction_plots.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/plotting/matplot_dep/dim_reduction_plots.py b/GPy/plotting/matplot_dep/dim_reduction_plots.py
index b4fed8fd..7ec0b59d 100644
--- a/GPy/plotting/matplot_dep/dim_reduction_plots.py
+++ b/GPy/plotting/matplot_dep/dim_reduction_plots.py
@@ -162,7 +162,7 @@ def plot_latent(model, labels=None, which_indices=None,
         else:
             x = X[index, input_1]
             y = X[index, input_2]
-        ax.scatter(x, y, marker=m, s=s, color=Tango.nextMedium(), label=this_label)
+        ax.scatter(x, y, marker=m, s=s, c=Tango.nextMedium(), label=this_label, linewidth=.5, edgecolor='k', alpha=.9)

     ax.set_xlabel('latent dimension %i' % input_1)
     ax.set_ylabel('latent dimension %i' % input_2)
@@ -175,7 +175,7 @@ def plot_latent(model, labels=None, which_indices=None,

     if plot_inducing:
         Z = model.Z
-        ax.plot(Z[:, input_1], Z[:, input_2], '^w')
+        ax.scatter(Z[:, input_1], Z[:, input_2], c='w', s=14, marker="^", edgecolor='k', linewidth=.3, alpha=.6)

     ax.set_xlim((xmin, xmax))
     ax.set_ylim((ymin, ymax))

From 57e56bd93faac249375627f9a82b2ee297202964 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 29 Oct 2014 08:22:11 +0000
Subject: [PATCH 3/8] [pickling] load added to gpy, allows for easy loading of
 pickled models

---
 GPy/__init__.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/GPy/__init__.py b/GPy/__init__.py
index 819f54bf..ceff68a0 100644
--- a/GPy/__init__.py
+++ b/GPy/__init__.py
@@ -25,3 +25,15 @@ from core.parameterization import Param, Parameterized, ObsAr
 @nottest
 def tests():
     Tester(testing).test(verbose=10)
+
+
+def load(file_path):
+    """
+    Load a previously pickled model, saved with `m.pickle('path/to/file.pickle')`.
+
+    :param file_path: path/to/file.pickle
+    """
+    import cPickle as pickle
+    with open(file_path, 'rb') as f:
+        m = pickle.load(f)
+    return m
\ No newline at end of file
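A usage sketch for the new loader. The data, the model and the 'model.pickle' path below are illustrative only and not part of the patch; `m.pickle` is the existing save method the docstring refers to:

    import numpy as np
    import GPy

    X = np.random.rand(20, 1)
    Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
    m = GPy.models.GPRegression(X, Y)   # any pickleable GPy model works here
    m.optimize()

    m.pickle('model.pickle')            # save via the model's own method
    m2 = GPy.load('model.pickle')       # restore with the new top-level loader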
From f91e7e9deb0785179916dc22153619306d0 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 29 Oct 2014 08:22:49 +0000
Subject: [PATCH 4/8] [init] parameter updates now check if in init

---
 GPy/core/parameterization/parameter_core.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index d0ee6f68..b1e0d8d2 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -18,7 +18,7 @@ import numpy as np
 import re
 import logging

-__updated__ = '2014-10-22'
+__updated__ = '2014-10-28'

 class HierarchyError(Exception):
     """
@@ -99,7 +99,7 @@ class Observable(object):
         :param bool trigger_parent: Whether to trigger the parent, after self has updated
         """
-        if not self.update_model() or self._in_init_:
+        if not self.update_model() or (hasattr(self, "_in_init_") and self._in_init_):
             #print "Warning: updates are off, updating the model will do nothing"
             return
         self._trigger_params_changed(trigger_parent)

From af40ef8cfb25838ee6286870c70a29accec0b3ee Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 29 Oct 2014 08:23:25 +0000
Subject: [PATCH 5/8] [sparse gp] prediction with posterior per dimension
 activated

---
 GPy/core/sparse_gp.py       | 8 +++++---
 GPy/likelihoods/gaussian.py | 5 ++++-
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 183971d1..d3a9ee3b 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -346,9 +346,11 @@ def optimize(m, maxiter=1000):
         mu = np.dot(Kx.T, self.posterior.woodbury_vector)
         if full_cov:
             Kxx = kern.K(Xnew)
-            var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
-            #var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
-            var = var.squeeze()
+            if self.posterior.woodbury_inv.ndim == 2:
+                var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
+            elif self.posterior.woodbury_inv.ndim == 3:
+                var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
+            var = var
         else:
             Kxx = kern.Kdiag(Xnew)
             var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index 16feed13..2546b07a 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -80,7 +80,10 @@ class Gaussian(Likelihood):

     def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
         if full_cov:
-            var += np.eye(var.shape[0])*self.variance
+            if var.ndim == 2:
+                var += np.eye(var.shape[0])*self.variance
+            if var.ndim == 3:
+                var += np.atleast_3d(np.eye(var.shape[0])*self.variance)
         else:
             var += self.variance
         return mu, var
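To see why the branch on woodbury_inv.ndim is needed, here is a shape-only sketch (plain NumPy; N test points, M inducing points and D outputs are made-up sizes): a shared posterior gives one (N, N) covariance, while a per-dimension posterior stacks D of them into (N, N, D), which is exactly what the tensordot in the 3-d branch produces.

    import numpy as np

    N, M, D = 4, 3, 2                  # test points, inducing points, outputs
    Kxx = np.random.rand(N, N)         # stand-in for kern.K(Xnew)
    Kx = np.random.rand(M, N)          # stand-in for the cross-covariance

    wi2 = np.eye(M)                    # shared posterior: woodbury_inv is (M, M)
    var2 = Kxx - np.dot(Kx.T, np.dot(wi2, Kx))
    assert var2.shape == (N, N)

    wi3 = np.dstack([np.eye(M)] * D)   # per-dimension posterior: (M, M, D)
    var3 = Kxx[:, :, None] - np.tensordot(np.dot(np.atleast_3d(wi3).T, Kx).T, Kx, [1, 0]).swapaxes(1, 2)
    assert var3.shape == (N, N, D)     # one full covariance per output dimension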
+ """ + class SparseGPMissing(StochasticStorage): def __init__(self, model, batchsize=1): """ @@ -36,18 +41,19 @@ class SparseGPStochastics(StochasticStorage): and the indices corresponding to those """ def __init__(self, model, batchsize=1): - import itertools self.batchsize = batchsize - if self.batchsize == 1: - self.dimensions = itertools.cycle(range(model.Y_normalized.shape[1])) - else: - import numpy as np - self.dimensions = lambda: np.random.choice(model.Y_normalized.shape[1], size=batchsize, replace=False) - self.d = None + self.output_dim = model.Y.shape[1] + self.reset() self.do_stochastics() def do_stochastics(self): if self.batchsize == 1: - self.d = [self.dimensions.next()] + self.current_dim = (self.current_dim+1)%self.output_dim + self.d = [self.current_dim] else: - self.d = self.dimensions() \ No newline at end of file + import numpy as np + self.d = np.random.choice(self.output_dim, size=self.batchsize, replace=False) + + def reset(self): + self.current_dim = -1 + self.d = None \ No newline at end of file From 894829ffebfbafe7a89c6aadfe51b372bd339448 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 29 Oct 2014 08:24:25 +0000 Subject: [PATCH 7/8] [var dtc missing] deleted code for missing data inference --- .../latent_function_inference/var_dtc.py | 198 ------------------ 1 file changed, 198 deletions(-) diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index b10ec832..1cabecc3 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -182,204 +182,6 @@ class VarDTC(LatentFunctionInference): post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm) return post, log_marginal, grad_dict -class VarDTCMissingData(LatentFunctionInference): - const_jitter = 1e-10 - def __init__(self, limit=1, inan=None): - from ...util.caching import Cacher - self._Y = Cacher(self._subarray_computations, limit) - if inan is not None: self._inan = ~inan - else: self._inan = None - pass - - def set_limit(self, limit): - self._Y.limit = limit - - def __getstate__(self): - # has to be overridden, as Cacher objects cannot be pickled. - return self._Y.limit, self._inan - - def __setstate__(self, state): - # has to be overridden, as Cacher objects cannot be pickled. - from ...util.caching import Cacher - self.limit = state[0] - self._inan = state[1] - self._Y = Cacher(self._subarray_computations, self.limit) - - def _subarray_computations(self, Y): - if self._inan is None: - inan = np.isnan(Y) - has_none = inan.any() - self._inan = ~inan - inan = self._inan - else: - inan = self._inan - has_none = True - if has_none: - #print "caching missing data slices, this can take several minutes depending on the number of unique dimensions of the data..." - #csa = common_subarrays(inan, 1) - size = Y.shape[1] - #logger.info('preparing subarrays {:3.3%}'.format((i+1.)/size)) - Ys = [] - next_ten = [0.] - count = itertools.count() - for v, y in itertools.izip(inan.T, Y.T[:,:,None]): - i = count.next() - if ((i+1.)/size) >= next_ten[0]: - logger.info('preparing subarrays {:>6.1%}'.format((i+1.)/size)) - next_ten[0] += .1 - Ys.append(y[v,:]) - - next_ten = [0.] 
From 894829ffebfbafe7a89c6aadfe51b372bd339448 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 29 Oct 2014 08:24:25 +0000
Subject: [PATCH 7/8] [var dtc missing] deleted code for missing data
 inference

---
 .../latent_function_inference/var_dtc.py      | 198 ------------------
 1 file changed, 198 deletions(-)

diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index b10ec832..1cabecc3 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -182,204 +182,6 @@ class VarDTC(LatentFunctionInference):
         post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
         return post, log_marginal, grad_dict

-class VarDTCMissingData(LatentFunctionInference):
-    const_jitter = 1e-10
-    def __init__(self, limit=1, inan=None):
-        from ...util.caching import Cacher
-        self._Y = Cacher(self._subarray_computations, limit)
-        if inan is not None: self._inan = ~inan
-        else: self._inan = None
-        pass
-
-    def set_limit(self, limit):
-        self._Y.limit = limit
-
-    def __getstate__(self):
-        # has to be overridden, as Cacher objects cannot be pickled.
-        return self._Y.limit, self._inan
-
-    def __setstate__(self, state):
-        # has to be overridden, as Cacher objects cannot be pickled.
-        from ...util.caching import Cacher
-        self.limit = state[0]
-        self._inan = state[1]
-        self._Y = Cacher(self._subarray_computations, self.limit)
-
-    def _subarray_computations(self, Y):
-        if self._inan is None:
-            inan = np.isnan(Y)
-            has_none = inan.any()
-            self._inan = ~inan
-            inan = self._inan
-        else:
-            inan = self._inan
-            has_none = True
-        if has_none:
-            #print "caching missing data slices, this can take several minutes depending on the number of unique dimensions of the data..."
-            #csa = common_subarrays(inan, 1)
-            size = Y.shape[1]
-            #logger.info('preparing subarrays {:3.3%}'.format((i+1.)/size))
-            Ys = []
-            next_ten = [0.]
-            count = itertools.count()
-            for v, y in itertools.izip(inan.T, Y.T[:,:,None]):
-                i = count.next()
-                if ((i+1.)/size) >= next_ten[0]:
-                    logger.info('preparing subarrays {:>6.1%}'.format((i+1.)/size))
-                    next_ten[0] += .1
-                Ys.append(y[v,:])
-
-            next_ten = [0.]
-            count = itertools.count()
-            def trace(y):
-                i = count.next()
-                if ((i+1.)/size) >= next_ten[0]:
-                    logger.info('preparing traces {:>6.1%}'.format((i+1.)/size))
-                    next_ten[0] += .1
-                y = y[inan[:,i],i:i+1]
-                return np.einsum('ij,ij->', y,y)
-            traces = [trace(Y) for _ in xrange(size)]
-            return Ys, traces
-        else:
-            self._subarray_indices = [[slice(None),slice(None)]]
-            return [Y], [(Y**2).sum()]
-
-    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None):
-        if isinstance(X, VariationalPosterior):
-            uncertain_inputs = True
-            psi0_all = kern.psi0(Z, X)
-            psi1_all = kern.psi1(Z, X)
-        else:
-            uncertain_inputs = False
-            psi0_all = kern.Kdiag(X)
-            psi1_all = kern.K(X, Z)
-
-        Ys, traces = self._Y(Y)
-        beta_all = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
-        het_noise = beta_all.size != 1
-
-        num_inducing = Z.shape[0]
-
-        dL_dpsi0_all = np.zeros(Y.shape[0])
-        dL_dpsi1_all = np.zeros((Y.shape[0], num_inducing))
-        if uncertain_inputs:
-            dL_dpsi2_all = np.zeros((Y.shape[0], num_inducing, num_inducing))
-
-        dL_dR = 0
-        woodbury_vector = np.zeros((num_inducing, Y.shape[1]))
-        woodbury_inv_all = np.zeros((num_inducing, num_inducing, Y.shape[1]))
-        dL_dKmm = 0
-        log_marginal = 0
-
-        Kmm = kern.K(Z).copy()
-        diag.add(Kmm, self.const_jitter)
-        #factor Kmm
-        if Lm is None:
-            Lm = jitchol(Kmm)
-        if uncertain_inputs: LmInv = dtrtri(Lm)
-
-        size = len(Ys)
-        next_ten = 0
-        for i, [y, v, trYYT] in enumerate(itertools.izip(Ys, self._inan.T, traces)):
-            if ((i+1.)/size) >= next_ten:
-                logger.info('inference {:> 6.1%}'.format((i+1.)/size))
-                next_ten += .1
-            if het_noise: beta = beta_all[i]
-            else: beta = beta_all
-            VVT_factor = (y*beta)
-            output_dim = 1#len(ind)
-
-            psi0 = psi0_all[v]
-            psi1 = psi1_all[v, :]
-            if uncertain_inputs:
-                psi2 = kern.psi2(Z, X[v, :])
-            else: psi2 = None
-            num_data = psi1.shape[0]
-
-            if uncertain_inputs:
-                if het_noise: psi2_beta = psi2 * (beta.flatten().reshape(num_data, 1, 1)).sum(0)
-                else: psi2_beta = psi2 * (beta)
-                A = LmInv.dot(psi2_beta.dot(LmInv.T))
-            else:
-                if het_noise: tmp = psi1 * (np.sqrt(beta.reshape(num_data, 1)))
-                else: tmp = psi1 * (np.sqrt(beta))
-                tmp, _ = dtrtrs(Lm, tmp.T, lower=1)
-                A = tdot(tmp) #print A.sum()
-
-            # factor B
-            B = np.eye(num_inducing) + A
-            LB = jitchol(B)
-
-            psi1Vf = psi1.T.dot(VVT_factor)
-            tmp, _ = dtrtrs(Lm, psi1Vf, lower=1, trans=0)
-            _LBi_Lmi_psi1Vf, _ = dtrtrs(LB, tmp, lower=1, trans=0)
-            tmp, _ = dtrtrs(LB, _LBi_Lmi_psi1Vf, lower=1, trans=1)
-            Cpsi1Vf, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
-
-            # data fit and derivative of L w.r.t. Kmm
-            delit = tdot(_LBi_Lmi_psi1Vf)
-            data_fit = np.trace(delit)
-            DBi_plus_BiPBi = backsub_both_sides(LB, output_dim * np.eye(num_inducing) + delit)
-            delit = -0.5 * DBi_plus_BiPBi
-            delit += -0.5 * B * output_dim
-            delit += output_dim * np.eye(num_inducing)
-            dL_dKmm += backsub_both_sides(Lm, delit)
-
-            # derivatives of L w.r.t. psi
-            dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm,
-                VVT_factor, Cpsi1Vf, DBi_plus_BiPBi,
-                psi1, het_noise, uncertain_inputs)
-
-            dL_dpsi0_all[v] += dL_dpsi0
-            dL_dpsi1_all[v, :] += dL_dpsi1
-            if uncertain_inputs:
-                dL_dpsi2_all[v, :, :] += dL_dpsi2
-
-            # log marginal likelihood
-            log_marginal += _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
-                psi0, A, LB, trYYT, data_fit, VVT_factor)

-            #put the gradients in the right places
-            dL_dR += _compute_dL_dR(likelihood,
-                het_noise, uncertain_inputs, LB,
-                _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
-                psi0, psi1, beta,
-                data_fit, num_data, output_dim, trYYT, Y)
-
-            #if full_VVT_factor:
-            woodbury_vector[:, i:i+1] = Cpsi1Vf
-            #else:
-            #    print 'foobar'
-            #    tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
-            #    tmp, _ = dpotrs(LB, tmp, lower=1)
-            #    woodbury_vector[:, ind] = dtrtrs(Lm, tmp, lower=1, trans=1)[0]
-
-            #import ipdb;ipdb.set_trace()
-            Bi, _ = dpotri(LB, lower=1)
-            symmetrify(Bi)
-            Bi = -dpotri(LB, lower=1)[0]
-            diag.add(Bi, 1)
-            woodbury_inv_all[:, :, i:i+1] = backsub_both_sides(Lm, Bi)[:,:,None]
-
-        dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
-
-        # gradients:
-        if uncertain_inputs:
-            grad_dict = {'dL_dKmm': dL_dKmm,
-                         'dL_dpsi0':dL_dpsi0_all,
-                         'dL_dpsi1':dL_dpsi1_all,
-                         'dL_dpsi2':dL_dpsi2_all,
-                         'dL_dthetaL':dL_dthetaL}
-        else:
-            grad_dict = {'dL_dKmm': dL_dKmm,
-                         'dL_dKdiag':dL_dpsi0_all,
-                         'dL_dKnm':dL_dpsi1_all,
-                         'dL_dthetaL':dL_dthetaL}
-
-        post = Posterior(woodbury_inv=woodbury_inv_all, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
-
-        return post, log_marginal, grad_dict
-
 def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs):
     dL_dpsi0 = -0.5 * output_dim * (beta[:,None] * np.ones([num_data, 1])).flatten()
     dL_dpsi1 = np.dot(VVT_factor, Cpsi1Vf.T)
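For the record, the core trick of the removed class was to slice every output column down to its observed (non-NaN) rows and cache those subarrays together with their traces. A minimal standalone sketch of that idea, on toy data with plain NumPy:

    import numpy as np

    Y = np.array([[1.0, np.nan],
                  [2.0, 3.0],
                  [np.nan, 4.0]])
    observed = ~np.isnan(Y)                    # the class stored this as ~inan
    Ys = [Y[observed[:, i], i:i+1] for i in range(Y.shape[1])]
    traces = [np.einsum('ij,ij->', y, y) for y in Ys]  # per-column tr(y y^T)
    print([y.ravel().tolist() for y in Ys])    # [[1.0, 2.0], [3.0, 4.0]]
    print(traces)                              # [5.0, 25.0]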
From 79df23adc80c3fead8594033c079e265e223c3f2 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 29 Oct 2014 08:24:51 +0000
Subject: [PATCH 8/8] [ObsAr] do not make a copy, if not needed

---
 GPy/core/parameterization/observable_array.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/core/parameterization/observable_array.py b/GPy/core/parameterization/observable_array.py
index 379346c7..1253caae 100644
--- a/GPy/core/parameterization/observable_array.py
+++ b/GPy/core/parameterization/observable_array.py
@@ -1,7 +1,7 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

-__updated__ = '2014-10-22'
+__updated__ = '2014-10-29'

 import numpy as np
 from parameter_core import Observable, Pickleable
@@ -17,7 +17,7 @@ class ObsAr(np.ndarray, Pickleable, Observable):
     def __new__(cls, input_array, *a, **kw):
         # ensure the input parameters are float64 and in C order; np.require copies only if needed:
         if not isinstance(input_array, ObsAr):
-            obj = np.atleast_1d(np.require(np.copy(input_array), dtype=np.float64, requirements=['W', 'C'])).view(cls)
+            obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['W', 'C'])).view(cls)
         else: obj = input_array
         super(ObsAr, obj).__init__(*a, **kw)
         return obj
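The reasoning behind dropping np.copy: np.require already copies whenever the input fails the requested dtype or flags, and passes compliant arrays through untouched. A small demonstration with plain NumPy (sizes made up):

    import numpy as np

    a = np.arange(4, dtype=np.float64)     # already writeable, C-ordered float64
    b = np.require(a, dtype=np.float64, requirements=['W', 'C'])
    print(b is a)                          # True -- no copy was made

    c = np.arange(4, dtype=np.int32)       # wrong dtype forces a conversion copy
    d = np.require(c, dtype=np.float64, requirements=['W', 'C'])
    print(d is c)                          # False -- a new array was allocated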