From 5d283335823e4a4278af9221174ff4aecd488eb0 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Fri, 14 Jun 2013 14:59:18 +0100 Subject: [PATCH 1/5] changed manifest from docs to doc --- MANIFEST.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MANIFEST.in b/MANIFEST.in index fe342b5e..c89284cd 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ include *.txt -recursive-include docs *.txt +recursive-include doc *.txt include *.md -recursive-include docs *.md +recursive-include doc *.md From 0ca52c2346887a9bee47da748df116eea0f942ea Mon Sep 17 00:00:00 2001 From: Ricardo Date: Fri, 14 Jun 2013 18:49:34 +0100 Subject: [PATCH 2/5] Bug fix in the confusion matrix --- GPy/util/classification.py | 43 +++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/GPy/util/classification.py b/GPy/util/classification.py index 07e725d1..41701949 100644 --- a/GPy/util/classification.py +++ b/GPy/util/classification.py @@ -2,29 +2,30 @@ import numpy as np def conf_matrix(p,labels,names=['1','0'],threshold=.5,show=True): """ - Returns true and false positives in a binary classification problem - - Column names: true class of the examples - - Row names: classification assigned by the model + Returns error rate and true/false positives in a binary classification problem + - Actual classes are displayed by column. + - Predicted classes are displayed by row. - p: probabilities estimated for observation of belonging to class '1' - labels: observations' class - names: classes' names - threshold: probability value at which the model allocate an element to each class - show: whether the matrix should be shown or not + :param p: array of class '1' probabilities. + :param labels: array of actual classes. + :param names: list of class names, defaults to ['1','0']. + :param threshold: probability value used to decide the class. + :param show: whether the matrix should be shown or not + :type show: False|True """ - p = p.flatten() - labels = labels.flatten() - N = p.size - C = np.ones(N) - C[p Date: Mon, 17 Jun 2013 13:58:51 +0100 Subject: [PATCH 3/5] began to merege the SVIGP code into GPy. Example is implemented, but the step length is a bit crazy! 
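[Editorial note — not part of the patches.] PATCH 2/5 above rewrites the conf_matrix docstring: actual classes now run along columns, predicted classes along rows, and the function reports the error rate together with the true/false positive counts. A minimal, self-contained sketch of that behaviour follows; the function name, the exact thresholding rule and the return layout are illustrative assumptions, not the patched GPy code:

    import numpy as np

    def conf_matrix_sketch(p, labels, threshold=0.5):
        """Binary confusion matrix: rows = predicted class, columns = actual class."""
        p = np.asarray(p).flatten()            # estimated P(class '1') per example
        labels = np.asarray(labels).flatten()  # actual classes, 1 or 0
        predicted = (p >= threshold).astype(int)
        TP = np.sum((predicted == 1) & (labels == 1))
        FP = np.sum((predicted == 1) & (labels == 0))
        FN = np.sum((predicted == 0) & (labels == 1))
        TN = np.sum((predicted == 0) & (labels == 0))
        error_rate = (FP + FN) / float(labels.size)
        return np.array([[TP, FP], [FN, TN]]), error_rate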
--- GPy/core/__init__.py | 5 +- GPy/core/gp_base.py | 1 + GPy/core/svigp.py | 466 +++++++++++++++++++++++++++++++++ GPy/examples/__init__.py | 1 + GPy/examples/stochastic.py | 41 +++ GPy/models/__init__.py | 1 + GPy/models/svigp_regression.py | 44 ++++ 7 files changed, 557 insertions(+), 2 deletions(-) create mode 100644 GPy/core/svigp.py create mode 100644 GPy/examples/stochastic.py create mode 100644 GPy/models/svigp_regression.py diff --git a/GPy/core/__init__.py b/GPy/core/__init__.py index 32b6c02d..e9e049b0 100644 --- a/GPy/core/__init__.py +++ b/GPy/core/__init__.py @@ -4,6 +4,7 @@ from model import * from parameterised import * import priors -from GPy.core.gp import GP -from GPy.core.sparse_gp import SparseGP +from gp import GP +from sparse_gp import SparseGP from fitc import FITC +from svigp import SVIGP diff --git a/GPy/core/gp_base.py b/GPy/core/gp_base.py index 0799e0c2..b82f3298 100644 --- a/GPy/core/gp_base.py +++ b/GPy/core/gp_base.py @@ -29,6 +29,7 @@ class GPBase(Model): self._Xscale = np.ones((1, self.input_dim)) super(GPBase, self).__init__() + #Model.__init__(self) # All leaf nodes should call self._set_params(self._get_params()) at # the end diff --git a/GPy/core/svigp.py b/GPy/core/svigp.py new file mode 100644 index 00000000..1db0e26f --- /dev/null +++ b/GPy/core/svigp.py @@ -0,0 +1,466 @@ +# Copyright (c) 2012, James Hensman and Nicolo' Fusi +# Licensed under the BSD 3-clause license (see LICENSE.txt) + +import numpy as np +import pylab as pb +from .. import kern +from ..util.linalg import pdinv, mdot, tdot, dpotrs, dtrtrs, jitchol, backsub_both_sides +from ..likelihoods import EP +from gp_base import GPBase +from model import Model +import time +import sys + + +class SVIGP(GPBase): + """ + Stochastic Variational inference in a Gaussian Process + + :param X: inputs + :type X: np.ndarray (N x Q) + :param Y: observed data + :type Y: np.ndarray of observations (N x D) + :param batchsize: the size of a h + + Additional kwargs are used as for a sparse GP. They include + + :param q_u: canonical parameters of the distribution squasehd into a 1D array + :type q_u: np.ndarray + :param M : Number of inducing points (optional, default 10. Ignored if Z is not None) + :type M: int + :param kernel : the kernel/covariance function. See link kernels + :type kernel: a GPy kernel + :param Z: inducing inputs (optional, see note) + :type Z: np.ndarray (M x Q) | None + :param X_uncertainty: The uncertainty in the measurements of X (Gaussian variance) + :type X_uncertainty: np.ndarray (N x Q) | None + :param Zslices: slices for the inducing inputs (see slicing TODO: link) + :param M : Number of inducing points (optional, default 10. Ignored if Z is not None) + :type M: int + :param beta: noise precision. 
TODO> ignore beta if doing EP + :type beta: float + :param normalize_(X|Y) : whether to normalize the data before computing (predictions will be in original scales) + :type normalize_(X|Y): bool + """ + + + def __init__(self, X, likelihood, kernel, Z, q_u=None, batchsize=10, X_variance=None): + GPBase.__init__(self, X, likelihood, kernel, normalize_X=False) + self.batchsize=batchsize + self.Y = self.likelihood.Y.copy() + self.Z = Z + self.num_inducing = Z.shape[0] + + self.batchcounter = 0 + self.epochs = 0 + self.iterations = 0 + + self.vb_steplength = 0.05 + self.param_steplength = 1e-5 + self.momentum = 0.9 + + if X_variance is None: + self.has_uncertain_inputs = False + else: + self.has_uncertain_inputs = True + self.X_variance = X_variance + + + if q_u is None: + q_u = np.hstack((np.random.randn(self.num_inducing*self.output_dim),-.5*np.eye(self.num_inducing).flatten())) + self.set_vb_param(q_u) + + self._permutation = np.random.permutation(self.num_data) + self.load_batch() + + self._param_trace = [] + self._ll_trace = [] + self._grad_trace = [] + + #set the adaptive steplength parameters + self.hbar_t = 0.0 + self.tau_t = 100.0 + self.gbar_t = 0.0 + self.gbar_t1 = 0.0 + self.gbar_t2 = 0.0 + self.hbar_tp = 0.0 + self.tau_tp = 10000.0 + self.gbar_tp = 0.0 + self.adapt_param_steplength = True + self.adapt_vb_steplength = True + self._param_steplength_trace = [] + self._vb_steplength_trace = [] + + def _compute_kernel_matrices(self): + # kernel computations, using BGPLVM notation + self.Kmm = self.kern.K(self.Z) + if self.has_uncertain_inputs: + self.psi0 = self.kern.psi0(self.Z, self.X_batch, self.X_variance_batch) + self.psi1 = self.kern.psi1(self.Z, self.X_batch, self.X_variance_batch) + self.psi2 = self.kern.psi2(self.Z, self.X_batch, self.X_variance_batch) + else: + self.psi0 = self.kern.Kdiag(self.X_batch) + self.psi1 = self.kern.K(self.X_batch, self.Z) + self.psi2 = None + + def dL_dtheta(self): + dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm, self.Z) + if self.has_uncertain_inputs: + dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z, self.X_batch, self.X_variance_batch) + dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1, self.Z, self.X_batch, self.X_variance_batch) + dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2, self.Z, self.X_batch, self.X_variance_batch) + else: + dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1, self.X_batch, self.Z) + dL_dtheta += self.kern.dKdiag_dtheta(self.dL_dpsi0, self.X_batch) + return dL_dtheta + + def _set_params(self, p, computations=True): + self.kern._set_params_transformed(p[:self.kern.num_params]) + self.likelihood._set_params(p[self.kern.num_params:]) + if computations: + self._compute_kernel_matrices() + self._computations() + + def _get_params(self): + return np.hstack((self.kern._get_params_transformed() , self.likelihood._get_params())) + + def _get_param_names(self): + return self.kern._get_param_names_transformed() + self.likelihood._get_param_names() + + def load_batch(self): + """ + load a batch of data (set self.X_batch and self.likelihood.Y from self.X, self.Y) + """ + + #if we've seen all the data, start again with them in a new random order + if self.batchcounter+self.batchsize > self.num_data: + self.batchcounter = 0 + self.epochs += 1 + self._permutation = np.random.permutation(self.num_data) + + this_perm = self._permutation[self.batchcounter:self.batchcounter+self.batchsize] + + self.X_batch = self.X[this_perm] + self.likelihood.set_data(self.Y[this_perm]) + if self.has_uncertain_inputs: + self.X_variance_batch = 
self.X_variance[this_perm] + + self.batchcounter += self.batchsize + + self.data_prop = float(self.batchsize)/self.num_data + + self._compute_kernel_matrices() + self._computations() + + def _computations(self,do_Kmm=True, do_Kmm_grad=True): + """ + All of the computations needed. Some are optional, see kwargs. + """ + + if do_Kmm: + self.Lm = jitchol(self.Kmm) + + # The rather complex computations of self.A + if self.has_uncertain_inputs: + if self.likelihood.is_heteroscedastic: + psi2_beta = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.batchsize, 1, 1))).sum(0) + else: + psi2_beta = self.psi2.sum(0) * self.likelihood.precision + evals, evecs = linalg.eigh(psi2_beta) + clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable + tmp = evecs * np.sqrt(clipped_evals) + else: + if self.likelihood.is_heteroscedastic: + tmp = self.psi1.T * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.batchsize))) + else: + tmp = self.psi1.T * (np.sqrt(self.likelihood.precision)) + tmp, _ = dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1) + self.A = tdot(tmp) + + self.V = self.likelihood.precision*self.likelihood.Y + self.VmT = np.dot(self.V,self.q_u_expectation[0].T) + self.psi1V = np.dot(self.psi1.T, self.V) + + self.B = np.eye(self.num_inducing)*self.data_prop + self.A + self.Lambda = backsub_both_sides(self.Lm, self.B.T) + self.LQL = backsub_both_sides(self.Lm,self.q_u_expectation[1].T,transpose='right') + + self.trace_K = self.psi0.sum() - np.trace(self.A)/self.likelihood.precision + self.Kmmi_m, _ = dpotrs(self.Lm, self.q_u_expectation[0], lower=1) + self.projected_mean = np.dot(self.psi1,self.Kmmi_m) + + # Compute dL_dpsi + self.dL_dpsi0 = - 0.5 * self.output_dim * self.likelihood.precision * np.ones(self.batchsize) + self.dL_dpsi1, _ = dpotrs(self.Lm,np.asfortranarray(self.VmT.T),lower=1) + self.dL_dpsi1 = self.dL_dpsi1.T + + dL_dpsi2 = -0.5 * self.likelihood.precision * backsub_both_sides(self.Lm, self.LQL - self.output_dim * np.eye(self.num_inducing)) + if self.has_uncertain_inputs: + self.dL_dpsi2 = np.repeat(dL_dpsi2[None,:,:],self.batchsize,axis=0) + else: + self.dL_dpsi1 += 2.*np.dot(dL_dpsi2,self.psi1.T).T + self.dL_dpsi2 = None + + # Compute dL_dKmm + if do_Kmm_grad: + tmp = np.dot(self.LQL,self.A) - backsub_both_sides(self.Lm,np.dot(self.q_u_expectation[0],self.psi1V.T),transpose='right') + tmp += tmp.T + tmp += -self.output_dim*self.B + tmp += self.data_prop*self.LQL + self.dL_dKmm = 0.5*backsub_both_sides(self.Lm,tmp) + + #Compute the gradient of the log likelihood wrt noise variance + self.partial_for_likelihood = -0.5*(self.batchsize*self.output_dim - np.sum(self.A*self.LQL))*self.likelihood.precision + self.partial_for_likelihood += (0.5*self.output_dim*self.trace_K + 0.5 * self.likelihood.trYYT - np.sum(self.likelihood.Y*self.projected_mean))*self.likelihood.precision**2 + + + def log_likelihood(self): + """ + As for uncollapsed sparse GP, but account for the proportion of data we're looking at right now. + + NB. 
self.batchsize is the size of the batch, not the size of X_all + """ + assert not self.likelihood.is_heteroscedastic + A = -0.5*self.batchsize*self.output_dim*(np.log(2.*np.pi) - np.log(self.likelihood.precision)) + B = -0.5*self.likelihood.precision*self.output_dim*self.trace_K + Kmm_logdet = 2.*np.sum(np.log(np.diag(self.Lm))) + C = -0.5*self.output_dim*self.data_prop*(Kmm_logdet-self.q_u_logdet - self.num_inducing) + C += -0.5*np.sum(self.LQL * self.B) + D = -0.5*self.likelihood.precision*self.likelihood.trYYT + E = np.sum(self.V*self.projected_mean) + return (A+B+C+D+E)/self.data_prop + + def _log_likelihood_gradients(self): + return np.hstack((self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))/self.data_prop + + def vb_grad_natgrad(self): + """ + Compute the gradients of the lower bound wrt the canonical and + Expectation parameters of u. + + Note that the natural gradient in either is given by the gradient in the other (See Hensman et al 2012 Fast Variational inference in the conjugate exponential Family) + """ + + # Gradient for eta + dL_dmmT_S = -0.5*self.Lambda/self.data_prop + 0.5*self.q_u_prec + Kmmipsi1V,_ = dpotrs(self.Lm,self.psi1V,lower=1) + dL_dm = (Kmmipsi1V - np.dot(self.Lambda,self.q_u_mean))/self.data_prop + + # Gradients for theta + S = self.q_u_cov + Si = self.q_u_prec + m = self.q_u_mean + dL_dSi = -mdot(S,dL_dmmT_S, S) + + dL_dmhSi = -2*dL_dSi + dL_dSim = np.dot(dL_dSi,m) + np.dot(Si, dL_dm) + + return np.hstack((dL_dm.flatten(),dL_dmmT_S.flatten())) , np.hstack((dL_dSim.flatten(), dL_dmhSi.flatten())) + + + def optimize(self, iterations, print_interval=10, callback=lambda:None, callback_interval=5): + + param_step = 0. + + #Iterate! + for i in range(iterations): + + #store the current configuration for plotting later + self._param_trace.append(self._get_params()) + self._ll_trace.append(self.log_likelihood() + self.log_prior()) + + #load a batch + self.load_batch() + + #compute the (stochastic) gradient + natgrads = self.vb_grad_natgrad() + grads = self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients()) + self._grad_trace.append(grads) + + #compute the steps in all parameters + vb_step = self.vb_steplength*natgrads[0] + if (self.epochs>=1):#only move the parameters after the first epoch + param_step = self.momentum*param_step + self.param_steplength*grads + else: + param_step = 0. 
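            # [Annotation, not part of the original patch.] The two updates below are
            # the standard stochastic variational inference scheme: the canonical
            # parameters of q(u) take a natural-gradient ascent step,
            #     eta <- eta + vb_steplength * natgrad,
            # (the natural gradient in the canonical parameters is the ordinary
            # gradient in the expectation parameters, as vb_grad_natgrad notes),
            # while the kernel/likelihood hyperparameters take a momentum SGD step,
            #     param_step <- momentum*param_step + param_steplength*grad,
            # applied in the transformed (unconstrained) parameter space. Both step
            # lengths are adapted by _adapt_steplength() once enough epochs have passed.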
+ + self.set_vb_param(self.get_vb_param() + vb_step) + #Note: don't recompute everything here, wait until the next iteration when we have a new batch + self._set_params(self._untransform_params(self._get_params_transformed() + param_step), computations=False) + + #print messages if desired + if i and (not i%print_interval): + print i, np.mean(self._ll_trace[-print_interval:]) #, self.log_likelihood() + print np.round(np.mean(self._grad_trace[-print_interval:],0),3) + sys.stdout.flush() + + #callback + if i and not i%callback_interval: + callback() + time.sleep(0.1) + + if self.epochs > 10: + self._adapt_steplength() + + self.iterations += 1 + + + def _adapt_steplength(self): + if self.adapt_vb_steplength: + # self._adaptive_vb_steplength() + self._adaptive_vb_steplength_KL() + self._vb_steplength_trace.append(self.vb_steplength) + assert self.vb_steplength > 0 + + if self.adapt_param_steplength: + # self._adaptive_param_steplength() + # self._adaptive_param_steplength_log() + self._adaptive_param_steplength_from_vb() + self._param_steplength_trace.append(self.param_steplength) + + def _adaptive_param_steplength(self): + decr_factor = 0.1 + g_tp = self._transform_gradients(self._log_likelihood_gradients()) + self.gbar_tp = (1-1/self.tau_tp)*self.gbar_tp + 1/self.tau_tp * g_tp + self.hbar_tp = (1-1/self.tau_tp)*self.hbar_tp + 1/self.tau_tp * np.dot(g_tp.T, g_tp) + new_param_steplength = np.dot(self.gbar_tp.T, self.gbar_tp) / self.hbar_tp + #- hack + new_param_steplength *= decr_factor + self.param_steplength = (self.param_steplength + new_param_steplength)/2 + #- + self.tau_tp = self.tau_tp*(1-self.param_steplength) + 1 + + def _adaptive_param_steplength_log(self): + stp = np.logspace(np.log(0.0001), np.log(1e-6), base=np.e, num=18000) + self.param_steplength = stp[self.iterations] + + def _adaptive_param_steplength_log2(self): + self.param_steplength = (self.iterations + 0.001)**-0.5 + + def _adaptive_param_steplength_from_vb(self): + self.param_steplength = self.vb_steplength * 0.01 + + def _adaptive_vb_steplength(self): + decr_factor = 0.1 + g_t = self.vb_grad_natgrad()[0] + self.gbar_t = (1-1/self.tau_t)*self.gbar_t + 1/self.tau_t * g_t + self.hbar_t = (1-1/self.tau_t)*self.hbar_t + 1/self.tau_t * np.dot(g_t.T, g_t) + new_vb_steplength = np.dot(self.gbar_t.T, self.gbar_t) / self.hbar_t + #- hack + new_vb_steplength *= decr_factor + self.vb_steplength = (self.vb_steplength + new_vb_steplength)/2 + #- + self.tau_t = self.tau_t*(1-self.vb_steplength) + 1 + + def _adaptive_vb_steplength_KL(self): + decr_factor = 1 #0.1 + natgrad = self.vb_grad_natgrad() + g_t1 = natgrad[0] + g_t2 = natgrad[1] + self.gbar_t1 = (1-1/self.tau_t)*self.gbar_t1 + 1/self.tau_t * g_t1 + self.gbar_t2 = (1-1/self.tau_t)*self.gbar_t2 + 1/self.tau_t * g_t2 + self.hbar_t = (1-1/self.tau_t)*self.hbar_t + 1/self.tau_t * np.dot(g_t1.T, g_t2) + self.vb_steplength = np.dot(self.gbar_t1.T, self.gbar_t2) / self.hbar_t + self.vb_steplength *= decr_factor + self.tau_t = self.tau_t*(1-self.vb_steplength) + 1 + + def _raw_predict(self, X_new, X_variance_new=None, which_parts='all',full_cov=False): + """Internal helper function for making predictions, does not account for normalization""" + + #TODO: make this more efficient! 
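        # [Annotation, not part of the original patch.] The block below evaluates the
        # usual sparse-GP predictive density under the variational posterior
        # q(u) = N(m, S):
        #     mean = K_*m Kmm^{-1} m
        #     var  = K_** - K_*m (Kmm^{-1} - Kmm^{-1} S Kmm^{-1}) K_m*
        # 'tmp' holds Kmm^{-1} - Kmm^{-1} S Kmm^{-1}; when X_variance_new is given,
        # the kernel terms K_*m and K_** are replaced by the psi-statistics psi1 and
        # psi0 (with psi2 for the quadratic term in the variance).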
+ self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm) + tmp = self.Kmmi- mdot(self.Kmmi,self.q_u_cov,self.Kmmi) + + if X_variance_new is None: + Kx = self.kern.K(X_new,self.Z) + mu = np.dot(Kx,self.Kmmi_m) + if full_cov: + Kxx = self.kern.K(X_new) + var = Kxx - mdot(Kx,tmp,Kx.T) + else: + Kxx = self.kern.Kdiag(X_new) + var = (Kxx - np.sum(Kx*np.dot(Kx,tmp),1))[:,None] + return mu, var + else: + assert X_variance_new.shape == X_new.shape + Kx = self.kern.psi1(self.Z,X_new, X_variance_new) + mu = np.dot(Kx,self.Kmmi_m) + Kxx = self.kern.psi0(self.Z,X_new,X_variance_new) + psi2 = self.kern.psi2(self.Z,X_new,X_variance_new) + diag_var = Kxx - np.sum(np.sum(psi2*tmp[None,:,:],1),1) + if full_cov: + raise NotImplementedError + else: + return mu, diag_var[:,None] + + def predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=False): + # normalize X values + Xnew = (Xnew.copy() - self._Xoffset) / self._Xscale + if X_variance_new is not None: + X_variance_new = X_variance_new / self._Xscale ** 2 + + # here's the actual prediction by the GP model + mu, var = self._raw_predict(Xnew, X_variance_new, full_cov=full_cov, which_parts=which_parts) + + # now push through likelihood + mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov) + + return mean, var, _025pm, _975pm + + + def set_vb_param(self,vb_param): + """set the distribution q(u) from the canonical parameters""" + self.q_u_canonical_flat = vb_param.copy() + self.q_u_canonical = self.q_u_canonical_flat[:self.num_inducing*self.output_dim].reshape(self.num_inducing,self.output_dim),self.q_u_canonical_flat[self.num_inducing*self.output_dim:].reshape(self.num_inducing,self.num_inducing) + + self.q_u_prec = -2.*self.q_u_canonical[1] + self.q_u_cov, q_u_Li, q_u_L, tmp = pdinv(self.q_u_prec) + self.q_u_Li = q_u_Li + self.q_u_logdet = -tmp + self.q_u_mean, _ = dpotrs(q_u_Li, np.asfortranarray(self.q_u_canonical[0]),lower=1) + + self.q_u_expectation = (self.q_u_mean, np.dot(self.q_u_mean,self.q_u_mean.T)+self.q_u_cov*self.output_dim) + + + def get_vb_param(self): + """ + Return the canonical parameters of the distribution q(u) + """ + return self.q_u_canonical_flat + + + def plot(self, ax=None, fignum=None, Z_height=None, **kwargs): + + if ax is None: + fig = pb.figure(num=fignum) + ax = fig.add_subplot(111) + + #horrible hack here: + data = self.likelihood.data.copy() + self.likelihood.data = self.Y + GPBase.plot(self, ax=ax, **kwargs) + self.likelihood.data = data + + Zu = self.Z * self._Xscale + self._Xoffset + if self.input_dim==1: + ax.plot(self.X_batch, self.likelihood.data, 'gx',mew=2) + if Z_height is None: + Z_height = ax.get_ylim()[0] + ax.plot(Zu, np.zeros_like(Zu) + Z_height, 'r|', mew=1.5, markersize=12) + + if self.input_dim==2: + ax.scatter(self.X_all[:,0], self.X_all[:,1], 20., self.Y[:,0], linewidth=0, cmap=pb.cm.jet) + ax.plot(Zu[:,0], Zu[:,1], 'w^') + + def plot_traces(self): + pb.figure() + t = np.array(self._param_trace) + pb.subplot(2,1,1) + for l,ti in zip(self._get_param_names(),t.T): + if not l[:3]=='iip': + pb.plot(ti,label=l) + pb.legend(loc=0) + + pb.subplot(2,1,2) + pb.plot(np.asarray(self._ll_trace),label='stochastic likelihood') + pb.legend(loc=0) diff --git a/GPy/examples/__init__.py b/GPy/examples/__init__.py index 0d73daff..2f74858a 100644 --- a/GPy/examples/__init__.py +++ b/GPy/examples/__init__.py @@ -5,3 +5,4 @@ import classification import regression import dimensionality_reduction import tutorials +import stochastic diff --git a/GPy/examples/stochastic.py 
b/GPy/examples/stochastic.py new file mode 100644 index 00000000..45418d5c --- /dev/null +++ b/GPy/examples/stochastic.py @@ -0,0 +1,41 @@ +# Copyright (c) 2012, GPy authors (see AUTHORS.txt). +# Licensed under the BSD 3-clause license (see LICENSE.txt) + +import pylab as pb +import numpy as np +import GPy + +def toy_1d(): + N = 2000 + M = 20 + + #create data + X = np.linspace(0,32,N)[:,None] + Z = np.linspace(0,32,M)[:,None] + Y = np.sin(X) + np.cos(0.3*X) + np.random.randn(*X.shape)/np.sqrt(50.) + + m = GPy.models.SVIGPRegression(X,Y, batchsize=10, Z=Z) + m.ensure_default_constraints() + m.constrain_bounded('noise_variance',1e-3,1e-1) + + m.param_steplength = 1e-4 + + fig = pb.figure() + ax = fig.add_subplot(111) + def cb(): + ax.cla() + m.plot(ax=ax,Z_height=-3) + ax.set_ylim(-3,3) + fig.canvas.draw() + + m.optimize(500, callback=cb, callback_interval=1) + + m.plot_traces() + return m + + + + + + + diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py index f18e89db..885372a1 100644 --- a/GPy/models/__init__.py +++ b/GPy/models/__init__.py @@ -4,6 +4,7 @@ from gp_regression import GPRegression from gp_classification import GPClassification from sparse_gp_regression import SparseGPRegression +from svigp_regression import SVIGPRegression from sparse_gp_classification import SparseGPClassification from fitc_classification import FITCClassification from gplvm import GPLVM diff --git a/GPy/models/svigp_regression.py b/GPy/models/svigp_regression.py new file mode 100644 index 00000000..8448bf37 --- /dev/null +++ b/GPy/models/svigp_regression.py @@ -0,0 +1,44 @@ +# Copyright (c) 2012, James Hensman +# Licensed under the BSD 3-clause license (see LICENSE.txt) + +import numpy as np +from ..core import SVIGP +from .. import likelihoods +from .. import kern + +class SVIGPRegression(SVIGP): + """ + Gaussian Process model for regression + + This is a thin wrapper around the SVIGP class, with a set of sensible defalts + + :param X: input observations + :param Y: observed values + :param kernel: a GPy kernel, defaults to rbf+white + :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales) + :type normalize_X: False|True + :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales) + :type normalize_Y: False|True + :rtype: model object + + .. Note:: Multiple independent outputs are allowed using columns of Y + + """ + + def __init__(self, X, Y, kernel=None, Z=None, num_inducing=10, q_u=None, batchsize=10): + # kern defaults to rbf (plus white for stability) + if kernel is None: + kernel = kern.rbf(X.shape[1], variance=1., lengthscale=4.) 
+ kern.white(X.shape[1], 1e-3) + + # Z defaults to a subset of the data + if Z is None: + i = np.random.permutation(X.shape[0])[:num_inducing] + Z = X[i].copy() + else: + assert Z.shape[1] == X.shape[1] + + # likelihood defaults to Gaussian + likelihood = likelihoods.Gaussian(Y, normalize=False) + + SVIGP.__init__(self, X, likelihood, kernel, Z, q_u=q_u, batchsize=batchsize) + self.load_batch() From 068c74a515bf1ffc05e54339a48e38ab4efbd20d Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Jun 2013 15:04:35 +0100 Subject: [PATCH 4/5] New version number --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4e9873b8..90645e71 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ import os from setuptools import setup # Version number -version = '0.4.5' +version = '0.4.6' def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() From 8fd8288fb88b4e14a4150bc23d3a4372d0f60de9 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Mon, 17 Jun 2013 15:57:19 +0100 Subject: [PATCH 5/5] ensure_default_constraints is on by default --- GPy/examples/classification.py | 5 ----- GPy/examples/dimensionality_reduction.py | 12 ------------ GPy/examples/regression.py | 14 -------------- GPy/examples/stochastic.py | 1 - GPy/examples/tutorials.py | 3 --- GPy/kern/rbf.py | 14 +++++++------- GPy/models/bayesian_gplvm.py | 2 +- GPy/models/fitc_classification.py | 2 +- GPy/models/gp_classification.py | 2 +- GPy/models/gp_regression.py | 2 +- GPy/models/gplvm.py | 2 +- GPy/models/mrd.py | 2 +- GPy/models/sparse_gp_classification.py | 2 +- GPy/models/sparse_gp_regression.py | 2 +- GPy/models/sparse_gplvm.py | 1 + GPy/testing/bgplvm_tests.py | 5 ----- GPy/testing/gplvm_tests.py | 3 --- GPy/testing/mrd_tests.py | 1 - GPy/testing/prior_tests.py | 3 --- GPy/testing/psi_stat_gradient_tests.py | 2 -- GPy/testing/sparse_gplvm_tests.py | 3 --- GPy/testing/unit_tests.py | 4 ---- 22 files changed, 16 insertions(+), 71 deletions(-) diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py index 4f1a4ebc..c7daa26b 100644 --- a/GPy/examples/classification.py +++ b/GPy/examples/classification.py @@ -25,7 +25,6 @@ def crescent_data(seed=default_seed): # FIXME Y[Y.flatten()==-1] = 0 m = GPy.models.GPClassification(data['X'], Y) - m.ensure_default_constraints() #m.update_likelihood_approximation() #m.optimize() m.pseudo_EM() @@ -75,7 +74,6 @@ def toy_linear_1d_classification(seed=default_seed): # Model definition m = GPy.models.GPClassification(data['X'], Y) - m.ensure_default_constraints() # Optimize #m.update_likelihood_approximation() @@ -106,7 +104,6 @@ def sparse_toy_linear_1d_classification(num_inducing=10,seed=default_seed): m = GPy.models.SparseGPClassification(data['X'], Y,num_inducing=num_inducing) m['.*len']= 4. - m.ensure_default_constraints() # Optimize #m.update_likelihood_approximation() # Parameters optimization: @@ -137,7 +134,6 @@ def sparse_crescent_data(num_inducing=10, seed=default_seed): Y[Y.flatten()==-1]=0 m = GPy.models.SparseGPClassification(data['X'], Y,num_inducing=num_inducing) - m.ensure_default_constraints() m['.*len'] = 10. #m.update_likelihood_approximation() #m.optimize() @@ -163,7 +159,6 @@ def FITC_crescent_data(num_inducing=10, seed=default_seed): m = GPy.models.FITCClassification(data['X'], Y,num_inducing=num_inducing) m.constrain_bounded('.*len',1.,1e3) - m.ensure_default_constraints() m['.*len'] = 3. 
#m.update_likelihood_approximation() #m.optimize() diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index 28ee2bde..16afe5eb 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -37,7 +37,6 @@ def BGPLVM(seed=default_seed): # m.optimize(messages = 1) # m.plot() # pb.title('After optimisation') - m.ensure_default_constraints() m.randomize() m.checkgrad(verbose=1) @@ -53,7 +52,6 @@ def GPLVM_oil_100(optimize=True): m.data_labels = data['Y'].argmax(axis=1) # optimize - m.ensure_default_constraints() if optimize: m.optimize('scg', messages=1) @@ -108,7 +106,6 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False m.data_colors = c m.data_t = t - m.ensure_default_constraints() m['rbf_lengthscale'] = 1. # X.var(0).max() / X.var(0) m['noise_variance'] = Y.var() / 100. m['bias_variance'] = 0.05 @@ -134,7 +131,6 @@ def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_f_eval=50, plot= m['.*lengt'] = 1. # m.X.var(0).max() / m.X.var(0) m['noise'] = Yn.var() / 100. - m.ensure_default_constraints() # optimize if optimize: @@ -159,7 +155,6 @@ def oil_100(): m = GPy.models.GPLVM(data['X'], 2) # optimize - m.ensure_default_constraints() m.optimize(messages=1, max_iters=2) # plot @@ -239,7 +234,6 @@ def bgplvm_simulation_matlab_compare(): # X=mu, # X_variance=S, _debug=False) - m.ensure_default_constraints() m.auto_scale_factor = True m['noise'] = Y.var() / 100. m['linear_variance'] = .01 @@ -262,7 +256,6 @@ def bgplvm_simulation(optimize='scg', k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) # + kern.bias(Q) m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k, _debug=True) # m.constrain('variance|noise', logexp_clipped()) - m.ensure_default_constraints() m['noise'] = Y.var() / 100. m['linear_variance'] = .01 @@ -291,7 +284,6 @@ def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw): for i, Y in enumerate(Ylist): m['{}_noise'.format(i + 1)] = Y.var() / 100. 
- m.ensure_default_constraints() # DEBUG # np.seterr("raise") @@ -319,7 +311,6 @@ def brendan_faces(): # optimize m.constrain('rbf|noise|white', GPy.core.transformations.logexp_clipped()) - m.ensure_default_constraints() m.optimize('scg', messages=1, max_f_eval=10000) ax = m.plot_latent(which_indices=(0, 1)) @@ -336,7 +327,6 @@ def stick(): m = GPy.models.GPLVM(data['Y'], 2) # optimize - m.ensure_default_constraints() m.optimize(messages=1, max_f_eval=10000) m._set_params(m._get_params()) @@ -359,7 +349,6 @@ def cmu_mocap(subject='35', motion=['01'], in_place=True): m = GPy.models.GPLVM(data['Y'], 2, normalize_Y=True) # optimize - m.ensure_default_constraints() m.optimize(messages=1, max_f_eval=10000) ax = m.plot_latent() @@ -391,7 +380,6 @@ def cmu_mocap(subject='35', motion=['01'], in_place=True): # m.set('iip', Z) # m.set('bias', 1e-4) # # optimize -# # m.ensure_default_constraints() # # import pdb; pdb.set_trace() # m.optimize('tnc', messages=1) diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 726a9085..21b435e7 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -18,7 +18,6 @@ def toy_rbf_1d(optimizer='tnc', max_nb_eval_optim=100): m = GPy.models.GPRegression(data['X'],data['Y']) # optimize - m.ensure_default_constraints() m.optimize(optimizer, max_f_eval=max_nb_eval_optim) # plot m.plot() @@ -36,7 +35,6 @@ def rogers_girolami_olympics(optim_iters=100): m['rbf_lengthscale'] = 10 # optimize - m.ensure_default_constraints() m.optimize(max_f_eval=optim_iters) # plot @@ -52,7 +50,6 @@ def toy_rbf_1d_50(optim_iters=100): m = GPy.models.GPRegression(data['X'],data['Y']) # optimize - m.ensure_default_constraints() m.optimize(max_f_eval=optim_iters) # plot @@ -68,7 +65,6 @@ def silhouette(optim_iters=100): m = GPy.models.GPRegression(data['X'],data['Y']) # optimize - m.ensure_default_constraints() m.optimize(messages=True,max_f_eval=optim_iters) print(m) @@ -92,7 +88,6 @@ def coregionalisation_toy2(optim_iters=100): m = GPy.models.GPRegression(X,Y,kernel=k) m.constrain_fixed('.*rbf_var',1.) #m.constrain_positive('.*kappa') - m.ensure_default_constraints() m.optimize('sim',messages=1,max_f_eval=optim_iters) pb.figure() @@ -124,7 +119,6 @@ def coregionalisation_toy(optim_iters=100): m = GPy.models.GPRegression(X,Y,kernel=k) m.constrain_fixed('.*rbf_var',1.) #m.constrain_positive('kappa') - m.ensure_default_constraints() m.optimize(max_f_eval=optim_iters) pb.figure() @@ -162,7 +156,6 @@ def coregionalisation_sparse(optim_iters=100): m.constrain_fixed('.*rbf_var',1.) 
m.constrain_fixed('iip') m.constrain_bounded('noise_variance',1e-3,1e-1) - m.ensure_default_constraints() m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters) #plotting: @@ -189,11 +182,9 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000 log_SNRs = np.linspace(-3., 4., resolution) data = GPy.util.datasets.della_gatta_TRP63_gene_expression(gene_number) - # Sub sample the data to ensure multiple optima #data['Y'] = data['Y'][0::2, :] #data['X'] = data['X'][0::2, :] - # Remove the mean (no bias kernel to ensure signal/noise is in RBF/white) data['Y'] = data['Y'] - np.mean(data['Y']) lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf) @@ -220,7 +211,6 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000 optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']); # optimize - m.ensure_default_constraints() m.optimize('scg', xtol=1e-6, ftol=1e-6, max_f_eval=optim_iters) optim_point_x[1] = m['rbf_lengthscale'] @@ -273,7 +263,6 @@ def sparse_GP_regression_1D(N = 400, num_inducing = 5, optim_iters=100): # create simple GP Model m = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=num_inducing) - m.ensure_default_constraints() m.checkgrad(verbose=1) m.optimize('tnc', messages = 1, max_f_eval=optim_iters) @@ -294,7 +283,6 @@ def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100): m = GPy.models.SparseGPRegression(X,Y,kernel, num_inducing = num_inducing) # contrain all parameters to be positive (but not inducing inputs) - m.ensure_default_constraints() m.set('.*len',2.) m.checkgrad() @@ -320,7 +308,6 @@ def uncertain_inputs_sparse_regression(optim_iters=100): # create simple GP Model - no input uncertainty on this one m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z) - m.ensure_default_constraints() m.optimize('scg', messages=1, max_f_eval=optim_iters) m.plot(ax=axes[0]) axes[0].set_title('no input uncertainty') @@ -328,7 +315,6 @@ def uncertain_inputs_sparse_regression(optim_iters=100): #the same Model with uncertainty m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S) - m.ensure_default_constraints() m.optimize('scg', messages=1, max_f_eval=optim_iters) m.plot(ax=axes[1]) axes[1].set_title('with input uncertainty') diff --git a/GPy/examples/stochastic.py b/GPy/examples/stochastic.py index 45418d5c..533904d5 100644 --- a/GPy/examples/stochastic.py +++ b/GPy/examples/stochastic.py @@ -15,7 +15,6 @@ def toy_1d(): Y = np.sin(X) + np.cos(0.3*X) + np.random.randn(*X.shape)/np.sqrt(50.) m = GPy.models.SVIGPRegression(X,Y, batchsize=10, Z=Z) - m.ensure_default_constraints() m.constrain_bounded('noise_variance',1e-3,1e-1) m.param_steplength = 1e-4 diff --git a/GPy/examples/tutorials.py b/GPy/examples/tutorials.py index 6950af37..69fc2aaf 100644 --- a/GPy/examples/tutorials.py +++ b/GPy/examples/tutorials.py @@ -24,7 +24,6 @@ def tuto_GP_regression(): print m m.plot() - m.ensure_default_constraints() m.constrain_positive('') m.unconstrain('') # may be used to remove the previous constrains @@ -135,7 +134,6 @@ def tuto_kernel_overview(): pb.ylabel("+ ",rotation='horizontal',fontsize='30') m.plot(ax=axs, which_parts=[False,False,False,True]) - m.ensure_default_constraints() return(m) @@ -144,6 +142,5 @@ def model_interaction(): Y = np.sin(X) + np.random.randn(*X.shape)*0.01 + 5. 
k = GPy.kern.rbf(1) + GPy.kern.bias(1) m = GPy.models.GPRegression(X, Y, kernel=k) - m.ensure_default_constraints() return m diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py index 5686d7bd..03b37b01 100644 --- a/GPy/kern/rbf.py +++ b/GPy/kern/rbf.py @@ -241,9 +241,9 @@ class rbf(Kernpart): # here are the "statistics" for psi1 and psi2 if not np.array_equal(Z, self._Z): #Z has changed, compute Z specific stuff - self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # num_inducing,num_inducing,input_dim - self._psi2_Zdist = 0.5*(Z[:,None,:]-Z[None,:,:]) # num_inducing,num_inducing,input_dim - self._psi2_Zdist_sq = np.square(self._psi2_Zdist/self.lengthscale) # num_inducing,num_inducing,input_dim + self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # M,M,Q + self._psi2_Zdist = 0.5*(Z[:,None,:]-Z[None,:,:]) # M,M,Q + self._psi2_Zdist_sq = np.square(self._psi2_Zdist/self.lengthscale) # M,M,Q self._Z = Z if not (np.array_equal(Z, self._Z) and np.array_equal(mu, self._mu) and np.array_equal(S, self._S)): @@ -257,12 +257,12 @@ class rbf(Kernpart): self._psi1 = self.variance*np.exp(self._psi1_exponent) #psi2 - self._psi2_denom = 2.*S[:,None,None,:]/self.lengthscale2+1. # N,num_inducing,num_inducing,input_dim + self._psi2_denom = 2.*S[:,None,None,:]/self.lengthscale2+1. # N,M,M,Q self._psi2_mudist, self._psi2_mudist_sq, self._psi2_exponent, _ = self.weave_psi2(mu,self._psi2_Zhat) - #self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,num_inducing,num_inducing,input_dim + #self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,M,M,Q #self._psi2_mudist_sq = np.square(self._psi2_mudist)/(self.lengthscale2*self._psi2_denom) - #self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,num_inducing,num_inducing - self._psi2 = np.square(self.variance)*np.exp(self._psi2_exponent) # N,num_inducing,num_inducing + #self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,M,M,Q + self._psi2 = np.square(self.variance)*np.exp(self._psi2_exponent) # N,M,M,Q #store matrices for caching self._Z, self._mu, self._S = Z, mu,S diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py index 8043c635..c401f788 100644 --- a/GPy/models/bayesian_gplvm.py +++ b/GPy/models/bayesian_gplvm.py @@ -60,7 +60,7 @@ class BayesianGPLVM(SparseGP, GPLVM): self._savedABCD = [] SparseGP.__init__(self, X, likelihood, kernel, Z=Z, X_variance=X_variance, **kwargs) - self._set_params(self._get_params()) + self.ensure_default_constraints() @property def oldps(self): diff --git a/GPy/models/fitc_classification.py b/GPy/models/fitc_classification.py index 65178c8c..f4cf4e8d 100644 --- a/GPy/models/fitc_classification.py +++ b/GPy/models/fitc_classification.py @@ -44,4 +44,4 @@ class FITCClassification(FITC): assert Z.shape[1]==X.shape[1] FITC.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X) - self._set_params(self._get_params()) + self.ensure_default_constraints() diff --git a/GPy/models/gp_classification.py b/GPy/models/gp_classification.py index 376f0005..c6012988 100644 --- a/GPy/models/gp_classification.py +++ b/GPy/models/gp_classification.py @@ -38,4 +38,4 @@ class GPClassification(GP): raise Warning, 'likelihood.data and Y are different.' 
GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X) - self._set_params(self._get_params()) + self.ensure_default_constraints() diff --git a/GPy/models/gp_regression.py b/GPy/models/gp_regression.py index 8d0b02e0..db5d21b2 100644 --- a/GPy/models/gp_regression.py +++ b/GPy/models/gp_regression.py @@ -32,4 +32,4 @@ class GPRegression(GP): likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y) GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X) - self._set_params(self._get_params()) + self.ensure_default_constraints() diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py index e602a59a..44a9d2ce 100644 --- a/GPy/models/gplvm.py +++ b/GPy/models/gplvm.py @@ -33,7 +33,7 @@ class GPLVM(GP): kernel = kern.rbf(input_dim, ARD=input_dim>1) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2)) likelihood = Gaussian(Y, normalize=normalize_Y) GP.__init__(self, X, likelihood, kernel, normalize_X=False) - self._set_params(self._get_params()) + self.ensure_default_constraints() def initialise_latent(self, init, input_dim, Y): if init == 'PCA': diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index 75c6fee9..1d521e5d 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -79,7 +79,7 @@ class MRD(Model): self.MQ = self.num_inducing * self.input_dim Model.__init__(self) - self._set_params(self._get_params()) + self.ensure_default_constraints() @property def X(self): diff --git a/GPy/models/sparse_gp_classification.py b/GPy/models/sparse_gp_classification.py index 9027ef07..9228fb89 100644 --- a/GPy/models/sparse_gp_classification.py +++ b/GPy/models/sparse_gp_classification.py @@ -44,4 +44,4 @@ class SparseGPClassification(SparseGP): assert Z.shape[1]==X.shape[1] SparseGP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X) - self._set_params(self._get_params()) + self.ensure_default_constraints() diff --git a/GPy/models/sparse_gp_regression.py b/GPy/models/sparse_gp_regression.py index 432d6e18..0dcef3e0 100644 --- a/GPy/models/sparse_gp_regression.py +++ b/GPy/models/sparse_gp_regression.py @@ -42,4 +42,4 @@ class SparseGPRegression(SparseGP): likelihood = likelihoods.Gaussian(Y, normalize=normalize_Y) SparseGP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X, X_variance=X_variance) - self._set_params(self._get_params()) + self.ensure_default_constraints() diff --git a/GPy/models/sparse_gplvm.py b/GPy/models/sparse_gplvm.py index ea2f8013..d6f4adb9 100644 --- a/GPy/models/sparse_gplvm.py +++ b/GPy/models/sparse_gplvm.py @@ -26,6 +26,7 @@ class SparseGPLVM(SparseGPRegression, GPLVM): def __init__(self, Y, input_dim, kernel=None, init='PCA', num_inducing=10): X = self.initialise_latent(init, input_dim, Y) SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing) + self.ensure_default_constraints() def _get_param_names(self): return (sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], []) diff --git a/GPy/testing/bgplvm_tests.py b/GPy/testing/bgplvm_tests.py index ff558f6d..6b91d999 100644 --- a/GPy/testing/bgplvm_tests.py +++ b/GPy/testing/bgplvm_tests.py @@ -16,7 +16,6 @@ class BGPLVMTests(unittest.TestCase): Y -= Y.mean(axis=0) k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) @@ -29,7 +28,6 @@ class BGPLVMTests(unittest.TestCase): Y -= Y.mean(axis=0) k = GPy.kern.linear(input_dim) + 
GPy.kern.white(input_dim, 0.00001) m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) @@ -42,7 +40,6 @@ class BGPLVMTests(unittest.TestCase): Y -= Y.mean(axis=0) k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001) m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) @@ -55,7 +52,6 @@ class BGPLVMTests(unittest.TestCase): Y -= Y.mean(axis=0) k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) @@ -69,7 +65,6 @@ class BGPLVMTests(unittest.TestCase): Y -= Y.mean(axis=0) k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) diff --git a/GPy/testing/gplvm_tests.py b/GPy/testing/gplvm_tests.py index 8c2ba9fc..ebb5c4e5 100644 --- a/GPy/testing/gplvm_tests.py +++ b/GPy/testing/gplvm_tests.py @@ -14,7 +14,6 @@ class GPLVMTests(unittest.TestCase): Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) m = GPy.models.GPLVM(Y, input_dim, kernel = k) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) @@ -26,7 +25,6 @@ class GPLVMTests(unittest.TestCase): Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001) m = GPy.models.GPLVM(Y, input_dim, kernel = k) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) @@ -38,7 +36,6 @@ class GPLVMTests(unittest.TestCase): Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001) m = GPy.models.GPLVM(Y, input_dim, kernel = k) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) diff --git a/GPy/testing/mrd_tests.py b/GPy/testing/mrd_tests.py index b0137709..40fcb86a 100644 --- a/GPy/testing/mrd_tests.py +++ b/GPy/testing/mrd_tests.py @@ -24,7 +24,6 @@ class MRDTests(unittest.TestCase): likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist] m = GPy.models.MRD(likelihood_list, input_dim=input_dim, kernels=k, num_inducing=num_inducing) - m.ensure_default_constraints() self.assertTrue(m.checkgrad()) diff --git a/GPy/testing/prior_tests.py b/GPy/testing/prior_tests.py index e0226751..c16057db 100644 --- a/GPy/testing/prior_tests.py +++ b/GPy/testing/prior_tests.py @@ -14,7 +14,6 @@ class PriorTests(unittest.TestCase): y += 0.05*np.random.randn(len(X)) X, y = X[:, None], y[:, None] m = GPy.models.GPRegression(X, y) - m.ensure_default_constraints() lognormal = GPy.priors.LogGaussian(1, 2) m.set_prior('rbf', lognormal) m.randomize() @@ -28,7 +27,6 @@ class PriorTests(unittest.TestCase): y += 0.05*np.random.randn(len(X)) X, y = X[:, None], y[:, None] m = GPy.models.GPRegression(X, y) - m.ensure_default_constraints() Gamma = GPy.priors.Gamma(1, 1) m.set_prior('rbf', Gamma) m.randomize() @@ -42,7 +40,6 @@ class PriorTests(unittest.TestCase): y += 0.05*np.random.randn(len(X)) X, y = X[:, None], y[:, None] m = GPy.models.GPRegression(X, y) - m.ensure_default_constraints() gaussian = GPy.priors.Gaussian(1, 1) success = 
False diff --git a/GPy/testing/psi_stat_gradient_tests.py b/GPy/testing/psi_stat_gradient_tests.py index c110d270..de670f41 100644 --- a/GPy/testing/psi_stat_gradient_tests.py +++ b/GPy/testing/psi_stat_gradient_tests.py @@ -113,7 +113,6 @@ if __name__ == "__main__": # Y -= Y.mean(axis=0) # k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) # m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) -# m.ensure_default_constraints() # m.randomize() # # self.assertTrue(m.checkgrad()) numpy.random.seed(0) @@ -146,7 +145,6 @@ if __name__ == "__main__": # num_inducing=num_inducing, kernel=GPy.kern.rbf(input_dim)) m3 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z, num_inducing=num_inducing, kernel=GPy.kern.linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim))) - m3.ensure_default_constraints() # + GPy.kern.bias(input_dim)) # m4 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z, # num_inducing=num_inducing, kernel=GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim)) diff --git a/GPy/testing/sparse_gplvm_tests.py b/GPy/testing/sparse_gplvm_tests.py index 6145f350..e27fccff 100644 --- a/GPy/testing/sparse_gplvm_tests.py +++ b/GPy/testing/sparse_gplvm_tests.py @@ -15,7 +15,6 @@ class sparse_GPLVMTests(unittest.TestCase): Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001) m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) @@ -27,7 +26,6 @@ class sparse_GPLVMTests(unittest.TestCase): Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001) m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) @@ -39,7 +37,6 @@ class sparse_GPLVMTests(unittest.TestCase): Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001) m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.ensure_default_constraints() m.randomize() self.assertTrue(m.checkgrad()) diff --git a/GPy/testing/unit_tests.py b/GPy/testing/unit_tests.py index 494ebf19..6e504a69 100644 --- a/GPy/testing/unit_tests.py +++ b/GPy/testing/unit_tests.py @@ -37,7 +37,6 @@ class GradientTests(unittest.TestCase): noise = GPy.kern.white(dimension) kern = kern + noise m = model_fit(X, Y, kernel=kern) - m.ensure_default_constraints() m.randomize() # contrain all parameters to be positive self.assertTrue(m.checkgrad()) @@ -150,7 +149,6 @@ class GradientTests(unittest.TestCase): K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T m = GPy.models.GPLVM(Y, input_dim, kernel=k) - m.ensure_default_constraints() self.assertTrue(m.checkgrad()) def test_GPLVM_rbf_linear_white_kern_2D(self): @@ -161,7 +159,6 @@ class GradientTests(unittest.TestCase): K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T m = GPy.models.GPLVM(Y, input_dim, init='PCA', kernel=k) - m.ensure_default_constraints() self.assertTrue(m.checkgrad()) def test_GP_EP_probit(self): @@ -195,7 +192,6 @@ class GradientTests(unittest.TestCase): k = GPy.kern.rbf(1) + GPy.kern.white(1) Y = np.hstack([np.ones(N/2),np.zeros(N/2)])[:,None] m = GPy.models.FITCClassification(X, Y=Y) - m.ensure_default_constraints() m.update_likelihood_approximation() self.assertTrue(m.checkgrad())
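[Editorial note — not part of the patches.] The practical effect of PATCH 5/5 is that example scripts and tests no longer call m.ensure_default_constraints() explicitly, because the model constructors now do it themselves. A minimal usage sketch under that assumption — the toy data and optimizer settings here are illustrative, mirroring the regression examples above rather than quoting them:

    import numpy as np
    import GPy

    X = np.linspace(0., 10., 50)[:, None]
    Y = np.sin(X) + 0.05 * np.random.randn(*X.shape)

    m = GPy.models.GPRegression(X, Y)   # default constraints now applied in __init__ (PATCH 5/5)
    m.optimize('tnc', max_f_eval=100)   # no ensure_default_constraints() call needed
    print(m)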