diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py
index 701a8c65..7cb7566f 100644
--- a/GPy/inference/SGD.py
+++ b/GPy/inference/SGD.py
@@ -18,7 +18,7 @@ class opt_SGD(Optimizer):
     """

-    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, **kwargs):
+    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs):
         self.opt_name = "Stochastic Gradient Descent"
         self.model = model
@@ -34,12 +34,14 @@ class opt_SGD(Optimizer):
         self.param_traces = [('noise',[])]
         self.iteration_file = iteration_file
         self.learning_rate_adaptation = learning_rate_adaptation
+        self.actual_iter = actual_iter
         if self.learning_rate_adaptation != None:
             if self.learning_rate_adaptation == 'annealing':
                 self.learning_rate_0 = self.learning_rate
             else:
                 self.learning_rate_0 = self.learning_rate.mean()
+        self.schedule = schedule
 #        if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1:
 #            self.param_traces.append(('bias',[]))
 #        if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1:
@@ -224,48 +226,36 @@ class opt_SGD(Optimizer):

         return f, step, self.model.N

-    def adapt_learning_rate(self, t):
+    def adapt_learning_rate(self, t, D):
         if self.learning_rate_adaptation == 'adagrad':
-            if t > 5:
-                g = np.array(self.grads)
-                l2_g = np.sqrt(np.square(g).sum(0))
-                self.learning_rate = 0.001/l2_g
+            if t > 0:
+                # accumulate squared gradients: coordinates that have seen
+                # large gradients get proportionally smaller steps
+                g_k = self.model.grads
+                self.s_k += np.square(g_k)
+                t0 = 100.0
+                self.learning_rate = 0.1/(t0 + np.sqrt(self.s_k))
             else:
                 self.learning_rate = np.zeros_like(self.learning_rate)
+                self.s_k = np.zeros_like(self.x_opt)

         elif self.learning_rate_adaptation == 'annealing':
-            self.learning_rate = self.learning_rate_0/(1+float(t+1)/10)
+            #self.learning_rate = self.learning_rate_0/(1+float(t+1)/10)
+            self.learning_rate = np.ones_like(self.learning_rate) * self.schedule[t]

         elif self.learning_rate_adaptation == 'semi_pesky':
             if self.model.__class__.__name__ == 'Bayesian_GPLVM':
+                g_t = self.model.grads
                 if t == 0:
-                    N = self.model.N
-                    Q = self.model.Q
-                    M = self.model.M
+                    self.hbar_t = 0.0
+                    self.tau_t = 100.0
+                    self.gbar_t = 0.0
-                    iip_pos = np.arange(2*N*Q,2*N*Q+M*Q)
-                    mu_pos = np.arange(0,N*Q)
-                    S_pos = np.arange(N*Q,2*N*Q)
-                    self.vbparam_dict = {'iip': [iip_pos],
-                                         'mu': [mu_pos],
-                                         'S': [S_pos]}
-
-                    for k in self.vbparam_dict.keys():
-                        hbar_t = 0.0
-                        tau_t = 1000.0
-                        gbar_t = 0.0
-                        self.vbparam_dict[k].append(hbar_t)
-                        self.vbparam_dict[k].append(tau_t)
-                        self.vbparam_dict[k].append(gbar_t)
-
-                g_t = self.model.grads
-
-                for k in self.vbparam_dict.keys():
-                    pos, hbar_t, tau_t, gbar_t = self.vbparam_dict[k]
-
-                    gbar_t = (1-1/tau_t)*gbar_t + 1/tau_t * g_t[pos]
-                    hbar_t = (1-1/tau_t)*hbar_t + 1/tau_t * np.dot(g_t[pos].T, g_t[pos])
-                    self.learning_rate[pos] = np.dot(gbar_t.T, gbar_t) / hbar_t
-                    tau_t = tau_t*(1-self.learning_rate[pos]) + 1
-                    self.vbparam_dict[k] = [pos, hbar_t, tau_t, gbar_t]
+                # leaky averages of the gradient and of its squared norm
+                self.gbar_t = (1-1/self.tau_t)*self.gbar_t + 1/self.tau_t * g_t
+                self.hbar_t = (1-1/self.tau_t)*self.hbar_t + 1/self.tau_t * np.dot(g_t.T, g_t)
+                rate = np.dot(self.gbar_t.T, self.gbar_t) / self.hbar_t
+                self.learning_rate = np.ones_like(self.learning_rate)*rate
+                self.tau_t = self.tau_t*(1-rate) + 1 # persist the memory update across calls

     def opt(self, f_fp=None, f=None, fp=None):
@@ -274,8 +264,8 @@ class opt_SGD(Optimizer):

         X, Y = self.model.X.copy(), self.model.likelihood.Y.copy()

-        self.model.likelihood.YYT = None
-        self.model.likelihood.trYYT = None
+        self.model.likelihood.YYT = 0
+        self.model.likelihood.trYYT = 0
         self.model.likelihood._bias = 0.0
         self.model.likelihood._scale = 1.0
@@ -287,6 +277,9 @@ class opt_SGD(Optimizer):
         step = np.zeros_like(num_params)

         for it in range(self.iterations):
+            if self.actual_iter is not None:
+                it = self.actual_iter
+
             self.model.grads = np.zeros_like(self.x_opt)
             # TODO this is ugly
             if it == 0 or self.self_paced is False:
@@ -316,6 +309,7 @@ class opt_SGD(Optimizer):
                     self.model.likelihood.trYYT = np.trace(self.model.likelihood.YYT)
                 Nj = N
             f, fp = f_fp(self.x_opt)
+            self.model.grads = fp.copy()
             step = self.momentum * step + self.learning_rate * fp
             self.x_opt -= step
@@ -326,6 +320,7 @@ class opt_SGD(Optimizer):
                 sys.stdout.flush()

             self.param_traces['noise'].append(noise)
+            self.adapt_learning_rate(it+count, D)
             NLL.append(f)
             self.fopt_trace.append(NLL[-1])
 #            fig = plt.figure('traces')
@@ -335,7 +330,6 @@ class opt_SGD(Optimizer):
 #            for k in self.param_traces.keys():
 #                self.param_traces[k].append(self.model.get(k)[0])
             self.grads.append(self.model.grads.tolist())
-            self.adapt_learning_rate(it)
             # should really be a sum(), but earlier samples in the iteration will have a very crappy ll
         self.f_opt = np.mean(NLL)
         self.model.N = N
diff --git a/GPy/models/Bayesian_GPLVM.py b/GPy/models/Bayesian_GPLVM.py
index 464d7425..05e9e255 100644
--- a/GPy/models/Bayesian_GPLVM.py
+++ b/GPy/models/Bayesian_GPLVM.py
@@ -13,24 +13,30 @@ from numpy.linalg.linalg import LinAlgError
 import itertools
 from matplotlib.colors import colorConverter
 from matplotlib.figure import SubplotParams
+from GPy.inference.optimization import SCG

 class Bayesian_GPLVM(sparse_GP, GPLVM):
     """
     Bayesian Gaussian Process Latent Variable Model

-    :param Y: observed data
-    :type Y: np.ndarray
+    :param likelihood_or_Y: observed data (np.ndarray) or a GPy likelihood
+    :type likelihood_or_Y: np.ndarray | GPy.likelihood instance
     :param Q: latent dimensionality
     :type Q: int
     :param init: initialisation method for the latent space
     :type init: 'PCA'|'random'

     """
-    def __init__(self, Y, Q, X=None, X_variance=None, init='PCA', M=10,
+    def __init__(self, likelihood_or_Y, Q, X=None, X_variance=None, init='PCA', M=10,
                  Z=None, kernel=None, oldpsave=10, _debug=False,
                  **kwargs):
+        # accept either a raw data array or a ready-made likelihood object
+        if isinstance(likelihood_or_Y, np.ndarray):
+            likelihood = Gaussian(likelihood_or_Y)
+        else:
+            likelihood = likelihood_or_Y
+
         if X == None:
-            X = self.initialise_latent(init, Q, Y)
+            X = self.initialise_latent(init, Q, likelihood.Y)
         if X_variance is None:
             X_variance = np.clip((np.ones_like(X) * 0.5) + .01 * np.random.randn(*X.shape), 0.001, 1)
@@ -56,7 +62,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         self._savedpsiKmm = []
         self._savedABCD = []

-        sparse_GP.__init__(self, X, Gaussian(Y), kernel, Z=Z, X_variance=X_variance, **kwargs)
+        sparse_GP.__init__(self, X, likelihood, kernel, Z=Z, X_variance=X_variance, **kwargs)

     @property
     def oldps(self):
@@ -184,16 +190,46 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         ax.plot(self.Z[:, input_1], self.Z[:, input_2], '^w')
         return ax

+    def do_test_latents(self, Y):
+        """
+        Compute the latent representation for a set of new points Y.
+
+        Notes:
+        This will only work with a univariate Gaussian likelihood (for now)
+        """
+        assert not self.likelihood.is_heteroscedastic
+        N_test = Y.shape[0]
+        Q = self.Z.shape[1]
+        means = np.zeros((N_test,Q))
+        covars = np.zeros((N_test,Q))
+
+        dpsi0 = -0.5 * self.D * self.likelihood.precision
+        dpsi2 = self.dL_dpsi2[0][None,:,:] # TODO: this may change if we ignore het. likelihoods
+        V = self.likelihood.precision*Y
+        dpsi1 = np.dot(self.Cpsi1V,V.T)
+
+        start = np.zeros(self.Q*2)
+
+        for n,dpsi1_n in enumerate(dpsi1.T[:,:,None]):
+            args = (self.kern,self.Z,dpsi0,dpsi1_n,dpsi2)
+            xopt,fopt,neval,status = SCG(f=latent_cost, gradf=latent_grad, x=start, optargs=args, display=False)
+
+            mu,log_S = xopt.reshape(2,1,-1)
+            means[n] = mu[0].copy()
+            covars[n] = np.exp(log_S[0]).copy()
+
+        return means, covars
+
     def plot_X_1d(self, fig=None, axes=None, fig_num="LVM mu S 1d", colors=None):
         """
         Plot latent space X in 1D:
-
+
         -if fig is given, create Q subplots in fig and plot in these
         -if axes is given plot Q 1D latent space plots of X into each `axis`
         -if neither fig nor axes is given create a figure with fig_num and plot in there
-
+
         colors:
             colors of different latent space dimensions Q
         """
         import pylab
@@ -483,3 +519,63 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         cidd = figs[0].canvas.mpl_connect('motion_notify_event', motion)

         return ax1, ax2, ax3, ax4, ax5 # , ax6, ax7
+
+
+def latent_cost_and_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
+    """
+    objective function for fitting the latent variables for test points
+    (negative log-likelihood: should be minimised!)
+    """
+    mu,log_S = mu_S.reshape(2,1,-1)
+    S = np.exp(log_S)
+
+    psi0 = kern.psi0(Z,mu,S)
+    psi1 = kern.psi1(Z,mu,S)
+    psi2 = kern.psi2(Z,mu,S)
+
+    lik = dL_dpsi0*psi0 + np.dot(dL_dpsi1.flatten(),psi1.flatten()) + np.dot(dL_dpsi2.flatten(),psi2.flatten()) - 0.5*np.sum(np.square(mu) + S) + 0.5*np.sum(log_S)
+
+    mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0,Z,mu,S)
+    mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1,Z,mu,S)
+    mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2,Z,mu,S)
+
+    dmu = mu0 + mu1 + mu2 - mu
+    #dS = S0 + S1 + S2 -0.5 + .5/S
+    dlnS = S*(S0 + S1 + S2 -0.5) + .5
+    return -lik, -np.hstack((dmu.flatten(),dlnS.flatten()))
+
+def latent_cost(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
+    """
+    objective function for fitting the latent variables (negative log-likelihood: should be minimised!)
+    This is the same as latent_cost_and_grad but only for the objective
+    """
+    mu,log_S = mu_S.reshape(2,1,-1)
+    S = np.exp(log_S)
+
+    psi0 = kern.psi0(Z,mu,S)
+    psi1 = kern.psi1(Z,mu,S)
+    psi2 = kern.psi2(Z,mu,S)
+
+    lik = dL_dpsi0*psi0 + np.dot(dL_dpsi1.flatten(),psi1.flatten()) + np.dot(dL_dpsi2.flatten(),psi2.flatten()) - 0.5*np.sum(np.square(mu) + S) + 0.5*np.sum(log_S)
+    return -float(lik)
+
+def latent_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
+    """
+    This is the same as latent_cost_and_grad but only for the grad
+    """
+    mu,log_S = mu_S.reshape(2,1,-1)
+    S = np.exp(log_S)
+
+    mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0,Z,mu,S)
+    mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1,Z,mu,S)
+    mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2,Z,mu,S)
+
+    dmu = mu0 + mu1 + mu2 - mu
+    #dS = S0 + S1 + S2 -0.5 + .5/S
+    dlnS = S*(S0 + S1 + S2 -0.5) + .5
+
+    return -np.hstack((dmu.flatten(),dlnS.flatten()))
diff --git a/GPy/models/GP.py b/GPy/models/GP.py
index b518ce4e..8f7ef9d3 100644
--- a/GPy/models/GP.py
+++ b/GPy/models/GP.py
@@ -173,7 +173,7 @@ class GP(model):
         """
         # normalize X values
         Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
-        mu, var = self._raw_predict(Xnew, which_parts, full_cov)
+        mu, var = self._raw_predict(Xnew, which_parts=which_parts, full_cov=full_cov)

         # now push through likelihood
         mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py
index e79b6720..74f526d0 100644
--- a/GPy/models/mrd.py
+++ b/GPy/models/mrd.py
@@ -15,11 +15,11 @@ import pylab

 class MRD(model):
     """
     Do MRD on given Datasets in Ylist.
-    All Ys in Ylist are in [N x Dn], where Dn can be different per Yn,
+    All Ys in likelihood_list are in [N x Dn], where Dn can be different per Yn,
     N must be shared across datasets though.

-    :param Ylist...: observed datasets
-    :type Ylist: [np.ndarray]
+    :param likelihood_list...: likelihoods of observed datasets
+    :type likelihood_list: [GPy.likelihood]
     :param names: names for different gplvm models
     :type names: [str]
     :param Q: latent dimensionality (will raise
@@ -41,8 +41,41 @@ class MRD(model):
     :param kernel: kernel to use
     """

+    def __init__(self, likelihood_list, Q, M=10, names=None, kernels=None, initX='PCA', initz='permute', _debug=False, **kwargs):
+        self._debug = _debug
+        if names is None:
+            self.names = ["{}".format(i + 1) for i in range(len(likelihood_list))]
+        else:
+            self.names = names
-    def __init__(self, *Ylist, **kwargs):
+
+        #sort out the kernels
+        if kernels is None:
+            kernels = [None]*len(likelihood_list)
+        elif isinstance(kernels, kern.kern):
+            kernels = [kernels.copy() for i in range(len(likelihood_list))]
+        else:
+            assert len(kernels)==len(likelihood_list), "need one kernel per output"
+            assert all([isinstance(k, kern.kern) for k in kernels]), "invalid kernel object detected!"
+
+        self.Q = Q
+        self.M = M
+        self.N = likelihood_list[0].Y.shape[0] # N is shared across all views
+        self.NQ = self.N * self.Q
+        self.MQ = self.M * self.Q
+
+        self._init = True
+        X = self._init_X(initX, likelihood_list)
+        Z = self._init_Z(initz, X)
+        self.bgplvms = [Bayesian_GPLVM(l, self.Q, kernel=k, X=X, Z=Z, M=self.M, **kwargs) for l, k in zip(likelihood_list, kernels)]
+        del self._init
+
+        self.gref = self.bgplvms[0]
+        nparams = numpy.array([0] + [sparse_GP._get_params(g).size - g.Z.size for g in self.bgplvms])
+        self.nparams = nparams.cumsum()
+
+        model.__init__(self) # @UndefinedVariable
+
+
+    def __init__(self, *likelihood_list, **kwargs):
         if kwargs.has_key("_debug"):
             self._debug = kwargs['_debug']
             del kwargs['_debug']
@@ -52,7 +85,7 @@ class MRD(model):
             self.names = kwargs['names']
             del kwargs['names']
         else:
-            self.names = ["{}".format(i + 1) for i in range(len(Ylist))]
+            self.names = ["{}".format(i + 1) for i in range(len(likelihood_list))]
         if kwargs.has_key('kernel'):
             kernel = kwargs['kernel']
             k = lambda: kernel.copy()
@@ -80,9 +113,10 @@ class MRD(model):
             self.M = 10

         self._init = True
-        X = self._init_X(initx, Ylist)
+        X = self._init_X(initx, likelihood_list)
         Z = self._init_Z(initz, X)
-        self.bgplvms = [Bayesian_GPLVM(Y, kernel=k(), X=X, Z=Z, M=self.M, **kwargs) for Y in Ylist]
+        self.bgplvms = [Bayesian_GPLVM(Y, kernel=k(), X=X, Z=Z, M=self.M, **kwargs) for Y in likelihood_list]
+
         del self._init

         self.gref = self.bgplvms[0]
@@ -126,11 +160,11 @@ class MRD(model):
         if not self._init:
             raise AttributeError("bgplvm list not initialized")

     @property
-    def Ylist(self):
-        return [g.likelihood.Y for g in self.bgplvms]
-    @Ylist.setter
-    def Ylist(self, Ylist):
-        for g, Y in itertools.izip(self.bgplvms, Ylist):
-            g.likelihood.Y = Y
+    def likelihood_list(self):
+        # return the likelihood objects themselves; _init_X expects .Y on each
+        return [g.likelihood for g in self.bgplvms]
+    @likelihood_list.setter
+    def likelihood_list(self, likelihood_list):
+        for g, l in itertools.izip(self.bgplvms, likelihood_list):
+            g.likelihood = l

     @property
@@ -152,7 +186,7 @@ class MRD(model):
     def randomize(self, initx='concat', initz='permute', *args, **kw):
         super(MRD, self).randomize(*args, **kw)
-        self._init_X(initx, self.Ylist)
+        self._init_X(initx, self.likelihood_list)
         self._init_Z(initz, self.X)

     def _get_param_names(self):
@@ -225,6 +259,10 @@ class MRD(model):
 #             g._computations()

+    def update_likelihood_approximation(self): #TODO: object oriented vs script base
+        for bgplvm in self.bgplvms:
+            bgplvm.update_likelihood_approximation()
+
     def log_likelihood(self):
         ll = -self.gref.KL_divergence()
         for g in self.bgplvms:
@@ -246,17 +284,18 @@ class MRD(model):
                                           partial=g.partial_for_likelihood)]) \
             for g in self.bgplvms])))

-    def _init_X(self, init='PCA', Ylist=None):
-        if Ylist is None:
-            Ylist = self.Ylist
+    def _init_X(self, init='PCA', likelihood_list=None):
+        if likelihood_list is None:
+            likelihood_list = self.likelihood_list
         if init in "PCA_single":
-            X = numpy.zeros((Ylist[0].shape[0], self.Q))
-            for qs, Y in itertools.izip(numpy.array_split(numpy.arange(self.Q), len(Ylist)), Ylist):
-                X[:, qs] = PCA(Y, len(qs))[0]
+            X = numpy.zeros((likelihood_list[0].Y.shape[0], self.Q))
+            for qs, Y in itertools.izip(numpy.array_split(numpy.arange(self.Q), len(likelihood_list)), likelihood_list):
+                X[:, qs] = PCA(Y.Y, len(qs))[0]
         elif init in "PCA_concat":
-            X = PCA(numpy.hstack(Ylist), self.Q)[0]
+            X = PCA(numpy.hstack([l.Y for l in likelihood_list]), self.Q)[0]
         else: # init == 'random':
-            X = numpy.random.randn(Ylist[0].shape[0], self.Q)
+            X = numpy.random.randn(likelihood_list[0].Y.shape[0], self.Q)
         self.X = X
         return X
@@ -294,8 +333,8 @@ class MRD(model):
         fig = self._handle_plotting(fig_num, axes, lambda i, g, ax: ax.imshow(g.X))
         return fig

-    def plot_predict(self, fig_num="MRD Predictions", axes=None):
-        fig = self._handle_plotting(fig_num, axes, lambda i, g, ax: ax.imshow(g.predict(g.X)[0]))
+    def plot_predict(self, fig_num="MRD Predictions", axes=None, **kwargs):
+        fig = self._handle_plotting(fig_num, axes, lambda i, g, ax: ax.imshow(g.predict(g.X)[0], **kwargs))
         return fig

     def plot_scales(self, fig_num="MRD Scales", axes=None, *args, **kwargs):
diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py
index 259db8f2..fe9d9dc4 100644
--- a/GPy/models/sparse_GP.py
+++ b/GPy/models/sparse_GP.py
@@ -8,6 +8,7 @@ from ..util.plot import gpplot
 from .. import kern
 from GP import GP
 from scipy import linalg
+from ..likelihoods import Gaussian

 class sparse_GP(GP):
     """
@@ -172,19 +173,19 @@ class sparse_GP(GP):
         For a Gaussian likelihood, no iteration is required: this function does nothing
         """
-        if self.has_uncertain_inputs:
-
-            Lmi = chol_inv(self.Lm)
-            Kmmi = tdot(Lmi.T)
-            diag_tr_psi2Kmmi = np.array([np.trace(psi2_Kmmi) for psi2_Kmmi in np.dot(self.psi2,Kmmi)])
-
-            self.likelihood.fit_FITC(self.Kmm,self.psi1,diag_tr_psi2Kmmi) #This uses the fit_FITC code, but does not perfomr a FITC-EP.#TODO solve potential confusion
-            #raise NotImplementedError, "EP approximation not implemented for uncertain inputs"
-        else:
-            self.likelihood.fit_DTC(self.Kmm, self.psi1)
-            # self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
-        self._set_params(self._get_params()) # update the GP
+        if not isinstance(self.likelihood, Gaussian): # updates are not needed for a Gaussian likelihood
+            self.likelihood.restart() #TODO check consistency with pseudo_EP
+            if self.has_uncertain_inputs:
+                Lmi = chol_inv(self.Lm)
+                Kmmi = tdot(Lmi.T)
+                diag_tr_psi2Kmmi = np.array([np.trace(psi2_Kmmi) for psi2_Kmmi in np.dot(self.psi2,Kmmi)])
+                self.likelihood.fit_FITC(self.Kmm,self.psi1,diag_tr_psi2Kmmi) # this uses the fit_FITC code, but does not perform a FITC-EP #TODO solve potential confusion
+                #raise NotImplementedError, "EP approximation not implemented for uncertain inputs"
+            else:
+                self.likelihood.fit_DTC(self.Kmm, self.psi1)
+                # self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
+            self._set_params(self._get_params()) # update the GP

     def _log_likelihood_gradients(self):
         return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))
@@ -216,20 +217,33 @@ class sparse_GP(GP):
             dL_dZ += self.kern.dK_dX(self.dL_dpsi1, self.Z, self.X)
         return dL_dZ

-    def _raw_predict(self, Xnew, which_parts='all', full_cov=False):
+    def _raw_predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=False):
         """Internal helper function for making predictions, does not account for normalization"""

         Bi, _ = linalg.lapack.flapack.dpotri(self.LB, lower=0) # WTH? this lower switch should be 1, but that doesn't work!
         symmetrify(Bi)
         Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.M) - Bi)

-        Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts)
-        mu = np.dot(Kx.T, self.Cpsi1V) # / self.scale_factor)
-        if full_cov:
-            Kxx = self.kern.K(Xnew, which_parts=which_parts)
-            var = Kxx - mdot(Kx.T, Kmmi_LmiBLmi, Kx) # NOTE this won't work for plotting
+        if X_variance_new is None:
+            # deterministic test inputs: the standard sparse-GP predictive equations
+            Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts)
+            mu = np.dot(Kx.T, self.Cpsi1V)
+            if full_cov:
+                Kxx = self.kern.K(Xnew, which_parts=which_parts)
+                var = Kxx - mdot(Kx.T, Kmmi_LmiBLmi, Kx) # NOTE this won't work for plotting
+            else:
+                Kxx = self.kern.Kdiag(Xnew, which_parts=which_parts)
+                var = Kxx - np.sum(Kx * np.dot(Kmmi_LmiBLmi, Kx), 0)
         else:
-            Kxx = self.kern.Kdiag(Xnew, which_parts=which_parts)
-            var = Kxx - np.sum(Kx * np.dot(Kmmi_LmiBLmi, Kx), 0)
+            # uncertain test inputs: replace the kernel matrices by psi statistics
+            # assert which_parts=='all', "switching out parts of variational kernels is not implemented"
+            Kx = self.kern.psi1(self.Z, Xnew, X_variance_new) #, which_parts=which_parts) TODO: which_parts
+            mu = np.dot(Kx, self.Cpsi1V)
+            if full_cov:
+                raise NotImplementedError, "TODO"
+            else:
+                Kxx = self.kern.psi0(self.Z, Xnew, X_variance_new)
+                psi2 = self.kern.psi2(self.Z, Xnew, X_variance_new)
+                var = Kxx - np.sum(np.sum(psi2 * Kmmi_LmiBLmi[None, :, :], 1), 1)

         return mu, var[:, None]
diff --git a/GPy/testing/mrd_tests.py b/GPy/testing/mrd_tests.py
index a00ebcb9..17e43a7e 100644
--- a/GPy/testing/mrd_tests.py
+++ b/GPy/testing/mrd_tests.py
@@ -21,8 +21,9 @@ class MRDTests(unittest.TestCase):
         K = k.K(X)

         Ylist = [np.random.multivariate_normal(np.zeros(N), K, D).T for _ in range(num_m)]
+        likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist]

-        m = GPy.models.MRD(*Ylist, Q=Q, kernel=k, M=M)
+        m = GPy.models.MRD(*likelihood_list, Q=Q, kernel=k, M=M)
         m.ensure_default_constraints()

         self.assertTrue(m.checkgrad())
diff --git a/GPy/util/visualize.py b/GPy/util/visualize.py
index 4ecf6c4f..2b515e91 100644
--- a/GPy/util/visualize.py
+++ b/GPy/util/visualize.py
@@ -86,7 +86,6 @@ class lvm(data_show):
     def modify(self, vals):
         """When latent values are modified update the latent representation and ulso update the output visualization."""
         y = self.model.predict(vals)[0]
-        print y
         self.data_visualize.modify(y)
         self.latent_handle.set_data(vals[self.latent_index[0]], vals[self.latent_index[1]])
         self.axes.figure.canvas.draw()
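For reference, the two adaptive schemes in adapt_learning_rate reduce to short update rules. A self-contained restatement in NumPy (the helper names are ours, for illustration only; the constants eta=0.1 and t0=100.0 mirror the patch):

import numpy as np

def adagrad_rate(s_k, g_k, eta=0.1, t0=100.0):
    """AdaGrad-style rule from the 'adagrad' branch: accumulate squared
    gradients so frequently-updated coordinates take smaller steps."""
    s_k = s_k + np.square(g_k)
    return eta / (t0 + np.sqrt(s_k)), s_k

def pesky_rate(gbar, hbar, tau, g):
    """'semi_pesky' rule, apparently in the spirit of Schaul et al.'s
    'No More Pesky Learning Rates': leaky averages of the gradient and of
    its squared norm give rate = |E[g]|^2 / E[|g|^2], and tau adapts the
    averaging memory (short memory while the rate is large)."""
    gbar = (1.0 - 1.0 / tau) * gbar + g / tau
    hbar = (1.0 - 1.0 / tau) * hbar + np.dot(g, g) / tau
    rate = np.dot(gbar, gbar) / hbar
    tau = tau * (1.0 - rate) + 1.0
    return rate, gbar, hbar, tau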
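The new Bayesian_GPLVM.do_test_latents fits an approximate posterior q(x*) = N(mu, exp(log_S)) for each held-out point with SCG, holding the kernel, inducing inputs and training posterior fixed. A hypothetical usage sketch, where m stands in for a trained model and D for its output dimension:

import numpy as np

# m: a trained Bayesian_GPLVM with a homoscedastic Gaussian likelihood
Y_new = np.random.randn(3, D)            # three held-out rows, D columns
means, covars = m.do_test_latents(Y_new)
print means.shape, covars.shape          # both (3, Q): per-point posterior mean and variance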
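Finally, a minimal construction sketch against the likelihood-based API introduced here, in the spirit of the updated mrd_tests.py; the toy shapes, the rbf/white kernel constructors and the optimize() call are assumptions about the surrounding GPy version, not part of this patch:

import numpy as np
import GPy

N, D, Q, M = 30, 5, 4, 10                # toy sizes, purely illustrative
k = GPy.kern.rbf(Q) + GPy.kern.white(Q)
Ylist = [np.random.randn(N, D) for _ in range(2)]

# observations are wrapped in likelihood objects before model construction;
# Bayesian_GPLVM still accepts a raw ndarray and wraps it itself
likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist]
mrd = GPy.models.MRD(*likelihood_list, Q=Q, kernel=k, M=M)
mrd.ensure_default_constraints()
mrd.optimize(messages=True)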