diff --git a/GPy/core/fitc.py b/GPy/core/fitc.py index b8118485..604db5e8 100644 --- a/GPy/core/fitc.py +++ b/GPy/core/fitc.py @@ -7,14 +7,8 @@ from ..util.linalg import mdot, jitchol, chol_inv, tdot, symmetrify,pdinv from ..util.plot import gpplot from .. import kern from scipy import stats, linalg -from gp_base import GPBase from sparse_gp import SparseGP -def backsub_both_sides(L,X): - """ Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky""" - tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1) - return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T - class FITC(SparseGP): """ sparse FITC approximation @@ -27,48 +21,13 @@ class FITC(SparseGP): :type kernel: a GPy.kern.kern instance :param Z: inducing inputs (optional, see note) :type Z: np.ndarray (M x Q) | None - :param M : Number of inducing points (optional, default 10. Ignored if Z is not None) - :type M: int :param normalize_(X|Y) : whether to normalize the data before computing (predictions will be in original scales) :type normalize_(X|Y): bool """ def __init__(self, X, likelihood, kernel, Z, normalize_X=False): - GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X) - - self.Z = Z - self.M = Z.shape[0] - self.likelihood = likelihood - - X_variance = None - if X_variance is None: - self.has_uncertain_inputs = False - else: - assert X_variance.shape == X.shape - self.has_uncertain_inputs = True - self.X_variance = X_variance - - if normalize_X: - self.Z = (self.Z.copy() - self._Xmean) / self._Xstd - - # normalize X uncertainty also - if self.has_uncertain_inputs: - self.X_variance /= np.square(self._Xstd) - - def _set_params(self, p): - self.Z = p[:self.M * self.input_dim].reshape(self.M, self.input_dim) - self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam]) - self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:]) - self._compute_kernel_matrices() - self._computations() - - def 
_get_params(self): - return np.hstack([self.Z.flatten(), self.kern._get_params_transformed(), self.likelihood._get_params()]) - - def _get_param_names(self): - return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])],[])\ - + self.kern._get_param_names_transformed() + self.likelihood._get_param_names() - + SparseGP.__init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False) + assert self.output_dim == 1, "FITC model is not defined for handling multiple outputs" def update_likelihood_approximation(self): """ @@ -77,47 +36,33 @@ class FITC(SparseGP): For a Gaussian likelihood, no iteration is required: this function does nothing """ - if self.has_uncertain_inputs: - raise NotImplementedError, "FITC approximation not implemented for uncertain inputs" - else: - self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0) - self._set_params(self._get_params()) # update the GP + self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0) + self._set_params(self._get_params()) # update the GP def _compute_kernel_matrices(self): # kernel computations, using BGPLVM notation self.Kmm = self.kern.K(self.Z) - if self.has_uncertain_inputs: - self.psi0 = self.kern.psi0(self.Z, self.X, self.X_variance) - self.psi1 = self.kern.psi1(self.Z, self.X, self.X_variance).T - self.psi2 = self.kern.psi2(self.Z, self.X, self.X_variance) - else: - self.psi0 = self.kern.Kdiag(self.X) - self.psi1 = self.kern.K(self.Z, self.X) - self.psi2 = None - + self.psi0 = self.kern.Kdiag(self.X) + self.psi1 = self.kern.K(self.Z, self.X) + self.psi2 = None def _computations(self): #factor Kmm self.Lm = jitchol(self.Kmm) - self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1) + self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.num_inducing),lower=1) Lmipsi1 = np.dot(self.Lmi,self.psi1) self.Qnn = np.dot(Lmipsi1.T,Lmipsi1).copy() self.Diag0 = self.psi0 - np.diag(self.Qnn) - self.beta_star = self.likelihood.precision/(1. 
+ self.likelihood.precision*self.Diag0[:,None]) #Includes Diag0 in the precision + self.beta_star = self.likelihood.precision/(1. + self.likelihood.precision*self.Diag0[:,None]) #NOTE: beta_star contains Diag0 and the precision self.V_star = self.beta_star * self.likelihood.Y # The rather complex computations of self.A - if self.has_uncertain_inputs: - raise NotImplementedError - else: - if self.likelihood.is_heteroscedastic: - assert self.likelihood.output_dim == 1 - tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.num_data))) - tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1) - self.A = tdot(tmp) + tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.num_data))) + tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1) + self.A = tdot(tmp) # factor B - self.B = np.eye(self.M) + self.A + self.B = np.eye(self.num_inducing) + self.A self.LB = jitchol(self.B) self.LBi = chol_inv(self.LB) self.psi1V = np.dot(self.psi1, self.V_star) @@ -170,16 +115,10 @@ class FITC(SparseGP): for i,V_n,alpha_n,gamma_n,gamma_k in zip(range(self.num_data),self.V_star,alpha,gamma_2,gamma_3): K_pp_K = np.dot(Kmmipsi1[:,i:(i+1)],Kmmipsi1[:,i:(i+1)].T) - - #Diag_dpsi1 = Diag_dA_dpsi1: yT*beta_star*y + Diag_dC_dpsi1 +Diag_dD_dpsi1 _dpsi1 = (-V_n**2 - alpha_n + 2.*gamma_k - gamma_n**2) * Kmmipsi1.T[i:(i+1),:] - - #Diag_dKmm = Diag_dA_dKmm: yT*beta_star*y +Diag_dC_dKmm +Diag_dD_dKmm _dKmm = .5*(V_n**2 + alpha_n + gamma_n**2 - 2.*gamma_k) * K_pp_K #Diag_dD_dKmm - self._dpsi1_dtheta += self.kern.dK_dtheta(_dpsi1,self.X[i:i+1,:],self.Z) self._dKmm_dtheta += self.kern.dK_dtheta(_dKmm,self.Z) - self._dKmm_dX += 2.*self.kern.dK_dX(_dKmm ,self.Z) self._dpsi1_dX += self.kern.dK_dX(_dpsi1.T,self.Z,self.X[i:i+1,:]) @@ -188,7 +127,7 @@ class FITC(SparseGP): # save computation here. 
self.partial_for_likelihood = None elif self.likelihood.is_heteroscedastic: - raise NotImplementedError, "heteroscedatic derivates not implemented" + raise NotImplementedError, "heteroscedatic derivates not implemented." else: # likelihood is not heterscedatic dbstar_dnoise = self.likelihood.precision * (self.beta_star**2 * self.Diag0[:,None] - self.beta_star) @@ -198,14 +137,14 @@ class FITC(SparseGP): aux_1 = self.likelihood.Y.T * np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1) aux_2 = np.dot(LBiLmipsi1.T,self._LBi_Lmi_psi1V) - dA_dnoise = 0.5 * self.D * (dbstar_dnoise/self.beta_star).sum() - 0.5 * self.D * np.sum(self.likelihood.Y**2 * dbstar_dnoise) + dA_dnoise = 0.5 * self.input_dim * (dbstar_dnoise/self.beta_star).sum() - 0.5 * self.input_dim * np.sum(self.likelihood.Y**2 * dbstar_dnoise) dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T) dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T) dD_dnoise_1 = mdot(self.V_star*LBiLmipsi1.T,LBiLmipsi1*dbstar_dnoise.T*self.likelihood.Y.T) alpha = mdot(LBiLmipsi1,self.V_star) alpha_ = mdot(LBiLmipsi1.T,alpha) - dD_dnoise_2 = -0.5 * self.D * np.sum(alpha_**2 * dbstar_dnoise ) + dD_dnoise_2 = -0.5 * self.input_dim * np.sum(alpha_**2 * dbstar_dnoise ) dD_dnoise_1 = mdot(self.V_star.T,self.psi1.T,self.Lmi.T,self.LBi.T,self.LBi,self.Lmi,self.psi1,dbstar_dnoise*self.likelihood.Y) dD_dnoise_2 = 0.5*mdot(self.V_star.T,self.psi1.T,Hi,self.psi1,dbstar_dnoise*self.psi1.T,Hi,self.psi1,self.V_star) @@ -225,35 +164,30 @@ class FITC(SparseGP): return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood))) def dL_dtheta(self): - if self.has_uncertain_inputs: - raise NotImplementedError, "FITC approximation not implemented for uncertain inputs" - else: - dL_dtheta = self.kern.dKdiag_dtheta(self._dL_dpsi0,self.X) - dL_dtheta += self.kern.dK_dtheta(self._dL_dpsi1,self.X,self.Z) - dL_dtheta += 
self.kern.dK_dtheta(self._dL_dKmm,X=self.Z) - dL_dtheta += self._dKmm_dtheta - dL_dtheta += self._dpsi1_dtheta + dL_dtheta = self.kern.dKdiag_dtheta(self._dL_dpsi0,self.X) + dL_dtheta += self.kern.dK_dtheta(self._dL_dpsi1,self.X,self.Z) + dL_dtheta += self.kern.dK_dtheta(self._dL_dKmm,X=self.Z) + dL_dtheta += self._dKmm_dtheta + dL_dtheta += self._dpsi1_dtheta return dL_dtheta def dL_dZ(self): - if self.has_uncertain_inputs: - raise NotImplementedError, "FITC approximation not implemented for uncertain inputs" - else: - dL_dZ = self.kern.dK_dX(self._dL_dpsi1.T,self.Z,self.X) - dL_dZ += 2. * self.kern.dK_dX(self._dL_dKmm,X=self.Z) - dL_dZ += self._dpsi1_dX - dL_dZ += self._dKmm_dX + dL_dZ = self.kern.dK_dX(self._dL_dpsi1.T,self.Z,self.X) + dL_dZ += 2. * self.kern.dK_dX(self._dL_dKmm,X=self.Z) + dL_dZ += self._dpsi1_dX + dL_dZ += self._dKmm_dX return dL_dZ - def _raw_predict(self, Xnew, which_parts, full_cov=False): + def _raw_predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=False): + assert X_variance_new is None, "FITC model is not defined for handling uncertain inputs." if self.likelihood.is_heteroscedastic: Iplus_Dprod_i = 1./(1.+ self.Diag0 * self.likelihood.precision.flatten()) self.Diag = self.Diag0 * Iplus_Dprod_i self.P = Iplus_Dprod_i[:,None] * self.psi1.T self.RPT0 = np.dot(self.Lmi,self.psi1) - self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T)) - self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1) + self.L = np.linalg.cholesky(np.eye(self.num_inducing) + np.dot(self.RPT0,((1. 
- Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T)) + self.R,info = linalg.lapack.flapack.dtrtrs(self.L,self.Lmi,lower=1) self.RPT = np.dot(self.R,self.P.T) self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT) self.w = self.Diag * self.likelihood.v_tilde @@ -270,13 +204,13 @@ class FITC(SparseGP): # q(u|f) = N(u| R0i*mu_u*f, R0i*C*R0i.T) # Ci = I + (RPT0)Di(RPT0).T - # C = I - [RPT0] * (D+[RPT0].T*[RPT0])^-1*[RPT0].T - # = I - [RPT0] * (D + self.Qnn)^-1 * [RPT0].T + # C = I - [RPT0] * (input_dim+[RPT0].T*[RPT0])^-1*[RPT0].T + # = I - [RPT0] * (input_dim + self.Qnn)^-1 * [RPT0].T # = I - [RPT0] * (U*U.T)^-1 * [RPT0].T # = I - V.T * V U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn) - V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1) - C = np.eye(self.M) - np.dot(V.T,V) + V,info = linalg.lapack.flapack.dtrtrs(U,self.RPT0.T,lower=1) + C = np.eye(self.num_inducing) - np.dot(V.T,V) mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:]) #self.C = C #self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T @@ -292,13 +226,13 @@ class FITC(SparseGP): mu_star = np.dot(KR0T,mu_H) if full_cov: Kxx = self.kern.K(Xnew,which_parts=which_parts) - var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T)) + var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.num_inducing),KR0T.T)) else: Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts) - var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.M),KR0T.T),0))[:,None] + var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.num_inducing),KR0T.T),0))[:,None] return mu_star[:,None],var else: - raise NotImplementedError, "homoscedastic FITC not implemented" + raise NotImplementedError, "Heteroscedastic case not implemented." 
""" Kx = self.kern.K(self.Z, Xnew) mu = mdot(Kx.T, self.C/self.scale_factor, self.psi1V) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 5d289710..246b8cc9 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -33,8 +33,8 @@ class GP(GPBase): self._set_params(self._get_params()) def _set_params(self, p): - self.kern._set_params_transformed(p[:self.kern.Nparam_transformed()]) - self.likelihood._set_params(p[self.kern.Nparam_transformed():]) + self.kern._set_params_transformed(p[:self.kern.num_params_transformed()]) + self.likelihood._set_params(p[self.kern.num_params_transformed():]) self.K = self.kern.K(self.X) self.K += self.likelihood.covariance_matrix diff --git a/GPy/core/gp_base.py b/GPy/core/gp_base.py index 1455cbb7..9188fe6f 100644 --- a/GPy/core/gp_base.py +++ b/GPy/core/gp_base.py @@ -1,12 +1,12 @@ import numpy as np -import model from .. import kern from ..util.plot import gpplot, Tango, x_frame1D, x_frame2D import pylab as pb +from GPy.core.model import Model -class GPBase(model.model): +class GPBase(Model): """ - Gaussian Process model for holding shared behaviour between + Gaussian Process Model for holding shared behaviour between sprase_GP and GP models """ @@ -28,7 +28,7 @@ class GPBase(model.model): self._Xmean = np.zeros((1, self.input_dim)) self._Xstd = np.ones((1, self.input_dim)) - model.model.__init__(self) + Model.__init__(self) # All leaf nodes should call self._set_params(self._get_params()) at # the end @@ -84,8 +84,8 @@ class GPBase(model.model): Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits, resolution) m, v = self._raw_predict(Xnew, which_parts=which_parts) m = m.reshape(resolution, resolution).T - ax.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) - ax.scatter(self.X[:, 0], self.X[:, 1], 40, self.likelihood.Y, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max()) + ax.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) # @UndefinedVariable + ax.scatter(self.X[:, 0], self.X[:, 1], 40, 
self.likelihood.Y, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max()) # @UndefinedVariable ax.set_xlim(xmin[0], xmax[0]) ax.set_ylim(xmin[1], xmax[1]) else: @@ -94,9 +94,9 @@ class GPBase(model.model): def plot(self, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, samples=0, fignum=None, ax=None): """ TODO: Docstrings! + :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure - """ # TODO include samples if which_data == 'all': @@ -111,7 +111,7 @@ class GPBase(model.model): Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) - m, var, lower, upper = self.predict(Xnew, which_parts=which_parts) + m, _, lower, upper = self.predict(Xnew, which_parts=which_parts) for d in range(m.shape[1]): gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], axes=ax) ax.plot(Xu[which_data], self.likelihood.data[which_data, d], 'kx', mew=1.5) @@ -122,13 +122,13 @@ class GPBase(model.model): elif self.X.shape[1] == 2: # FIXME resolution = resolution or 50 - Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits, resolution) + Xnew, _, _, xmin, xmax = x_frame2D(self.X, plot_limits, resolution) x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution) - m, var, lower, upper = self.predict(Xnew, which_parts=which_parts) + m, _, lower, upper = self.predict(Xnew, which_parts=which_parts) m = m.reshape(resolution, resolution).T - ax.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) + ax.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) # @UndefinedVariable Yf = self.likelihood.Y.flatten() - ax.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) + ax.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) 
# @UndefinedVariable ax.set_xlim(xmin[0], xmax[0]) ax.set_ylim(xmin[1], xmax[1]) diff --git a/GPy/core/model.py b/GPy/core/model.py index 7cc21080..582d7313 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -6,37 +6,32 @@ from .. import likelihoods from ..inference import optimization from ..util.linalg import jitchol from GPy.util.misc import opt_wrapper -from parameterised import parameterised -from scipy import optimize +from parameterised import Parameterised import multiprocessing as mp import numpy as np -import priors -import re -import sys -import pdb from GPy.core.domains import POSITIVE, REAL # import numdifftools as ndt -class model(parameterised): +class Model(Parameterised): def __init__(self): - parameterised.__init__(self) + Parameterised.__init__(self) self.priors = None self.optimization_runs = [] self.sampling_runs = [] self.preferred_optimizer = 'scg' - #self._set_params(self._get_params()) has been taken out as it should only be called on leaf nodes + # self._set_params(self._get_params()) has been taken out as it should only be called on leaf nodes def _get_params(self): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError, "this needs to be implemented to use the Model class" def _set_params(self, x): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError, "this needs to be implemented to use the Model class" def log_likelihood(self): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError, "this needs to be implemented to use the Model class" def _log_likelihood_gradients(self): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError, "this needs to be implemented to use the Model class" def set_prior(self, regexp, what): """ - Sets priors on the model parameters. + Sets priors on the Model parameters. 
Arguments --------- @@ -65,7 +60,7 @@ class model(parameterised): if len(tie_matches) > 1: raise ValueError, "cannot place Prior across multiple ties" elif len(tie_matches) == 1: - which = which[:1] # just place a Prior object on the first parameter + which = which[:1] # just place a Prior object on the first parameter # check constraints are okay @@ -95,7 +90,7 @@ class model(parameterised): def get_gradient(self, name, return_names=False): """ - Get model gradient(s) by name. The name is applied as a regular expression and all parameters that match that regular expression are returned. + Get Model gradient(s) by name. The name is applied as a regular expression and all parameters that match that regular expression are returned. """ matches = self.grep_param_names(name) if len(matches): @@ -135,7 +130,7 @@ class model(parameterised): def randomize(self): """ - Randomize the model. + Randomize the Model. Make this draw from the Prior if one exists, else draw from N(0,1) """ # first take care of all parameters (from N(0,1)) @@ -147,16 +142,16 @@ class model(parameterised): if self.priors is not None: [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None] self._set_params(x) - self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...) + self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...) - def optimize_restarts(self, Nrestarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs): + def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs): """ - Perform random restarts of the model, and set the model to the best + Perform random restarts of the Model, and set the Model to the best seen solution. 
If the robust flag is set, exceptions raised during optimizations will - be handled silently. If _all_ runs fail, the model is reset to the + be handled silently. If _all_ runs fail, the Model is reset to the existing parameter values. Notes @@ -179,19 +174,19 @@ class model(parameterised): try: jobs = [] pool = mp.Pool(processes=num_processes) - for i in range(Nrestarts): + for i in range(num_restarts): self.randomize() job = pool.apply_async(opt_wrapper, args=(self,), kwds=kwargs) jobs.append(job) - pool.close() # signal that no more data coming in - pool.join() # wait for all the tasks to complete + pool.close() # signal that no more data coming in + pool.join() # wait for all the tasks to complete except KeyboardInterrupt: print "Ctrl+c received, terminating and joining pool." pool.terminate() pool.join() - for i in range(Nrestarts): + for i in range(num_restarts): try: if not parallel: self.randomize() @@ -200,10 +195,10 @@ class model(parameterised): self.optimization_runs.append(jobs[i].get()) if verbose: - print("Optimization restart {0}/{1}, f = {2}".format(i + 1, Nrestarts, self.optimization_runs[-1].f_opt)) + print("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)) except Exception as e: if robust: - print("Warning - optimization restart {0}/{1} failed".format(i + 1, Nrestarts)) + print("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)) else: raise e @@ -218,11 +213,11 @@ class model(parameterised): Ensure that any variables which should clearly be positive have been constrained somehow. 
""" positive_strings = ['variance', 'lengthscale', 'precision', 'kappa'] - param_names = self._get_param_names() + # param_names = self._get_param_names() currently_constrained = self.all_constrained_indices() to_make_positive = [] for s in positive_strings: - for i in self.grep_param_names(".*"+s): + for i in self.grep_param_names(".*" + s): if not (i in currently_constrained): to_make_positive.append(i) if len(to_make_positive): @@ -240,18 +235,18 @@ class model(parameterised): Gets the gradients from the likelihood and the priors. """ self._set_params_transformed(x) - obj_grads = - self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients()) + obj_grads = -self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients()) return obj_grads def objective_and_gradients(self, x): self._set_params_transformed(x) obj_f = -self.log_likelihood() - self.log_prior() - obj_grads = - self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients()) + obj_grads = -self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients()) return obj_f, obj_grads def optimize(self, optimizer=None, start=None, **kwargs): """ - Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. + Optimize the Model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. kwargs are passed to the optimizer. 
They can be: :max_f_eval: maximum number of function evaluations @@ -274,7 +269,7 @@ def optimize_SGD(self, momentum=0.1, learning_rate=0.01, iterations=20, **kwargs): # assert self.Y.shape[1] > 1, "SGD only works with D > 1" - sgd = SGD.StochasticGD(self, iterations, learning_rate, momentum, **kwargs) + sgd = SGD.StochasticGD(self, iterations, learning_rate, momentum, **kwargs) # @UndefinedVariable sgd.run() self.optimization_runs.append(sgd) @@ -291,7 +286,7 @@ def f(x): self._set_params(x) return self.log_likelihood() - h = ndt.Hessian(f) + h = ndt.Hessian(f) # @UndefinedVariable A = -h(x) self._set_params(x) # check for almost zero components on the diagonal which screw up the cholesky @@ -300,7 +295,7 @@ return A def Laplace_evidence(self): - """Returns an estiamte of the model evidence based on the Laplace approximation. + """Returns an estimate of the Model evidence based on the Laplace approximation. Uses a numerical estimate of the hessian if none is available analytically""" A = self.Laplace_covariance() try: @@ -310,12 +305,12 @@ return 0.5 * self._get_params().size * np.log(2 * np.pi) + self.log_likelihood() - hld def __str__(self): - s = parameterised.__str__(self).split('\n') + s = Parameterised.__str__(self).split('\n') # add priors to the string if self.priors is not None: strs = [str(p) if p is not None else '' for p in self.priors] else: - strs = ['']*len(self._get_params()) + strs = [''] * len(self._get_params()) width = np.array(max([len(p) for p in strs] + [5])) + 4 log_like = self.log_likelihood() @@ -336,7 +331,7 @@ def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3): """ - Check the gradient of the model by comparing to a numerical estimate. + Check the gradient of the Model by comparing to a numerical estimate. 
If the verbose flag is passed, invividual components are tested (and printed) :param verbose: If True, print a "full" checking of each parameter @@ -389,7 +384,7 @@ class model(parameterised): param_list = range(len(x)) else: param_list = self.grep_param_names(target_param, transformed=True, search=True) - if not param_list: + if not np.any(param_list): print "No free parameters to check" return @@ -419,15 +414,15 @@ class model(parameterised): def input_sensitivity(self): """ - return an array describing the sesitivity of the model to each input + return an array describing the sesitivity of the Model to each input NB. Right now, we're basing this on the lengthscales (or variances) of the kernel. TODO: proper sensitivity analysis - where we integrate across the model inputs and evaluate the - effect on the variance of the model output. """ + where we integrate across the Model inputs and evaluate the + effect on the variance of the Model output. """ if not hasattr(self, 'kern'): - raise ValueError, "this model has no kernel" + raise ValueError, "this Model has no kernel" k = [p for p in self.kern.parts if p.name in ['rbf', 'linear']] if (not len(k) == 1) or (not k[0].ARD): @@ -474,8 +469,8 @@ class model(parameterised): ll_change = new_ll - last_ll if ll_change < 0: - self.likelihood = last_approximation # restore previous likelihood approximation - self._set_params(last_params) # restore model parameters + self.likelihood = last_approximation # restore previous likelihood approximation + self._set_params(last_params) # restore Model parameters print "Log-likelihood decrement: %s \nLast likelihood update discarded." 
% ll_change stop = True else: diff --git a/GPy/core/parameterised.py b/GPy/core/parameterised.py index e091e820..b3a5712a 100644 --- a/GPy/core/parameterised.py +++ b/GPy/core/parameterised.py @@ -6,12 +6,10 @@ import numpy as np import re import copy import cPickle -import os -from ..util.squashers import sigmoid import warnings import transformations -class parameterised(object): +class Parameterised(object): def __init__(self): """ This is the base class for model and kernel. Mostly just handles tieing and constraining of parameters @@ -36,7 +34,7 @@ class parameterised(object): """ Returns a **copy** of parameters in non transformed space - :see_also: :py:func:`GPy.core.parameterised.params_transformed` + :see_also: :py:func:`GPy.core.Parameterised.params_transformed` """ return self._get_params() @@ -49,7 +47,7 @@ class parameterised(object): """ Returns a **copy** of parameters in transformed space - :see_also: :py:func:`GPy.core.parameterised.params` + :see_also: :py:func:`GPy.core.Parameterised.params` """ return self._get_params_transformed() @@ -113,7 +111,7 @@ class parameterised(object): if hasattr(self, 'prior'): pass - self._set_params_transformed(self._get_params_transformed()) # sets tied parameters to single value + self._set_params_transformed(self._get_params_transformed()) # sets tied parameters to single value def untie_everything(self): """Unties all parameters by setting tied_indices to an empty list.""" @@ -145,7 +143,7 @@ class parameterised(object): else: return np.nonzero([regexp.match(name) for name in names])[0] - def Nparam_transformed(self): + def num_params_transformed(self): removed = 0 for tie in self.tied_indices: removed += tie.size - 1 @@ -159,18 +157,18 @@ class parameterised(object): """Unconstrain matching parameters. 
does not untie parameters""" matches = self.grep_param_names(regexp) - #tranformed contraints: + # transformed constraints: for match in matches: - self.constrained_indices = [i[i<>match] for i in self.constrained_indices] + self.constrained_indices = [i[i <> match] for i in self.constrained_indices] - #remove empty constraints - tmp = zip(*[(i,t) for i,t in zip(self.constrained_indices,self.constraints) if len(i)]) + # remove empty constraints + tmp = zip(*[(i, t) for i, t in zip(self.constrained_indices, self.constraints) if len(i)]) if tmp: - self.constrained_indices, self.constraints = zip(*[(i,t) for i,t in zip(self.constrained_indices,self.constraints) if len(i)]) + self.constrained_indices, self.constraints = zip(*[(i, t) for i, t in zip(self.constrained_indices, self.constraints) if len(i)]) self.constrained_indices, self.constraints = list(self.constrained_indices), list(self.constraints) # fixed: - self.fixed_values = [np.delete(values, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices,values in zip(self.fixed_indices,self.fixed_values)] + self.fixed_values = [np.delete(values, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices, values in zip(self.fixed_indices, self.fixed_values)] self.fixed_indices = [np.delete(indices, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices in self.fixed_indices] # remove empty elements @@ -189,7 +187,7 @@ class parameterised(object): """ Set positive constraints. """ self.constrain(regexp, transformations.logexp()) - def constrain_bounded(self, regexp,lower, upper): + def constrain_bounded(self, regexp, lower, upper): """ Set bounded constraints. 
""" self.constrain(regexp, transformations.logistic(lower, upper)) @@ -199,8 +197,8 @@ class parameterised(object): else: return np.empty(shape=(0,)) - def constrain(self,regexp,transform): - assert isinstance(transform,transformations.transformation) + def constrain(self, regexp, transform): + assert isinstance(transform, transformations.transformation) matches = self.grep_param_names(regexp) overlap = set(matches).intersection(set(self.all_constrained_indices())) @@ -251,7 +249,7 @@ class parameterised(object): def _get_params_transformed(self): """use self._get_params to get the 'true' parameters of the model, which are then tied, constrained and fixed""" x = self._get_params() - [np.put(x,i,t.finv(x[i])) for i,t in zip(self.constrained_indices,self.constraints)] + [np.put(x, i, t.finv(x[i])) for i, t in zip(self.constrained_indices, self.constraints)] to_remove = self.fixed_indices + [t[1:] for t in self.tied_indices] if len(to_remove): @@ -263,7 +261,7 @@ class parameterised(object): """ takes the vector x, which is then modified (by untying, reparameterising or inserting fixed values), and then call self._set_params""" self._set_params(self._untransform_params(x)) - def _untransform_params(self,x): + def _untransform_params(self, x): """ The transformation required for _set_params_transformed. 
@@ -290,9 +288,9 @@ class parameterised(object): [np.put(xx, i, v) for i, v in zip(self.fixed_indices, self.fixed_values)] [np.put(xx, i, v) for i, v in [(t[1:], xx[t[0]]) for t in self.tied_indices] ] - [np.put(xx,i,t.f(xx[i])) for i,t in zip(self.constrained_indices, self.constraints)] - if hasattr(self,'debug'): - stop + [np.put(xx, i, t.f(xx[i])) for i, t in zip(self.constrained_indices, self.constraints)] + if hasattr(self, 'debug'): + stop # @UndefinedVariable return xx @@ -316,7 +314,7 @@ class parameterised(object): remove = np.hstack((remove, np.hstack(self.fixed_indices))) # add markers to show that some variables are constrained - for i,t in zip(self.constrained_indices,self.constraints): + for i, t in zip(self.constrained_indices, self.constraints): for ii in i: n[ii] = n[ii] + t.__str__() @@ -333,10 +331,10 @@ class parameterised(object): if not N: return "This object has no free parameters." header = ['Name', 'Value', 'Constraints', 'Ties'] - values = self._get_params() # map(str,self._get_params()) + values = self._get_params() # map(str,self._get_params()) # sort out the constraints constraints = [''] * len(names) - for i,t in zip(self.constrained_indices,self.constraints): + for i, t in zip(self.constrained_indices, self.constraints): for ii in i: constraints[ii] = t.__str__() for i in self.fixed_indices: @@ -354,7 +352,7 @@ class parameterised(object): max_constraint = max([len(constraints[i]) for i in range(len(constraints))] + [len(header[2])]) max_ties = max([len(ties[i]) for i in range(len(ties))] + [len(header[3])]) cols = np.array([max_names, max_values, max_constraint, max_ties]) + 4 - columns = cols.sum() + # columns = cols.sum() header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))] header_string = map(lambda x: '|'.join(x), [header_string]) diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 152c1573..2cfc8ae4 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -153,8 +153,8 
@@ class SparseGP(GPBase): def _set_params(self, p): self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim) - self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam]) - self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:]) + self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.num_params]) + self.likelihood._set_params(p[self.Z.size + self.kern.num_params:]) self._compute_kernel_matrices() self._computations() diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py index edb1c179..bff8dcd1 100644 --- a/GPy/examples/classification.py +++ b/GPy/examples/classification.py @@ -114,7 +114,8 @@ def sparse_toy_linear_1d_classification(seed=default_seed): return m def sparse_crescent_data(inducing=10, seed=default_seed): - """Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood. + """ + Run a Gaussian process classification with DTC approximation on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood. :param model_type: type of model to fit ['Full', 'FITC', 'DTC']. :param seed : seed value for data generation. @@ -137,7 +138,8 @@ def sparse_crescent_data(inducing=10, seed=default_seed): return m def FITC_crescent_data(inducing=10, seed=default_seed): - """Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood. + """ + Run a Gaussian process classification with FITC approximation on the crescent data. The demonstration uses EP to approximate the likelihood. :param model_type: type of model to fit ['Full', 'FITC', 'DTC']. :param seed : seed value for data generation. 
@@ -146,13 +148,18 @@ def FITC_crescent_data(inducing=10, seed=default_seed): :type inducing: int """ + data = GPy.util.datasets.crescent_data(seed=seed) + Y = data['Y'] + Y[Y.flatten()==-1]=0 + + data = GPy.util.datasets.crescent_data(seed=seed) Y = data['Y'] Y[Y.flatten()==-1]=0 m = GPy.models.FITCClassification(data['X'], Y) m.ensure_default_constraints() - m['.*len'] = 10. + m['.*len'] = 3. m.update_likelihood_approximation() m.optimize() print(m) diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 972ae290..a683f6bb 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -10,16 +10,16 @@ import numpy as np import GPy -def toy_rbf_1d(max_nb_eval_optim=100): +def toy_rbf_1d(optimizer='tnc', max_nb_eval_optim=100): """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.""" data = GPy.util.datasets.toy_rbf_1d() - # create simple GP model + # create simple GP Model m = GPy.models.GPRegression(data['X'],data['Y']) # optimize m.ensure_default_constraints() - m.optimize(max_f_eval=max_nb_eval_optim) + m.optimize(optimizer, max_f_eval=max_nb_eval_optim) # plot m.plot() print(m) @@ -29,7 +29,7 @@ def rogers_girolami_olympics(optim_iters=100): """Run a standard Gaussian process regression on the Rogers and Girolami olympics data.""" data = GPy.util.datasets.rogers_girolami_olympics() - # create simple GP model + # create simple GP Model m = GPy.models.GPRegression(data['X'],data['Y']) #set the lengthscale to be something sensible (defaults to 1) @@ -48,7 +48,7 @@ def toy_rbf_1d_50(optim_iters=100): """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.""" data = GPy.util.datasets.toy_rbf_1d_50() - # create simple GP model + # create simple GP Model m = GPy.models.GPRegression(data['X'],data['Y']) # optimize @@ -64,7 +64,7 @@ def silhouette(optim_iters=100): """Predict the pose of a figure given a silhouette. 
This is a task from Agarwal and Triggs 2004 ICML paper.""" data = GPy.util.datasets.silhouette() - # create simple GP model + # create simple GP Model m = GPy.models.GPRegression(data['X'],data['Y']) # optimize @@ -244,18 +244,18 @@ def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf): lls = [] total_var = np.var(data['Y']) kernel = kernel_call(1, variance=1., lengthscale=1.) - model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel) + Model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel) for log_SNR in log_SNRs: SNR = 10.**log_SNR noise_var = total_var/(1.+SNR) signal_var = total_var - noise_var - model.kern['.*variance'] = signal_var - model['noise_variance'] = noise_var + Model.kern['.*variance'] = signal_var + Model['noise_variance'] = noise_var length_scale_lls = [] for length_scale in length_scales: - model['.*lengthscale'] = length_scale - length_scale_lls.append(model.log_likelihood()) + Model['.*lengthscale'] = length_scale + length_scale_lls.append(Model.log_likelihood()) lls.append(length_scale_lls) @@ -270,7 +270,7 @@ def sparse_GP_regression_1D(N = 400, num_inducing = 5, optim_iters=100): rbf = GPy.kern.rbf(1) noise = GPy.kern.white(1) kernel = rbf + noise - # create simple GP model + # create simple GP Model m = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=num_inducing) m.ensure_default_constraints() @@ -290,7 +290,7 @@ def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100): noise = GPy.kern.white(2) kernel = rbf + noise - # create simple GP model + # create simple GP Model m = GPy.models.SparseGPRegression(X,Y,kernel, num_inducing = num_inducing) # contrain all parameters to be positive (but not inducing inputs) @@ -318,7 +318,7 @@ def uncertain_inputs_sparse_regression(optim_iters=100): k = GPy.kern.rbf(1) + GPy.kern.white(1) - # create simple GP model - no input uncertainty on this one + # create simple GP Model - no input uncertainty on this one m = 
GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z) m.ensure_default_constraints() m.optimize('scg', messages=1, max_f_eval=optim_iters) @@ -326,7 +326,7 @@ def uncertain_inputs_sparse_regression(optim_iters=100): axes[0].set_title('no input uncertainty') - #the same model with uncertainty + #the same Model with uncertainty m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S) m.ensure_default_constraints() m.optimize('scg', messages=1, max_f_eval=optim_iters) diff --git a/GPy/examples/tutorials.py b/GPy/examples/tutorials.py index bd403ae8..2eb2acfb 100644 --- a/GPy/examples/tutorials.py +++ b/GPy/examples/tutorials.py @@ -33,7 +33,7 @@ def tuto_GP_regression(): m.optimize() - m.optimize_restarts(Nrestarts = 10) + m.optimize_restarts(num_restarts = 10) ########################### # 2-dimensional example # diff --git a/GPy/inference/sgd.py b/GPy/inference/sgd.py index 2df09ec8..0002bb22 100644 --- a/GPy/inference/sgd.py +++ b/GPy/inference/sgd.py @@ -11,17 +11,17 @@ class opt_SGD(Optimizer): Optimize using stochastic gradient descent. 
*** Parameters *** - model: reference to the model object + Model: reference to the Model object iterations: number of iterations learning_rate: learning rate momentum: momentum """ - def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs): + def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, Model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs): self.opt_name = "Stochastic Gradient Descent" - self.model = model + self.Model = Model self.iterations = iterations self.momentum = momentum self.learning_rate = learning_rate @@ -42,17 +42,17 @@ class opt_SGD(Optimizer): self.learning_rate_0 = self.learning_rate.mean() self.schedule = schedule - # if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1: + # if len([p for p in self.Model.kern.parts if p.name == 'bias']) == 1: # self.param_traces.append(('bias',[])) - # if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1: + # if len([p for p in self.Model.kern.parts if p.name == 'linear']) == 1: # self.param_traces.append(('linear',[])) - # if len([p for p in self.model.kern.parts if p.name == 'rbf']) == 1: + # if len([p for p in self.Model.kern.parts if p.name == 'rbf']) == 1: # self.param_traces.append(('rbf_var',[])) self.param_traces = dict(self.param_traces) self.fopt_trace = [] - num_params = len(self.model._get_params()) + num_params = len(self.Model._get_params()) if isinstance(self.learning_rate, float): self.learning_rate = np.ones((num_params,)) * self.learning_rate @@ -84,7 +84,7 @@ class opt_SGD(Optimizer): return (np.isnan(data).sum(axis=1) == 0) def check_for_missing(self, data): - if 
sp.sparse.issparse(self.model.likelihood.Y): + if sp.sparse.issparse(self.Model.likelihood.Y): return True else: return np.isnan(data).sum() > 0 @@ -107,32 +107,32 @@ class opt_SGD(Optimizer): def shift_constraints(self, j): - constrained_indices = copy.deepcopy(self.model.constrained_indices) + constrained_indices = copy.deepcopy(self.Model.constrained_indices) for c, constraint in enumerate(constrained_indices): mask = (np.ones_like(constrained_indices[c]) == 1) for i in range(len(constrained_indices[c])): pos = np.where(j == constrained_indices[c][i])[0] if len(pos) == 1: - self.model.constrained_indices[c][i] = pos + self.Model.constrained_indices[c][i] = pos else: mask[i] = False - self.model.constrained_indices[c] = self.model.constrained_indices[c][mask] + self.Model.constrained_indices[c] = self.Model.constrained_indices[c][mask] return constrained_indices # back them up - # bounded_i = copy.deepcopy(self.model.constrained_bounded_indices) - # bounded_l = copy.deepcopy(self.model.constrained_bounded_lowers) - # bounded_u = copy.deepcopy(self.model.constrained_bounded_uppers) + # bounded_i = copy.deepcopy(self.Model.constrained_bounded_indices) + # bounded_l = copy.deepcopy(self.Model.constrained_bounded_lowers) + # bounded_u = copy.deepcopy(self.Model.constrained_bounded_uppers) # for b in range(len(bounded_i)): # for each group of constraints # for bc in range(len(bounded_i[b])): # pos = np.where(j == bounded_i[b][bc])[0] # if len(pos) == 1: - # pos2 = np.where(self.model.constrained_bounded_indices[b] == bounded_i[b][bc])[0][0] - # self.model.constrained_bounded_indices[b][pos2] = pos[0] + # pos2 = np.where(self.Model.constrained_bounded_indices[b] == bounded_i[b][bc])[0][0] + # self.Model.constrained_bounded_indices[b][pos2] = pos[0] # else: - # if len(self.model.constrained_bounded_indices[b]) == 1: + # if len(self.Model.constrained_bounded_indices[b]) == 1: # # if it's the last index to be removed # # the logic here is just a mess. 
If we remove the last one, then all the # # b-indices change and we have to iterate through everything to find our @@ -140,35 +140,35 @@ class opt_SGD(Optimizer): # raise NotImplementedError # else: # just remove it from the indices - # mask = self.model.constrained_bounded_indices[b] != bc - # self.model.constrained_bounded_indices[b] = self.model.constrained_bounded_indices[b][mask] + # mask = self.Model.constrained_bounded_indices[b] != bc + # self.Model.constrained_bounded_indices[b] = self.Model.constrained_bounded_indices[b][mask] # # here we shif the positive constraints. We cycle through each positive # # constraint - # positive = self.model.constrained_positive_indices.copy() + # positive = self.Model.constrained_positive_indices.copy() # mask = (np.ones_like(positive) == 1) # for p in range(len(positive)): # # we now check whether the constrained index appears in the j vector # # (the vector of the "active" indices) - # pos = np.where(j == self.model.constrained_positive_indices[p])[0] + # pos = np.where(j == self.Model.constrained_positive_indices[p])[0] # if len(pos) == 1: - # self.model.constrained_positive_indices[p] = pos + # self.Model.constrained_positive_indices[p] = pos # else: # mask[p] = False - # self.model.constrained_positive_indices = self.model.constrained_positive_indices[mask] + # self.Model.constrained_positive_indices = self.Model.constrained_positive_indices[mask] # return (bounded_i, bounded_l, bounded_u), positive def restore_constraints(self, c):#b, p): - # self.model.constrained_bounded_indices = b[0] - # self.model.constrained_bounded_lowers = b[1] - # self.model.constrained_bounded_uppers = b[2] - # self.model.constrained_positive_indices = p - self.model.constrained_indices = c + # self.Model.constrained_bounded_indices = b[0] + # self.Model.constrained_bounded_lowers = b[1] + # self.Model.constrained_bounded_uppers = b[2] + # self.Model.constrained_positive_indices = p + self.Model.constrained_indices = c def 
get_param_shapes(self, N = None, input_dim = None): - model_name = self.model.__class__.__name__ + model_name = self.Model.__class__.__name__ if model_name == 'GPLVM': return [(N, input_dim)] if model_name == 'Bayesian_GPLVM': @@ -179,37 +179,37 @@ class opt_SGD(Optimizer): def step_with_missing_data(self, f_fp, X, step, shapes): N, input_dim = X.shape - if not sp.sparse.issparse(self.model.likelihood.Y): - Y = self.model.likelihood.Y - samples = self.non_null_samples(self.model.likelihood.Y) - self.model.N = samples.sum() + if not sp.sparse.issparse(self.Model.likelihood.Y): + Y = self.Model.likelihood.Y + samples = self.non_null_samples(self.Model.likelihood.Y) + self.Model.N = samples.sum() Y = Y[samples] else: - samples = self.model.likelihood.Y.nonzero()[0] - self.model.N = len(samples) - Y = np.asarray(self.model.likelihood.Y[samples].todense(), dtype = np.float64) + samples = self.Model.likelihood.Y.nonzero()[0] + self.Model.N = len(samples) + Y = np.asarray(self.Model.likelihood.Y[samples].todense(), dtype = np.float64) - if self.model.N == 0 or Y.std() == 0.0: - return 0, step, self.model.N + if self.Model.N == 0 or Y.std() == 0.0: + return 0, step, self.Model.N - self.model.likelihood._offset = Y.mean() - self.model.likelihood._scale = Y.std() - self.model.likelihood.set_data(Y) - # self.model.likelihood.V = self.model.likelihood.Y*self.model.likelihood.precision + self.Model.likelihood._offset = Y.mean() + self.Model.likelihood._scale = Y.std() + self.Model.likelihood.set_data(Y) + # self.Model.likelihood.V = self.Model.likelihood.Y*self.Model.likelihood.precision - sigma = self.model.likelihood._variance - self.model.likelihood._variance = None # invalidate cache - self.model.likelihood._set_params(sigma) + sigma = self.Model.likelihood._variance + self.Model.likelihood._variance = None # invalidate cache + self.Model.likelihood._set_params(sigma) j = self.subset_parameter_vector(self.x_opt, samples, shapes) - self.model.X = X[samples] + self.Model.X = 
X[samples] - model_name = self.model.__class__.__name__ + model_name = self.Model.__class__.__name__ if model_name == 'Bayesian_GPLVM': - self.model.likelihood.YYT = np.dot(self.model.likelihood.Y, self.model.likelihood.Y.T) - self.model.likelihood.trYYT = np.trace(self.model.likelihood.YYT) + self.Model.likelihood.YYT = np.dot(self.Model.likelihood.Y, self.Model.likelihood.Y.T) + self.Model.likelihood.trYYT = np.trace(self.Model.likelihood.YYT) ci = self.shift_constraints(j) f, fp = f_fp(self.x_opt[j]) @@ -218,18 +218,18 @@ class opt_SGD(Optimizer): self.x_opt[j] -= step[j] self.restore_constraints(ci) - self.model.grads[j] = fp + self.Model.grads[j] = fp # restore likelihood _offset and _scale, otherwise when we call set_data(y) on # the next feature, it will get normalized with the mean and std of this one. - self.model.likelihood._offset = 0 - self.model.likelihood._scale = 1 + self.Model.likelihood._offset = 0 + self.Model.likelihood._scale = 1 - return f, step, self.model.N + return f, step, self.Model.N def adapt_learning_rate(self, t, D): if self.learning_rate_adaptation == 'adagrad': if t > 0: - g_k = self.model.grads + g_k = self.Model.grads self.s_k += np.square(g_k) t0 = 100.0 self.learning_rate = 0.1/(t0 + np.sqrt(self.s_k)) @@ -245,8 +245,8 @@ class opt_SGD(Optimizer): elif self.learning_rate_adaptation == 'semi_pesky': - if self.model.__class__.__name__ == 'Bayesian_GPLVM': - g_t = self.model.grads + if self.Model.__class__.__name__ == 'Bayesian_GPLVM': + g_t = self.Model.grads if t == 0: self.hbar_t = 0.0 self.tau_t = 100.0 @@ -259,28 +259,28 @@ class opt_SGD(Optimizer): def opt(self, f_fp=None, f=None, fp=None): - self.x_opt = self.model._get_params_transformed() + self.x_opt = self.Model._get_params_transformed() self.grads = [] - X, Y = self.model.X.copy(), self.model.likelihood.Y.copy() + X, Y = self.Model.X.copy(), self.Model.likelihood.Y.copy() - self.model.likelihood.YYT = 0 - self.model.likelihood.trYYT = 0 - self.model.likelihood._offset = 
0.0 - self.model.likelihood._scale = 1.0 + self.Model.likelihood.YYT = 0 + self.Model.likelihood.trYYT = 0 + self.Model.likelihood._offset = 0.0 + self.Model.likelihood._scale = 1.0 - N, input_dim = self.model.X.shape - D = self.model.likelihood.Y.shape[1] - num_params = self.model._get_params() + N, input_dim = self.Model.X.shape + D = self.Model.likelihood.Y.shape[1] + num_params = self.Model._get_params() self.trace = [] - missing_data = self.check_for_missing(self.model.likelihood.Y) + missing_data = self.check_for_missing(self.Model.likelihood.Y) step = np.zeros_like(num_params) for it in range(self.iterations): if self.actual_iter != None: it = self.actual_iter - self.model.grads = np.zeros_like(self.x_opt) # TODO this is ugly + self.Model.grads = np.zeros_like(self.x_opt) # TODO this is ugly if it == 0 or self.self_paced is False: features = np.random.permutation(Y.shape[1]) @@ -292,29 +292,29 @@ class opt_SGD(Optimizer): NLL = [] import pylab as plt for count, j in enumerate(features): - self.model.input_dim = len(j) - self.model.likelihood.input_dim = len(j) - self.model.likelihood.set_data(Y[:, j]) - # self.model.likelihood.V = self.model.likelihood.Y*self.model.likelihood.precision + self.Model.input_dim = len(j) + self.Model.likelihood.input_dim = len(j) + self.Model.likelihood.set_data(Y[:, j]) + # self.Model.likelihood.V = self.Model.likelihood.Y*self.Model.likelihood.precision - sigma = self.model.likelihood._variance - self.model.likelihood._variance = None # invalidate cache - self.model.likelihood._set_params(sigma) + sigma = self.Model.likelihood._variance + self.Model.likelihood._variance = None # invalidate cache + self.Model.likelihood._set_params(sigma) if missing_data: shapes = self.get_param_shapes(N, input_dim) f, step, Nj = self.step_with_missing_data(f_fp, X, step, shapes) else: - self.model.likelihood.YYT = np.dot(self.model.likelihood.Y, self.model.likelihood.Y.T) - self.model.likelihood.trYYT = np.trace(self.model.likelihood.YYT) + 
self.Model.likelihood.YYT = np.dot(self.Model.likelihood.Y, self.Model.likelihood.Y.T) + self.Model.likelihood.trYYT = np.trace(self.Model.likelihood.YYT) Nj = N f, fp = f_fp(self.x_opt) - self.model.grads = fp.copy() + self.Model.grads = fp.copy() step = self.momentum * step + self.learning_rate * fp self.x_opt -= step if self.messages == 2: - noise = self.model.likelihood._variance + noise = self.Model.likelihood._variance status = "evaluating {feature: 5d}/{tot: 5d} \t f: {f: 2.3f} \t non-missing: {nm: 4d}\t noise: {noise: 2.4f}\r".format(feature = count, tot = len(features), f = f, nm = Nj, noise = noise) sys.stdout.write(status) sys.stdout.flush() @@ -328,19 +328,19 @@ class opt_SGD(Optimizer): # plt.plot(self.param_traces['noise']) # for k in self.param_traces.keys(): - # self.param_traces[k].append(self.model.get(k)[0]) - self.grads.append(self.model.grads.tolist()) + # self.param_traces[k].append(self.Model.get(k)[0]) + self.grads.append(self.Model.grads.tolist()) # should really be a sum(), but earlier samples in the iteration will have a very crappy ll self.f_opt = np.mean(NLL) - self.model.N = N - self.model.X = X - self.model.input_dim = D - self.model.likelihood.N = N - self.model.likelihood.input_dim = D - self.model.likelihood.Y = Y - sigma = self.model.likelihood._variance - self.model.likelihood._variance = None # invalidate cache - self.model.likelihood._set_params(sigma) + self.Model.N = N + self.Model.X = X + self.Model.input_dim = D + self.Model.likelihood.N = N + self.Model.likelihood.input_dim = D + self.Model.likelihood.Y = Y + sigma = self.Model.likelihood._variance + self.Model.likelihood._variance = None # invalidate cache + self.Model.likelihood._set_params(sigma) self.trace.append(self.f_opt) if self.iteration_file is not None: diff --git a/GPy/kern/Brownian.py b/GPy/kern/Brownian.py index fe60ec59..76e103af 100644 --- a/GPy/kern/Brownian.py +++ b/GPy/kern/Brownian.py @@ -2,14 +2,14 @@ # Licensed under the BSD 3-clause license (see 
LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np def theta(x): """Heavisdie step function""" return np.where(x>=0.,1.,0.) -class Brownian(kernpart): +class Brownian(Kernpart): """ Brownian Motion kernel. @@ -21,7 +21,7 @@ class Brownian(kernpart): def __init__(self,input_dim,variance=1.): self.input_dim = input_dim assert self.input_dim==1, "Brownian motion in 1D only" - self.Nparam = 1. + self.num_params = 1. self.name = 'Brownian' self._set_params(np.array([variance]).flatten()) diff --git a/GPy/kern/Matern32.py b/GPy/kern/Matern32.py index 6204ca5e..60f0b6e9 100644 --- a/GPy/kern/Matern32.py +++ b/GPy/kern/Matern32.py @@ -2,13 +2,11 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -import hashlib -from ..util.linalg import pdinv,mdot from scipy import integrate -class Matern32(kernpart): +class Matern32(Kernpart): """ Matern 3/2 kernel: @@ -28,11 +26,11 @@ class Matern32(kernpart): """ - def __init__(self,input_dim,variance=1.,lengthscale=None,ARD=False): + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False): self.input_dim = input_dim self.ARD = ARD if ARD == False: - self.Nparam = 2 + self.num_params = 2 self.name = 'Mat32' if lengthscale is not None: lengthscale = np.asarray(lengthscale) @@ -40,76 +38,76 @@ class Matern32(kernpart): else: lengthscale = np.ones(1) else: - self.Nparam = self.input_dim + 1 + self.num_params = self.input_dim + 1 self.name = 'Mat32' if lengthscale is not None: lengthscale = np.asarray(lengthscale) assert lengthscale.size == self.input_dim, "bad number of lengthscales" else: lengthscale = np.ones(self.input_dim) - self._set_params(np.hstack((variance,lengthscale.flatten()))) + self._set_params(np.hstack((variance, lengthscale.flatten()))) def _get_params(self): """return the value of the parameters.""" - return np.hstack((self.variance,self.lengthscale)) + return 
np.hstack((self.variance, self.lengthscale)) - def _set_params(self,x): + def _set_params(self, x): """set the value of the parameters.""" - assert x.size == self.Nparam + assert x.size == self.num_params self.variance = x[0] self.lengthscale = x[1:] def _get_param_names(self): """return parameter names.""" - if self.Nparam == 2: - return ['variance','lengthscale'] + if self.num_params == 2: + return ['variance', 'lengthscale'] else: - return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscale.size)] + return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)] - def K(self,X,X2,target): + def K(self, X, X2, target): """Compute the covariance matrix between X and X2.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1)) - np.add(self.variance*(1+np.sqrt(3.)*dist)*np.exp(-np.sqrt(3.)*dist), target,target) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1)) + np.add(self.variance * (1 + np.sqrt(3.) * dist) * np.exp(-np.sqrt(3.) 
* dist), target, target) - def Kdiag(self,X,target): + def Kdiag(self, X, target): """Compute the diagonal of the covariance matrix associated to X.""" - np.add(target,self.variance,target) + np.add(target, self.variance, target) - def dK_dtheta(self,dL_dK,X,X2,target): + def dK_dtheta(self, dL_dK, X, X2, target): """derivative of the covariance matrix with respect to the parameters.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1)) - dvar = (1+np.sqrt(3.)*dist)*np.exp(-np.sqrt(3.)*dist) - invdist = 1./np.where(dist!=0.,dist,np.inf) - dist2M = np.square(X[:,None,:]-X2[None,:,:])/self.lengthscale**3 - #dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis] - target[0] += np.sum(dvar*dL_dK) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1)) + dvar = (1 + np.sqrt(3.) * dist) * np.exp(-np.sqrt(3.) * dist) + invdist = 1. / np.where(dist != 0., dist, np.inf) + dist2M = np.square(X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 3 + # dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis] + target[0] += np.sum(dvar * dL_dK) if self.ARD == True: - dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis] - #dl = self.variance*dvar[:,:,None]*dist2M*invdist[:,:,None] - target[1:] += (dl*dL_dK[:,:,None]).sum(0).sum(0) + dl = (self.variance * 3 * dist * np.exp(-np.sqrt(3.) * dist))[:, :, np.newaxis] * dist2M * invdist[:, :, np.newaxis] + # dl = self.variance*dvar[:,:,None]*dist2M*invdist[:,:,None] + target[1:] += (dl * dL_dK[:, :, None]).sum(0).sum(0) else: - dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist)) * dist2M.sum(-1)*invdist - #dl = self.variance*dvar*dist2M.sum(-1)*invdist - target[1] += np.sum(dl*dL_dK) + dl = (self.variance * 3 * dist * np.exp(-np.sqrt(3.) 
* dist)) * dist2M.sum(-1) * invdist + # dl = self.variance*dvar*dist2M.sum(-1)*invdist + target[1] += np.sum(dl * dL_dK) - def dKdiag_dtheta(self,dL_dKdiag,X,target): + def dKdiag_dtheta(self, dL_dKdiag, X, target): """derivative of the diagonal of the covariance matrix with respect to the parameters.""" target[0] += np.sum(dL_dKdiag) - def dK_dX(self,dL_dK,X,X2,target): + def dK_dX(self, dL_dK, X, X2, target): """derivative of the covariance matrix with respect to X.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1))[:,:,None] - ddist_dX = (X[:,None,:]-X2[None,:,:])/self.lengthscale**2/np.where(dist!=0.,dist,np.inf) - dK_dX = - np.transpose(3*self.variance*dist*np.exp(-np.sqrt(3)*dist)*ddist_dX,(1,0,2)) - target += np.sum(dK_dX*dL_dK.T[:,:,None],0) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))[:, :, None] + ddist_dX = (X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf) + dK_dX = -np.transpose(3 * self.variance * dist * np.exp(-np.sqrt(3) * dist) * ddist_dX, (1, 0, 2)) + target += np.sum(dK_dX * dL_dK.T[:, :, None], 0) - def dKdiag_dX(self,dL_dKdiag,X,target): + def dKdiag_dX(self, dL_dKdiag, X, target): pass - def Gram_matrix(self,F,F1,F2,lower,upper): + def Gram_matrix(self, F, F1, F2, lower, upper): """ Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1. @@ -123,15 +121,15 @@ class Matern32(kernpart): :type lower,upper: floats """ assert self.input_dim == 1 - def L(x,i): - return(3./self.lengthscale**2*F[i](x) + 2*np.sqrt(3)/self.lengthscale*F1[i](x) + F2[i](x)) + def L(x, i): + return(3. 
/ self.lengthscale ** 2 * F[i](x) + 2 * np.sqrt(3) / self.lengthscale * F1[i](x) + F2[i](x)) n = F.shape[0] - G = np.zeros((n,n)) + G = np.zeros((n, n)) for i in range(n): - for j in range(i,n): - G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0] - Flower = np.array([f(lower) for f in F])[:,None] - F1lower = np.array([f(lower) for f in F1])[:,None] - #print "OLD \n", np.dot(F1lower,F1lower.T), "\n \n" - #return(G) - return(self.lengthscale**3/(12.*np.sqrt(3)*self.variance) * G + 1./self.variance*np.dot(Flower,Flower.T) + self.lengthscale**2/(3.*self.variance)*np.dot(F1lower,F1lower.T)) + for j in range(i, n): + G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0] + Flower = np.array([f(lower) for f in F])[:, None] + F1lower = np.array([f(lower) for f in F1])[:, None] + # print "OLD \n", np.dot(F1lower,F1lower.T), "\n \n" + # return(G) + return(self.lengthscale ** 3 / (12.*np.sqrt(3) * self.variance) * G + 1. / self.variance * np.dot(Flower, Flower.T) + self.lengthscale ** 2 / (3.*self.variance) * np.dot(F1lower, F1lower.T)) diff --git a/GPy/kern/Matern52.py b/GPy/kern/Matern52.py index c35715ee..e02cb9bf 100644 --- a/GPy/kern/Matern52.py +++ b/GPy/kern/Matern52.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib from scipy import integrate -class Matern52(kernpart): +class Matern52(Kernpart): """ Matern 5/2 kernel: @@ -30,7 +30,7 @@ class Matern52(kernpart): self.input_dim = input_dim self.ARD = ARD if ARD == False: - self.Nparam = 2 + self.num_params = 2 self.name = 'Mat52' if lengthscale is not None: lengthscale = np.asarray(lengthscale) @@ -38,7 +38,7 @@ class Matern52(kernpart): else: lengthscale = np.ones(1) else: - self.Nparam = self.input_dim + 1 + self.num_params = self.input_dim + 1 self.name = 'Mat52' if lengthscale is not None: lengthscale = np.asarray(lengthscale) @@ -53,13 
+53,13 @@ class Matern52(kernpart): def _set_params(self,x): """set the value of the parameters.""" - assert x.size == self.Nparam + assert x.size == self.num_params self.variance = x[0] self.lengthscale = x[1:] def _get_param_names(self): """return parameter names.""" - if self.Nparam == 2: + if self.num_params == 2: return ['variance','lengthscale'] else: return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscale.size)] diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py index a71d38f4..97c1d88f 100644 --- a/GPy/kern/__init__.py +++ b/GPy/kern/__init__.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from constructors import rbf, Matern32, Matern52, exponential, linear, white, bias, finite_dimensional, spline, Brownian, periodic_exponential, periodic_Matern32, periodic_Matern52, prod, symmetric, Coregionalise, rational_quadratic, fixed, rbfcos, independent_outputs +from constructors import rbf, Matern32, Matern52, exponential, linear, white, bias, finite_dimensional, spline, Brownian, periodic_exponential, periodic_Matern32, periodic_Matern52, prod, symmetric, Coregionalise, rational_quadratic, Fixed, rbfcos, IndependentOutputs try: from constructors import rbf_sympy, sympykern # these depend on sympy except: diff --git a/GPy/kern/bias.py b/GPy/kern/bias.py index 9defbf95..8ec3741d 100644 --- a/GPy/kern/bias.py +++ b/GPy/kern/bias.py @@ -2,11 +2,11 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib -class bias(kernpart): +class bias(Kernpart): def __init__(self,input_dim,variance=1.): """ :param input_dim: the number of input dimensions @@ -15,7 +15,7 @@ class bias(kernpart): :type variance: float """ self.input_dim = input_dim - self.Nparam = 1 + self.num_params = 1 self.name = 'bias' self._set_params(np.array([variance]).flatten()) diff --git a/GPy/kern/constructors.py b/GPy/kern/constructors.py index 
a6ed1145..520c931b 100644 --- a/GPy/kern/constructors.py +++ b/GPy/kern/constructors.py @@ -12,7 +12,7 @@ from exponential import exponential as exponentialpart from Matern32 import Matern32 as Matern32part from Matern52 import Matern52 as Matern52part from bias import bias as biaspart -from fixed import fixed as fixedpart +from fixed import Fixed as fixedpart from finite_dimensional import finite_dimensional as finite_dimensionalpart from spline import spline as splinepart from Brownian import Brownian as Brownianpart @@ -24,7 +24,7 @@ from symmetric import symmetric as symmetric_part from coregionalise import Coregionalise as coregionalise_part from rational_quadratic import rational_quadratic as rational_quadraticpart from rbfcos import rbfcos as rbfcospart -from independent_outputs import independent_outputs as independent_output_part +from independent_outputs import IndependentOutputs as independent_output_part #TODO these s=constructors are not as clean as we'd like. Tidy the code up #using meta-classes to make the objects construct properly wthout them. @@ -294,9 +294,9 @@ def rational_quadratic(D,variance=1., lengthscale=1., power=1.): part = rational_quadraticpart(D,variance, lengthscale, power) return kern(D, [part]) -def fixed(D, K, variance=1.): +def Fixed(D, K, variance=1.): """ - Construct a fixed effect kernel. + Construct a Fixed effect kernel. 
Arguments --------- @@ -314,7 +314,7 @@ def rbfcos(D,variance=1.,frequencies=None,bandwidths=None,ARD=False): part = rbfcospart(D,variance,frequencies,bandwidths,ARD) return kern(D,[part]) -def independent_outputs(k): +def IndependentOutputs(k): """ Construct a kernel with independent outputs from an existing kernel """ diff --git a/GPy/kern/coregionalise.py b/GPy/kern/coregionalise.py index 329d1015..8faceafe 100644 --- a/GPy/kern/coregionalise.py +++ b/GPy/kern/coregionalise.py @@ -1,13 +1,13 @@ # Copyright (c) 2012, James Hensman and Ricardo Andrade # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np from GPy.util.linalg import mdot, pdinv import pdb from scipy import weave -class Coregionalise(kernpart): +class Coregionalise(Kernpart): """ Kernel for Intrinsic Corregionalization Models """ @@ -26,14 +26,14 @@ class Coregionalise(kernpart): else: assert kappa.shape==(self.Nout,) self.kappa = kappa - self.Nparam = self.Nout*(self.R + 1) + self.num_params = self.Nout*(self.R + 1) self._set_params(np.hstack([self.W.flatten(),self.kappa])) def _get_params(self): return np.hstack([self.W.flatten(),self.kappa]) def _set_params(self,x): - assert x.size == self.Nparam + assert x.size == self.num_params self.kappa = x[-self.Nout:] self.W = x[:-self.Nout].reshape(self.Nout,self.R) self.B = np.dot(self.W,self.W.T) + np.diag(self.kappa) diff --git a/GPy/kern/exponential.py b/GPy/kern/exponential.py index 9e50712b..5cb0f584 100644 --- a/GPy/kern/exponential.py +++ b/GPy/kern/exponential.py @@ -2,21 +2,20 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -import hashlib from scipy import integrate -class exponential(kernpart): +class exponential(Kernpart): """ Exponential kernel (aka Ornstein-Uhlenbeck or Matern 1/2) .. 
math:: - k(r) = \sigma^2 \exp(- r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^D \\frac{(x_i-y_i)^2}{\ell_i^2} } + k(r) = \sigma^2 \exp(- r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^{\\text{input\_dim}} \\frac{(x_i-y_i)^2}{\ell_i^2} } - :param D: the number of input dimensions - :type D: int + :param input_dim: the number of input dimensions + :type input_dim: int :param variance: the variance :math:`\sigma^2` :type variance: float :param lengthscale: the vector of lengthscale :math:`\ell_i` @@ -26,11 +25,11 @@ class exponential(kernpart): :rtype: kernel object """ - def __init__(self,D,variance=1.,lengthscale=None,ARD=False): - self.D = D + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False): + self.input_dim = input_dim self.ARD = ARD if ARD == False: - self.Nparam = 2 + self.num_params = 2 self.name = 'exp' if lengthscale is not None: lengthscale = np.asarray(lengthscale) @@ -38,76 +37,76 @@ class exponential(kernpart): else: lengthscale = np.ones(1) else: - self.Nparam = self.D + 1 + self.num_params = self.input_dim + 1 self.name = 'exp' if lengthscale is not None: lengthscale = np.asarray(lengthscale) - assert lengthscale.size == self.D, "bad number of lengthscales" + assert lengthscale.size == self.input_dim, "bad number of lengthscales" else: - lengthscale = np.ones(self.D) - self._set_params(np.hstack((variance,lengthscale.flatten()))) + lengthscale = np.ones(self.input_dim) + self._set_params(np.hstack((variance, lengthscale.flatten()))) def _get_params(self): """return the value of the parameters.""" - return np.hstack((self.variance,self.lengthscale)) + return np.hstack((self.variance, self.lengthscale)) - def _set_params(self,x): + def _set_params(self, x): """set the value of the parameters.""" - assert x.size == self.Nparam + assert x.size == self.num_params self.variance = x[0] self.lengthscale = x[1:] def _get_param_names(self): """return parameter names.""" - if self.Nparam == 2: - return ['variance','lengthscale'] + if
self.num_params == 2: + return ['variance', 'lengthscale'] else: - return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscale.size)] + return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)] - def K(self,X,X2,target): + def K(self, X, X2, target): """Compute the covariance matrix between X and X2.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1)) - np.add(self.variance*np.exp(-dist), target,target) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1)) + np.add(self.variance * np.exp(-dist), target, target) - def Kdiag(self,X,target): + def Kdiag(self, X, target): """Compute the diagonal of the covariance matrix associated to X.""" - np.add(target,self.variance,target) + np.add(target, self.variance, target) - def dK_dtheta(self,dL_dK,X,X2,target): + def dK_dtheta(self, dL_dK, X, X2, target): """derivative of the covariance matrix with respect to the parameters.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1)) - invdist = 1./np.where(dist!=0.,dist,np.inf) - dist2M = np.square(X[:,None,:]-X2[None,:,:])/self.lengthscale**3 + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1)) + invdist = 1. 
/ np.where(dist != 0., dist, np.inf) + dist2M = np.square(X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 3 dvar = np.exp(-dist) - target[0] += np.sum(dvar*dL_dK) + target[0] += np.sum(dvar * dL_dK) if self.ARD == True: - dl = self.variance*dvar[:,:,None]*dist2M*invdist[:,:,None] - target[1:] += (dl*dL_dK[:,:,None]).sum(0).sum(0) + dl = self.variance * dvar[:, :, None] * dist2M * invdist[:, :, None] + target[1:] += (dl * dL_dK[:, :, None]).sum(0).sum(0) else: - dl = self.variance*dvar*dist2M.sum(-1)*invdist - target[1] += np.sum(dl*dL_dK) + dl = self.variance * dvar * dist2M.sum(-1) * invdist + target[1] += np.sum(dl * dL_dK) - def dKdiag_dtheta(self,dL_dKdiag,X,target): + def dKdiag_dtheta(self, dL_dKdiag, X, target): """derivative of the diagonal of the covariance matrix with respect to the parameters.""" - #NB: derivative of diagonal elements wrt lengthscale is 0 + # NB: derivative of diagonal elements wrt lengthscale is 0 target[0] += np.sum(dL_dKdiag) - def dK_dX(self,dL_dK,X,X2,target): + def dK_dX(self, dL_dK, X, X2, target): """derivative of the covariance matrix with respect to X.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1))[:,:,None] - ddist_dX = (X[:,None,:]-X2[None,:,:])/self.lengthscale**2/np.where(dist!=0.,dist,np.inf) - dK_dX = - np.transpose(self.variance*np.exp(-dist)*ddist_dX,(1,0,2)) - target += np.sum(dK_dX*dL_dK.T[:,:,None],0) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))[:, :, None] + ddist_dX = (X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf) + dK_dX = -np.transpose(self.variance * np.exp(-dist) * ddist_dX, (1, 0, 2)) + target += np.sum(dK_dX * dL_dK.T[:, :, None], 0) - def dKdiag_dX(self,dL_dKdiag,X,target): + def dKdiag_dX(self, dL_dKdiag, X, target): pass - def Gram_matrix(self,F,F1,lower,upper): + def Gram_matrix(self, F, F1, lower, upper): """ - Return the Gram matrix of the 
vector of functions F with respect to the RKHS norm. The use of this function is limited to D=1. + Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1. :param F: vector of functions :type F: np.array @@ -116,13 +115,13 @@ class exponential(kernpart): :param lower,upper: boundaries of the input domain :type lower,upper: floats """ - assert self.D == 1 - def L(x,i): - return(1./self.lengthscale*F[i](x) + F1[i](x)) + assert self.input_dim == 1 + def L(x, i): + return(1. / self.lengthscale * F[i](x) + F1[i](x)) n = F.shape[0] - G = np.zeros((n,n)) + G = np.zeros((n, n)) for i in range(n): - for j in range(i,n): - G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0] - Flower = np.array([f(lower) for f in F])[:,None] - return(self.lengthscale/2./self.variance * G + 1./self.variance * np.dot(Flower,Flower.T)) + for j in range(i, n): + G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0] + Flower = np.array([f(lower) for f in F])[:, None] + return(self.lengthscale / 2. / self.variance * G + 1. 
/ self.variance * np.dot(Flower, Flower.T)) diff --git a/GPy/kern/finite_dimensional.py b/GPy/kern/finite_dimensional.py index 36afd1dc..b23ddb16 100644 --- a/GPy/kern/finite_dimensional.py +++ b/GPy/kern/finite_dimensional.py @@ -2,21 +2,21 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np from ..util.linalg import pdinv,mdot -class finite_dimensional(kernpart): - def __init__(self, D, F, G, variance=1., weights=None): +class finite_dimensional(Kernpart): + def __init__(self, input_dim, F, G, variance=1., weights=None): """ Arguments ---------- - D: int - the number of input dimensions + input_dim: int - the number of input dimensions F: np.array of functions with shape (n,) - the n basis functions G: np.array with shape (n,n) - the Gram matrix associated to F weights : np.ndarray with shape (n,) """ - self.D = D + self.input_dim = input_dim self.F = F self.G = G self.G_1 ,L,Li,logdet = pdinv(G) @@ -25,14 +25,14 @@ class finite_dimensional(kernpart): assert weights.shape==(self.n,) else: weights = np.ones(self.n) - self.Nparam = self.n + 1 + self.num_params = self.n + 1 self.name = 'finite_dim' self._set_params(np.hstack((variance,weights))) def _get_params(self): return np.hstack((self.variance,self.weights)) def _set_params(self,x): - assert x.size == (self.Nparam) + assert x.size == (self.num_params) self.variance = x[0] self.weights = x[1:] def _get_param_names(self): @@ -48,7 +48,7 @@ class finite_dimensional(kernpart): product = self.variance * mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T) np.add(product,target,target) def Kdiag(self,X,target): - product = np.diag(self.K(X,X2)) + Kfull = np.zeros((X.shape[0], X.shape[0])) + self.K(X, X, Kfull) + product = np.diag(Kfull) np.add(target,product,target) def dK_dtheta(self,X,X2,target): """Return shape is NxMx(Ntheta)""" diff --git a/GPy/kern/fixed.py b/GPy/kern/fixed.py index 8732ec8f..9e8f6226 100644 --- a/GPy/kern/fixed.py +++
b/GPy/kern/fixed.py @@ -1,42 +1,41 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -import hashlib -class fixed(kernpart): - def __init__(self,D,K,variance=1.): +class Fixed(Kernpart): + def __init__(self, input_dim, K, variance=1.): """ - :param D: the number of input dimensions - :type D: int + :param input_dim: the number of input dimensions + :type input_dim: int :param variance: the variance of the kernel :type variance: float """ - self.D = D + self.input_dim = input_dim self.fixed_K = K - self.Nparam = 1 - self.name = 'fixed' + self.num_params = 1 + self.name = 'Fixed' self._set_params(np.array([variance]).flatten()) def _get_params(self): return self.variance - def _set_params(self,x): - assert x.shape==(1,) + def _set_params(self, x): + assert x.shape == (1,) self.variance = x def _get_param_names(self): return ['variance'] - def K(self,X,X2,target): + def K(self, X, X2, target): target += self.variance * self.fixed_K - def dK_dtheta(self,partial,X,X2,target): + def dK_dtheta(self, partial, X, X2, target): target += (partial * self.fixed_K).sum() - def dK_dX(self, partial,X, X2, target): + def dK_dX(self, partial, X, X2, target): pass - def dKdiag_dX(self,partial,X,target): + def dKdiag_dX(self, partial, X, target): pass diff --git a/GPy/kern/independent_outputs.py b/GPy/kern/independent_outputs.py index b94202d7..f88b0ff5 100644 --- a/GPy/kern/independent_outputs.py +++ b/GPy/kern/independent_outputs.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np def index_to_slices(index): @@ -31,7 +31,7 @@ def index_to_slices(index): [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))] return ret -class independent_outputs(kernpart): +class 
IndependentOutputs(Kernpart): """ A kernel part which can represent several independent functions. this kernel 'switches off' parts of the matrix where the output indexes are different. @@ -41,8 +41,8 @@ """ def __init__(self,k): - self.D = k.D + 1 - self.Nparam = k.Nparam + self.input_dim = k.input_dim + 1 + self.num_params = k.num_params self.name = 'iops('+ k.name + ')' self.k = k diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py index efd49020..4a05783b 100644 --- a/GPy/kern/kern.py +++ b/GPy/kern/kern.py @@ -4,30 +4,29 @@ import numpy as np import pylab as pb -from ..core.parameterised import parameterised -from kernpart import kernpart +from ..core.parameterised import Parameterised +from kernpart import Kernpart import itertools from prod import prod -from ..util.linalg import symmetrify -class kern(parameterised): +class kern(Parameterised): def __init__(self, input_dim, parts=[], input_slices=None): """ This is the main kernel class for GPy. It handles multiple (additive) kernel functions, and keeps track of various things like which parameters live where. The technical code for kernels is divided into _parts_ (see e.g. rbf.py). This object contains a list of parts, which are computed additively. For multiplication, special _prod_ parts are used.
- :param input_dim: The dimensioality of the kernel's input space + :param input_dim: The dimensionality of the kernel's input space :type input_dim: int :param parts: the 'parts' (PD functions) of the kernel - :type parts: list of kernpart objects + :type parts: list of Kernpart objects :param input_slices: the slices on the inputs which apply to each kernel :type input_slices: list of slice objects, or list of bools """ self.parts = parts self.Nparts = len(parts) - self.Nparam = sum([p.Nparam for p in self.parts]) + self.num_params = sum([p.num_params for p in self.parts]) self.input_dim = input_dim @@ -39,11 +38,11 @@ class kern(parameterised): self.input_slices = [sl if type(sl) is slice else slice(None) for sl in input_slices] for p in self.parts: - assert isinstance(p, kernpart), "bad kernel part" + assert isinstance(p, Kernpart), "bad kernel part" self.compute_param_slices() - parameterised.__init__(self) + Parameterised.__init__(self) def plot_ARD(self, fignum=None, ax=None): @@ -80,8 +79,8 @@ class kern(parameterised): self.param_slices = [] count = 0 for p in self.parts: - self.param_slices.append(slice(count, count + p.Nparam)) - count += p.Nparam + self.param_slices.append(slice(count, count + p.num_params)) + count += p.num_params def __add__(self, other): """ @@ -104,21 +103,21 @@ class kern(parameterised): newkern = kern(D, self.parts + other.parts, self_input_slices + other_input_slices) # transfer constraints: - newkern.constrained_indices = self.constrained_indices + [x + self.Nparam for x in other.constrained_indices] + newkern.constrained_indices = self.constrained_indices + [x + self.num_params for x in other.constrained_indices] newkern.constraints = self.constraints + other.constraints - newkern.fixed_indices = self.fixed_indices + [self.Nparam + x for x in other.fixed_indices] + newkern.fixed_indices = self.fixed_indices + [self.num_params + x for x in other.fixed_indices] newkern.fixed_values = self.fixed_values + other.fixed_values 
newkern.constraints = self.constraints + other.constraints - newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices] + newkern.tied_indices = self.tied_indices + [self.num_params + x for x in other.tied_indices] else: assert self.input_dim == other.input_dim newkern = kern(self.input_dim, self.parts + other.parts, self.input_slices + other.input_slices) # transfer constraints: - newkern.constrained_indices = self.constrained_indices + [i + self.Nparam for i in other.constrained_indices] + newkern.constrained_indices = self.constrained_indices + [i + self.num_params for i in other.constrained_indices] newkern.constraints = self.constraints + other.constraints - newkern.fixed_indices = self.fixed_indices + [self.Nparam + x for x in other.fixed_indices] + newkern.fixed_indices = self.fixed_indices + [self.num_params + x for x in other.fixed_indices] newkern.fixed_values = self.fixed_values + other.fixed_values - newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices] + newkern.tied_indices = self.tied_indices + [self.num_params + x for x in other.tied_indices] return newkern def __mul__(self, other): @@ -158,13 +157,13 @@ class kern(parameterised): K1_param = [] n = 0 for k1 in K1.parts: - K1_param += [range(n, n + k1.Nparam)] - n += k1.Nparam + K1_param += [range(n, n + k1.num_params)] + n += k1.num_params n = 0 K2_param = [] for k2 in K2.parts: - K2_param += [range(K1.Nparam + n, K1.Nparam + n + k2.Nparam)] - n += k2.Nparam + K2_param += [range(K1.num_params + n, K1.num_params + n + k2.num_params)] + n += k2.num_params index_param = [] for p1 in K1_param: for p2 in K2_param: @@ -172,12 +171,12 @@ class kern(parameterised): index_param = np.array(index_param) # Get the ties and constrains of the kernels before the multiplication - prev_ties = K1.tied_indices + [arr + K1.Nparam for arr in K2.tied_indices] + prev_ties = K1.tied_indices + [arr + K1.num_params for arr in K2.tied_indices] - prev_constr_ind 
= [K1.constrained_indices] + [K1.Nparam + i for i in K2.constrained_indices] + prev_constr_ind = [K1.constrained_indices] + [K1.num_params + i for i in K2.constrained_indices] prev_constr = K1.constraints + K2.constraints - # prev_constr_fix = K1.fixed_indices + [arr + K1.Nparam for arr in K2.fixed_indices] + # prev_constr_fix = K1.fixed_indices + [arr + K1.num_params for arr in K2.fixed_indices] # prev_constr_fix_values = K1.fixed_values + K2.fixed_values # follow the previous ties @@ -186,7 +185,7 @@ class kern(parameterised): index_param[np.where(index_param == j)[0]] = arr[0] # ties and constrains - for i in range(K1.Nparam + K2.Nparam): + for i in range(K1.num_params + K2.num_params): index = np.where(index_param == i)[0] if index.size > 1: self.tie_params(index) @@ -230,7 +229,7 @@ class kern(parameterised): :type X2: np.ndarray (num_inducing x input_dim) """ assert X.shape[1] == self.input_dim - target = np.zeros(self.Nparam) + target = np.zeros(self.num_params) if X2 is None: [p.dK_dtheta(dL_dK, X[:, i_s], None, target[ps]) for p, i_s, ps, in zip(self.parts, self.input_slices, self.param_slices)] else: @@ -259,7 +258,7 @@ class kern(parameterised): def dKdiag_dtheta(self, dL_dKdiag, X): assert X.shape[1] == self.input_dim assert dL_dKdiag.size == X.shape[0] - target = np.zeros(self.Nparam) + target = np.zeros(self.num_params) [p.dKdiag_dtheta(dL_dKdiag, X[:, i_s], target[ps]) for p, i_s, ps in zip(self.parts, self.input_slices, self.param_slices)] return self._transform_gradients(target) @@ -275,7 +274,7 @@ class kern(parameterised): return target def dpsi0_dtheta(self, dL_dpsi0, Z, mu, S): - target = np.zeros(self.Nparam) + target = np.zeros(self.num_params) [p.dpsi0_dtheta(dL_dpsi0, Z[:, i_s], mu[:, i_s], S[:, i_s], target[ps]) for p, ps, i_s in zip(self.parts, self.param_slices, self.input_slices)] return self._transform_gradients(target) @@ -290,7 +289,7 @@ class kern(parameterised): return target def dpsi1_dtheta(self, dL_dpsi1, Z, mu, S): - target = 
np.zeros((self.Nparam)) + target = np.zeros((self.num_params)) [p.dpsi1_dtheta(dL_dpsi1, Z[:, i_s], mu[:, i_s], S[:, i_s], target[ps]) for p, ps, i_s in zip(self.parts, self.param_slices, self.input_slices)] return self._transform_gradients(target) @@ -327,13 +326,13 @@ class kern(parameterised): p2.psi1(Z, mu, S, tmp2) prod = np.multiply(tmp1, tmp2) - crossterms += prod[:,:,None] + prod[:, None, :] + crossterms += prod[:, :, None] + prod[:, None, :] target += crossterms return target def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S): - target = np.zeros(self.Nparam) + target = np.zeros(self.num_params) [p.dpsi2_dtheta(dL_dpsi2, Z[:, i_s], mu[:, i_s], S[:, i_s], target[ps]) for p, i_s, ps in zip(self.parts, self.input_slices, self.param_slices)] # compute the "cross" terms @@ -345,14 +344,14 @@ class kern(parameterised): tmp = np.zeros((mu.shape[0], Z.shape[0])) p1.psi1(Z, mu, S, tmp) - p2.dpsi1_dtheta((tmp[:,None,:]*dL_dpsi2).sum(1)*2., Z, mu, S, target[ps2]) + p2.dpsi1_dtheta((tmp[:, None, :] * dL_dpsi2).sum(1) * 2., Z, mu, S, target[ps2]) return self._transform_gradients(target) def dpsi2_dZ(self, dL_dpsi2, Z, mu, S): target = np.zeros_like(Z) [p.dpsi2_dZ(dL_dpsi2, Z[:, i_s], mu[:, i_s], S[:, i_s], target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)] - #target *= 2 + # target *= 2 # compute the "cross" terms # TODO: we need input_slices here. 
@@ -362,7 +361,7 @@ class kern(parameterised): tmp = np.zeros((mu.shape[0], Z.shape[0])) p1.psi1(Z, mu, S, tmp) tmp2 = np.zeros_like(target) - p2.dpsi1_dZ((tmp[:,None,:]*dL_dpsi2).sum(1).T, Z, mu, S, tmp2) + p2.dpsi1_dZ((tmp[:, None, :] * dL_dpsi2).sum(1).T, Z, mu, S, tmp2) target += tmp2 return target * 2 @@ -379,7 +378,7 @@ class kern(parameterised): tmp = np.zeros((mu.shape[0], Z.shape[0])) p1.psi1(Z, mu, S, tmp) - p2.dpsi1_dmuS((tmp[:,None,:]*dL_dpsi2).sum(1).T*2., Z, mu, S, target_mu, target_S) + p2.dpsi1_dmuS((tmp[:, None, :] * dL_dpsi2).sum(1).T * 2., Z, mu, S, target_mu, target_S) return target_mu, target_S @@ -430,7 +429,7 @@ class kern(parameterised): Xnew = np.vstack((xx.flatten(), yy.flatten())).T Kx = self.K(Xnew, x, which_parts) Kx = Kx.reshape(resolution, resolution).T - pb.contour(xg, yg, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) + pb.contour(xg, yg, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) # @UndefinedVariable pb.xlim(xmin[0], xmax[0]) pb.ylim(xmin[1], xmax[1]) pb.xlabel("x1") diff --git a/GPy/kern/kernpart.py b/GPy/kern/kernpart.py index a4dfdd92..2ed56d66 100644 --- a/GPy/kern/kernpart.py +++ b/GPy/kern/kernpart.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -class kernpart(object): +class Kernpart(object): def __init__(self,input_dim): """ The base class for a kernpart: a positive definite function which forms part of a kernel @@ -13,7 +13,7 @@ class kernpart(object): Do not instantiate. 
""" self.input_dim = input_dim - self.Nparam = 1 + self.num_params = 1 self.name = 'unnamed' def _get_params(self): diff --git a/GPy/kern/linear.py b/GPy/kern/linear.py index 40dd2007..b2aa29f9 100644 --- a/GPy/kern/linear.py +++ b/GPy/kern/linear.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np from ..util.linalg import tdot from scipy import weave -class linear(kernpart): +class linear(Kernpart): """ Linear kernel @@ -28,7 +28,7 @@ class linear(kernpart): self.input_dim = input_dim self.ARD = ARD if ARD == False: - self.Nparam = 1 + self.num_params = 1 self.name = 'linear' if variances is not None: variances = np.asarray(variances) @@ -37,7 +37,7 @@ class linear(kernpart): variances = np.ones(1) self._Xcache, self._X2cache = np.empty(shape=(2,)) else: - self.Nparam = self.input_dim + self.num_params = self.input_dim self.name = 'linear' if variances is not None: variances = np.asarray(variances) @@ -54,12 +54,12 @@ class linear(kernpart): return self.variances def _set_params(self, x): - assert x.size == (self.Nparam) + assert x.size == (self.num_params) self.variances = x self.variances2 = np.square(self.variances) def _get_param_names(self): - if self.Nparam == 1: + if self.num_params == 1: return ['variance'] else: return ['variance_%i' % i for i in range(self.variances.size)] diff --git a/GPy/kern/periodic_Matern32.py b/GPy/kern/periodic_Matern32.py index 8892b93e..664a5183 100644 --- a/GPy/kern/periodic_Matern32.py +++ b/GPy/kern/periodic_Matern32.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -from GPy.util.linalg import mdot, pdinv +from GPy.util.linalg import mdot from GPy.util.decorators import silence_errors -class periodic_Matern32(kernpart): +class periodic_Matern32(Kernpart): """ Kernel of the periodic subspace (up to a given 
frequency) of a Matern 3/2 RKHS. Only defined for input_dim=1. @@ -25,7 +25,7 @@ class periodic_Matern32(kernpart): """ - def __init__(self,input_dim=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi): + def __init__(self, input_dim=1, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi): assert input_dim==1, "Periodic kernels are only defined for input_dim=1" self.name = 'periodic_Mat32' self.input_dim = input_dim @@ -35,7 +35,7 @@ class periodic_Matern32(kernpart): else: lengthscale = np.ones(1) self.lower,self.upper = lower, upper - self.Nparam = 3 + self.num_params = 3 self.n_freq = n_freq self.n_basis = 2*n_freq self._set_params(np.hstack((variance,lengthscale,period))) diff --git a/GPy/kern/periodic_Matern52.py b/GPy/kern/periodic_Matern52.py index e7ced7b8..c01d2d26 100644 --- a/GPy/kern/periodic_Matern52.py +++ b/GPy/kern/periodic_Matern52.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -from GPy.util.linalg import mdot, pdinv +from GPy.util.linalg import mdot from GPy.util.decorators import silence_errors -class periodic_Matern52(kernpart): +class periodic_Matern52(Kernpart): """ Kernel of the periodic subspace (up to a given frequency) of a Matern 5/2 RKHS. Only defined for input_dim=1. @@ -35,7 +35,7 @@ class periodic_Matern52(kernpart): else: lengthscale = np.ones(1) self.lower,self.upper = lower, upper - self.Nparam = 3 + self.num_params = 3 self.n_freq = n_freq self.n_basis = 2*n_freq self._set_params(np.hstack((variance,lengthscale,period))) @@ -209,7 +209,7 @@ class periodic_Matern52(kernpart): F2lower = np.array(self._cos(self.basis_alpha*self.basis_omega**2,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None] #dK_dvar - dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T) + dK_dvar = 1. 
/ self.variance * mdot(FX, self.Gi, FX2.T) #dK_dlen da_dlen = [-3*self.a[0]/self.lengthscale, -2*self.a[1]/self.lengthscale, -self.a[2]/self.lengthscale, 0.] diff --git a/GPy/kern/periodic_exponential.py b/GPy/kern/periodic_exponential.py index 0b69c342..fcaf6420 100644 --- a/GPy/kern/periodic_exponential.py +++ b/GPy/kern/periodic_exponential.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -from GPy.util.linalg import mdot, pdinv +from GPy.util.linalg import mdot from GPy.util.decorators import silence_errors -class periodic_exponential(kernpart): +class periodic_exponential(Kernpart): """ Kernel of the periodic subspace (up to a given frequency) of a exponential (Matern 1/2) RKHS. Only defined for input_dim=1. @@ -25,7 +25,7 @@ """ - def __init__(self,input_dim=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi): + def __init__(self, input_dim=1, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi): assert input_dim==1, "Periodic kernels are only defined for input_dim=1" self.name = 'periodic_exp' self.input_dim = input_dim @@ -35,7 +35,7 @@ else: lengthscale = np.ones(1) self.lower,self.upper = lower, upper - self.Nparam = 3 + self.num_params = 3 self.n_freq = n_freq self.n_basis = 2*n_freq self._set_params(np.hstack((variance,lengthscale,period))) diff --git a/GPy/kern/prod.py b/GPy/kern/prod.py index 4d1fe17f..493129c6 100644 --- a/GPy/kern/prod.py +++ b/GPy/kern/prod.py @@ -1,23 +1,23 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib -class prod(kernpart): +class prod(Kernpart): """ Computes the product of 2 kernels :param k1, k2: the kernels to multiply - :type k1, k2: kernpart + :type k1, k2: Kernpart :param tensor: The kernels are either multiply as functions defined on the same input space (default) or on the product of the input spaces :type tensor: Boolean :rtype: kernel object """ def __init__(self,k1,k2,tensor=False): - self.Nparam = k1.Nparam + k2.Nparam + self.num_params = k1.num_params + k2.num_params self.name = k1.name + '' + k2.name self.k1 = k1 self.k2 = k2 @@ -40,8 +40,8 @@ class prod(kernpart): def _set_params(self,x): """set the value of the parameters.""" - self.k1._set_params(x[:self.k1.Nparam]) - self.k2._set_params(x[self.k1.Nparam:]) + self.k1._set_params(x[:self.k1.num_params]) + self.k2._set_params(x[self.k1.num_params:]) def _get_param_names(self): """return parameter names.""" @@ -55,11 +55,11 @@ class prod(kernpart): """derivative of the covariance matrix with respect to the parameters.""" self._K_computations(X,X2) if X2 is None: - self.k1.dK_dtheta(dL_dK*self._K2, X[:,self.slice1], None, target[:self.k1.Nparam]) - self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.slice2], None, target[self.k1.Nparam:]) + self.k1.dK_dtheta(dL_dK*self._K2, X[:,self.slice1], None, target[:self.k1.num_params]) + self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.slice2], None, target[self.k1.num_params:]) else: - self.k1.dK_dtheta(dL_dK*self._K2, X[:,self.slice1], X2[:,self.slice1], target[:self.k1.Nparam]) - self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.slice2], X2[:,self.slice2], target[self.k1.Nparam:]) + self.k1.dK_dtheta(dL_dK*self._K2, X[:,self.slice1], X2[:,self.slice1], target[:self.k1.num_params]) + self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.slice2], X2[:,self.slice2], target[self.k1.num_params:]) def Kdiag(self,X,target): """Compute the 
diagonal of the covariance matrix associated to X.""" @@ -74,8 +74,8 @@ class prod(kernpart): K2 = np.zeros(X.shape[0]) self.k1.Kdiag(X[:,self.slice1],K1) self.k2.Kdiag(X[:,self.slice2],K2) - self.k1.dKdiag_dtheta(dL_dKdiag*K2,X[:,self.slice1],target[:self.k1.Nparam]) - self.k2.dKdiag_dtheta(dL_dKdiag*K1,X[:,self.slice2],target[self.k1.Nparam:]) + self.k1.dKdiag_dtheta(dL_dKdiag*K2,X[:,self.slice1],target[:self.k1.num_params]) + self.k2.dKdiag_dtheta(dL_dKdiag*K1,X[:,self.slice2],target[self.k1.num_params:]) def dK_dX(self,dL_dK,X,X2,target): """derivative of the covariance matrix with respect to X.""" diff --git a/GPy/kern/prod_orthogonal.py b/GPy/kern/prod_orthogonal.py index 0c0d5d98..237c9557 100644 --- a/GPy/kern/prod_orthogonal.py +++ b/GPy/kern/prod_orthogonal.py @@ -1,23 +1,23 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib #from scipy import integrate # This may not be necessary (Nicolas, 20th Feb) -class prod_orthogonal(kernpart): +class prod_orthogonal(Kernpart): """ Computes the product of 2 kernels :param k1, k2: the kernels to multiply - :type k1, k2: kernpart + :type k1, k2: Kernpart :rtype: kernel object """ def __init__(self,k1,k2): self.input_dim = k1.input_dim + k2.input_dim - self.Nparam = k1.Nparam + k2.Nparam + self.num_params = k1.num_params + k2.num_params self.name = k1.name + '' + k2.name self.k1 = k1 self.k2 = k2 @@ -30,8 +30,8 @@ class prod_orthogonal(kernpart): def _set_params(self,x): """set the value of the parameters.""" - self.k1._set_params(x[:self.k1.Nparam]) - self.k2._set_params(x[self.k1.Nparam:]) + self.k1._set_params(x[:self.k1.num_params]) + self.k2._set_params(x[self.k1.num_params:]) def _get_param_names(self): """return parameter names.""" @@ -45,11 +45,11 @@ class prod_orthogonal(kernpart): """derivative of the covariance matrix with respect to the 
parameters.""" self._K_computations(X,X2) if X2 is None: - self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.input_dim], None, target[:self.k1.Nparam]) - self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.input_dim:], None, target[self.k1.Nparam:]) + self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.input_dim], None, target[:self.k1.num_params]) + self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.input_dim:], None, target[self.k1.num_params:]) else: - self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.input_dim], X2[:,:self.k1.input_dim], target[:self.k1.Nparam]) - self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.input_dim:], X2[:,self.k1.input_dim:], target[self.k1.Nparam:]) + self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.input_dim], X2[:,:self.k1.input_dim], target[:self.k1.num_params]) + self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.input_dim:], X2[:,self.k1.input_dim:], target[self.k1.num_params:]) def Kdiag(self,X,target): """Compute the diagonal of the covariance matrix associated to X.""" @@ -64,8 +64,8 @@ class prod_orthogonal(kernpart): K2 = np.zeros(X.shape[0]) self.k1.Kdiag(X[:,:self.k1.input_dim],K1) self.k2.Kdiag(X[:,self.k1.input_dim:],K2) - self.k1.dKdiag_dtheta(dL_dKdiag*K2,X[:,:self.k1.input_dim],target[:self.k1.Nparam]) - self.k2.dKdiag_dtheta(dL_dKdiag*K1,X[:,self.k1.input_dim:],target[self.k1.Nparam:]) + self.k1.dKdiag_dtheta(dL_dKdiag*K2,X[:,:self.k1.input_dim],target[:self.k1.num_params]) + self.k2.dKdiag_dtheta(dL_dKdiag*K1,X[:,self.k1.input_dim:],target[self.k1.num_params:]) def dK_dX(self,dL_dK,X,X2,target): """derivative of the covariance matrix with respect to X.""" diff --git a/GPy/kern/rational_quadratic.py b/GPy/kern/rational_quadratic.py index a9139940..d1e7a7e3 100644 --- a/GPy/kern/rational_quadratic.py +++ b/GPy/kern/rational_quadratic.py @@ -2,10 +2,10 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -class rational_quadratic(kernpart): +class 
rational_quadratic(Kernpart): """ rational quadratic kernel @@ -21,13 +21,13 @@ class rational_quadratic(kernpart): :type lengthscale: float :param power: the power :math:`\\alpha` :type power: float - :rtype: kernpart object + :rtype: Kernpart object """ def __init__(self,input_dim,variance=1.,lengthscale=1.,power=1.): assert input_dim == 1, "For this kernel we assume input_dim=1" self.input_dim = input_dim - self.Nparam = 3 + self.num_params = 3 self.name = 'rat_quad' self.variance = variance self.lengthscale = lengthscale diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py index ccf7112f..2d316234 100644 --- a/GPy/kern/rbf.py +++ b/GPy/kern/rbf.py @@ -2,13 +2,13 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib from scipy import weave from ..util.linalg import tdot -class rbf(kernpart): +class rbf(Kernpart): """ Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel: @@ -36,14 +36,14 @@ class rbf(kernpart): self.name = 'rbf' self.ARD = ARD if not ARD: - self.Nparam = 2 + self.num_params = 2 if lengthscale is not None: lengthscale = np.asarray(lengthscale) assert lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel" else: lengthscale = np.ones(1) else: - self.Nparam = self.input_dim + 1 + self.num_params = self.input_dim + 1 if lengthscale is not None: lengthscale = np.asarray(lengthscale) assert lengthscale.size == self.input_dim, "bad number of lengthscales" @@ -67,7 +67,7 @@ class rbf(kernpart): return np.hstack((self.variance, self.lengthscale)) def _set_params(self, x): - assert x.size == (self.Nparam) + assert x.size == (self.num_params) self.variance = x[0] self.lengthscale = x[1:] self.lengthscale2 = np.square(self.lengthscale) @@ -76,7 +76,7 @@ class rbf(kernpart): self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S def _get_param_names(self): - if self.Nparam == 
2: + if self.num_params == 2: return ['variance', 'lengthscale'] else: return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)] diff --git a/GPy/kern/rbfcos.py b/GPy/kern/rbfcos.py index 047dad0b..b1e99d3c 100644 --- a/GPy/kern/rbfcos.py +++ b/GPy/kern/rbfcos.py @@ -3,10 +3,10 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -class rbfcos(kernpart): +class rbfcos(Kernpart): def __init__(self,input_dim,variance=1.,frequencies=None,bandwidths=None,ARD=False): self.input_dim = input_dim self.name = 'rbfcos' @@ -14,9 +14,9 @@ class rbfcos(kernpart): print "Warning: the rbfcos kernel requires a lot of memory for high dimensional inputs" self.ARD = ARD - #set the default frequencies and bandwidths, appropriate Nparam + #set the default frequencies and bandwidths, appropriate num_params if ARD: - self.Nparam = 2*self.input_dim + 1 + self.num_params = 2*self.input_dim + 1 if frequencies is not None: frequencies = np.asarray(frequencies) assert frequencies.size == self.input_dim, "bad number of frequencies" @@ -28,7 +28,7 @@ class rbfcos(kernpart): else: bandwidths = np.ones(self.input_dim) else: - self.Nparam = 3 + self.num_params = 3 if frequencies is not None: frequencies = np.asarray(frequencies) assert frequencies.size == 1, "Exactly one frequency needed for non-ARD kernel" @@ -51,7 +51,7 @@ class rbfcos(kernpart): return np.hstack((self.variance,self.frequencies, self.bandwidths)) def _set_params(self,x): - assert x.size==(self.Nparam) + assert x.size==(self.num_params) if self.ARD: self.variance = x[0] self.frequencies = x[1:1+self.input_dim] @@ -60,7 +60,7 @@ class rbfcos(kernpart): self.variance, self.frequencies, self.bandwidths = x def _get_param_names(self): - if self.Nparam == 3: + if self.num_params == 3: return ['variance','frequency','bandwidth'] else: return ['variance']+['frequency_%i'%i for i in range(self.input_dim)]+['bandwidth_%i'%i 
for i in range(self.input_dim)] @@ -106,7 +106,7 @@ class rbfcos(kernpart): self._dist2 = np.square(self._dist) #ensure the next section is computed: - self._params = np.empty(self.Nparam) + self._params = np.empty(self.num_params) if not np.all(self._params == self._get_params()): self._params == self._get_params().copy() diff --git a/GPy/kern/spline.py b/GPy/kern/spline.py index fbfc7573..f2802180 100644 --- a/GPy/kern/spline.py +++ b/GPy/kern/spline.py @@ -2,14 +2,14 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib def theta(x): """Heaviside step function""" return np.where(x>=0.,1.,0.) -class spline(kernpart): +class spline(Kernpart): """ Spline kernel @@ -23,7 +23,7 @@ class spline(kernpart): def __init__(self,input_dim,variance=1.,lengthscale=1.): self.input_dim = input_dim assert self.input_dim==1 - self.Nparam = 1 + self.num_params = 1 self.name = 'spline' self._set_params(np.squeeze(variance)) diff --git a/GPy/kern/symmetric.py b/GPy/kern/symmetric.py index be26e6db..c7099a6f 100644 --- a/GPy/kern/symmetric.py +++ b/GPy/kern/symmetric.py @@ -1,18 +1,18 @@ # Copyright (c) 2012 James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -class symmetric(kernpart): +class symmetric(Kernpart): """ Symmetrical kernels :param k: the kernel to symmetrify - :type k: kernpart + :type k: Kernpart :param transform: the transform to use in symmetrification (allows symmetry on specified axes) :type transform: A numpy array (input_dim x input_dim) specifiying the transform - :rtype: kernpart + :rtype: Kernpart """ def __init__(self,k,transform=None): @@ -21,7 +21,7 @@ class symmetric(kernpart): assert transform.shape == (k.input_dim, k.input_dim) self.transform = transform self.input_dim = k.input_dim - self.Nparam = k.Nparam + self.num_params = k.num_params self.name 
= k.name + '_symm' self.k = k self._set_params(k._get_params()) diff --git a/GPy/kern/sympykern.py b/GPy/kern/sympykern.py index f8d66872..def1bc5f 100644 --- a/GPy/kern/sympykern.py +++ b/GPy/kern/sympykern.py @@ -9,9 +9,9 @@ import sys current_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) import tempfile import pdb -from kernpart import kernpart +from kernpart import Kernpart -class spkern(kernpart): +class spkern(Kernpart): """ A kernel object, where all the hard work in done by sympy. @@ -38,12 +38,12 @@ class spkern(kernpart): self.input_dim = len(self._sp_x) assert self.input_dim == input_dim self._sp_theta = sorted([e for e in sp_vars if not (e.name[0]=='x' or e.name[0]=='z')],key=lambda e:e.name) - self.Nparam = len(self._sp_theta) + self.num_params = len(self._sp_theta) #deal with param if param is None: - param = np.ones(self.Nparam) - assert param.size==self.Nparam + param = np.ones(self.num_params) + assert param.size==self.num_params self._set_params(param) #Differentiate! @@ -115,7 +115,7 @@ class spkern(kernpart): #Here's some code to do the looping for K arglist = ", ".join(["X[i*input_dim+%s]"%x.name[1:] for x in self._sp_x]\ + ["Z[j*input_dim+%s]"%z.name[1:] for z in self._sp_z]\ - + ["param[%i]"%i for i in range(self.Nparam)]) + + ["param[%i]"%i for i in range(self.num_params)]) self._K_code =\ """ diff --git a/GPy/kern/white.py b/GPy/kern/white.py index 92e25bde..41f075c3 100644 --- a/GPy/kern/white.py +++ b/GPy/kern/white.py @@ -2,9 +2,9 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -class white(kernpart): +class white(Kernpart): """ White noise kernel. 
@@ -15,7 +15,7 @@ class white(kernpart): """ def __init__(self,input_dim,variance=1.): self.input_dim = input_dim - self.Nparam = 1 + self.num_params = 1 self.name = 'white' self._set_params(np.array([variance]).flatten()) self._psi1 = 0 # TODO: more elegance here diff --git a/GPy/models/generalized_fitc.py b/GPy/models/generalized_fitc.py index 9072b9b6..70fedcbc 100644 --- a/GPy/models/generalized_fitc.py +++ b/GPy/models/generalized_fitc.py @@ -2,21 +2,14 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -import pylab as pb -from ..util.linalg import mdot, jitchol, chol_inv, pdinv, trace_dot -from ..util.plot import gpplot -from .. import kern -from scipy import stats, linalg -<<<<<<< HEAD:GPy/models/generalized_FITC.py -from sparse_GP import sparse_GP -======= -from ..core import SparseGP ->>>>>>> 7040b26f41f382edfdca3d3f7b689b9bbfc1a54f:GPy/models/generalized_fitc.py +from scipy import linalg +from GPy.core.sparse_gp import SparseGP +from GPy.util.linalg import mdot -def backsub_both_sides(L,X): +def backsub_both_sides(L, X): """ Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky""" - tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1) - return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T + tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1) + return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T class GeneralizedFITC(SparseGP): @@ -45,17 +38,13 @@ class GeneralizedFITC(SparseGP): self.num_inducing = self.Z.shape[0] self.true_precision = likelihood.precision -<<<<<<< HEAD:GPy/models/generalized_FITC.py - sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False) -======= super(GeneralizedFITC, self).__init__(X, likelihood, kernel=kernel, Z=self.Z, X_variance=X_variance, normalize_X=normalize_X) self._set_params(self._get_params()) ->>>>>>> 
7040b26f41f382edfdca3d3f7b689b9bbfc1a54f:GPy/models/generalized_fitc.py def _set_params(self, p): - self.Z = p[:self.num_inducing*self.input_dim].reshape(self.num_inducing, self.input_dim) - self.kern._set_params(p[self.Z.size:self.Z.size+self.kern.Nparam]) - self.likelihood._set_params(p[self.Z.size+self.kern.Nparam:]) + self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim) + self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.num_params]) + self.likelihood._set_params(p[self.Z.size + self.kern.num_params:]) self._compute_kernel_matrices() self._computations() self._FITC_computations() @@ -73,9 +62,9 @@ class GeneralizedFITC(SparseGP): if self.has_uncertain_inputs: raise NotImplementedError, "FITC approximation not implemented for uncertain inputs" else: - self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0) + self.likelihood.fit_FITC(self.Kmm, self.psi1, self.psi0) self.true_precision = self.likelihood.precision # Save the true precision - self.likelihood.precision = self.true_precision/(1. + self.true_precision*self.Diag0[:,None]) # Add the diagonal element of the FITC approximation + self.likelihood.precision = self.true_precision / (1. + self.true_precision * self.Diag0[:, None]) # Add the diagonal element of the FITC approximation self._set_params(self._get_params()) # update the GP def _FITC_computations(self): @@ -87,37 +76,37 @@ class GeneralizedFITC(SparseGP): - removes the extra terms computed in the SparseGP approximation - computes the likelihood gradients wrt the true precision. 
""" - #NOTE the true precison is now 'true_precision' not 'precision' + # NOTE the true precison is now 'true_precision' not 'precision' if self.likelihood.is_heteroscedastic: # Compute generalized FITC's diagonal term of the covariance - self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.num_inducing),lower=1) - Lmipsi1 = np.dot(self.Lmi,self.psi1) - self.Qnn = np.dot(Lmipsi1.T,Lmipsi1) - #self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm) - #self.Qnn = mdot(self.psi1.T,self.Kmmi,self.psi1) - #a = kj + self.Lmi, info = linalg.lapack.flapack.dtrtrs(self.Lm, np.eye(self.num_inducing), lower=1) + Lmipsi1 = np.dot(self.Lmi, self.psi1) + self.Qnn = np.dot(Lmipsi1.T, Lmipsi1) + # self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm) + # self.Qnn = mdot(self.psi1.T,self.Kmmi,self.psi1) + # a = kj self.Diag0 = self.psi0 - np.diag(self.Qnn) - Iplus_Dprod_i = 1./(1.+ self.Diag0 * self.true_precision.flatten()) + Iplus_Dprod_i = 1. / (1. + self.Diag0 * self.true_precision.flatten()) self.Diag = self.Diag0 * Iplus_Dprod_i - self.P = Iplus_Dprod_i[:,None] * self.psi1.T - self.RPT0 = np.dot(self.Lmi,self.psi1) - self.L = np.linalg.cholesky(np.eye(self.num_inducing) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T)) - self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1) - self.RPT = np.dot(self.R,self.P.T) - self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT) + self.P = Iplus_Dprod_i[:, None] * self.psi1.T + self.RPT0 = np.dot(self.Lmi, self.psi1) + self.L = np.linalg.cholesky(np.eye(self.num_inducing) + np.dot(self.RPT0, ((1. 
- Iplus_Dprod_i) / self.Diag0)[:, None] * self.RPT0.T)) + self.R, info = linalg.lapack.dtrtrs(self.L, self.Lmi, lower=1) + self.RPT = np.dot(self.R, self.P.T) + self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T, self.RPT) self.w = self.Diag * self.likelihood.v_tilde - self.Gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde)) - self.mu = self.w + np.dot(self.P,self.Gamma) + self.Gamma = np.dot(self.R.T, np.dot(self.RPT, self.likelihood.v_tilde)) + self.mu = self.w + np.dot(self.P, self.Gamma) # Remove extra term from dL_dpsi1 - self.dL_dpsi1 -= mdot(self.Lmi.T,Lmipsi1*self.likelihood.precision.flatten().reshape(1,self.num_data)) + self.dL_dpsi1 -= mdot(self.Lmi.T,Lmipsi1 * self.likelihood.precision.flatten().reshape(1,self.num_data)) #self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm) #self.dL_dpsi1 -= mdot(self.Kmmi,self.psi1*self.likelihood.precision.flatten().reshape(1,self.num_data)) #dB #########333333 - #self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B) + # self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B) #########333333 @@ -125,16 +114,16 @@ class GeneralizedFITC(SparseGP): else: raise NotImplementedError, "homoscedastic fitc not implemented" # Remove extra term from dL_dpsi1 - #self.dL_dpsi1 += -mdot(self.Kmmi,self.psi1*self.likelihood.precision) #dB + # self.dL_dpsi1 += -mdot(self.Kmmi,self.psi1*self.likelihood.precision) #dB sf = self.scale_factor - sf2 = sf**2 + sf2 = sf ** 2 # Remove extra term from dL_dKmm - self.dL_dKmm += 0.5 * self.input_dim * mdot(self.Lmi.T, self.A, self.Lmi)*sf2 # dB + self.dL_dKmm += 0.5 * self.input_dim * mdot(self.Lmi.T, self.A, self.Lmi) * sf2 # dB self.dL_dpsi0 = None - #the partial derivative vector for the likelihood + # the partial derivative vector for the likelihood if self.likelihood.Nparams == 0: self.partial_for_likelihood = None elif self.likelihood.is_heteroscedastic: @@ -151,18 +140,18 @@ class GeneralizedFITC(SparseGP): """ Compute and return the derivative of the log marginal 
likelihood wrt the parameters of the kernel """ - dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm,self.Z) + dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm, self.Z) if self.has_uncertain_inputs: raise NotImplementedError, "heteroscedatic derivates not implemented" else: - #NOTE in SparseGP this would include the gradient wrt psi0 - dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1,self.Z,self.X) + # NOTE in SparseGP this would include the gradient wrt psi0 + dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1, self.Z, self.X) return dL_dtheta def log_likelihood(self): """ Compute the (lower bound on the) log marginal likelihood """ - sf2 = self.scale_factor**2 + sf2 = self.scale_factor ** 2 if self.likelihood.is_heteroscedastic: A = -0.5*self.num_data*self.input_dim*np.log(2.*np.pi) +0.5*np.sum(np.log(self.likelihood.precision)) -0.5*np.sum(self.V*self.likelihood.Y) else: @@ -172,7 +161,7 @@ class GeneralizedFITC(SparseGP): D = 0.5*np.sum(np.square(self._LBi_Lmi_psi1V)) #self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T) #D_ = 0.5*np.trace(self.Cpsi1VVpsi1) - return A+C+D + return A + C + D def _raw_predict(self, Xnew, which_parts, full_cov=False): if self.likelihood.is_heteroscedastic: @@ -191,30 +180,30 @@ class GeneralizedFITC(SparseGP): # = I - [RPT0] * (U*U.T)^-1 * [RPT0].T # = I - V.T * V U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn) - V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1) - C = np.eye(self.num_inducing) - np.dot(V.T,V) - mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:]) - #self.C = C - #self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T - #self.mu_u = mu_u - #self.U = U + V, info = linalg.flapack.dtrtrs(U, self.RPT0.T, lower=1) + C = np.eye(self.num_inducing) - np.dot(V.T, V) + mu_u = np.dot(C, self.RPT0) * (1. 
/ self.Diag0[None, :]) + # self.C = C + # self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T + # self.mu_u = mu_u + # self.U = U # q(u|y) = N(u| R0i*mu_H,R0i*Sigma_H*R0i.T) - mu_H = np.dot(mu_u,self.mu) + mu_H = np.dot(mu_u, self.mu) self.mu_H = mu_H - Sigma_H = C + np.dot(mu_u,np.dot(self.Sigma,mu_u.T)) + Sigma_H = C + np.dot(mu_u, np.dot(self.Sigma, mu_u.T)) # q(f_star|y) = N(f_star|mu_star,sigma2_star) Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts) - KR0T = np.dot(Kx.T,self.Lmi.T) - mu_star = np.dot(KR0T,mu_H) + KR0T = np.dot(Kx.T, self.Lmi.T) + mu_star = np.dot(KR0T, mu_H) if full_cov: - Kxx = self.kern.K(Xnew,which_parts=which_parts) - var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.num_inducing),KR0T.T)) + Kxx = self.kern.K(Xnew, which_parts=which_parts) + var = Kxx + np.dot(KR0T, np.dot(Sigma_H - np.eye(self.num_inducing), KR0T.T)) else: - Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts) - Kxx_ = self.kern.K(Xnew,which_parts=which_parts) # TODO: RA, is this line needed? - var_ = Kxx_ + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.num_inducing),KR0T.T)) # TODO: RA, is this line needed? - var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.num_inducing),KR0T.T),0))[:,None] - return mu_star[:,None],var + Kxx = self.kern.Kdiag(Xnew, which_parts=which_parts) + Kxx_ = self.kern.K(Xnew, which_parts=which_parts) # TODO: RA, is this line needed? + var_ = Kxx_ + np.dot(KR0T, np.dot(Sigma_H - np.eye(self.num_inducing), KR0T.T)) # TODO: RA, is this line needed? + var = (Kxx + np.sum(KR0T.T * np.dot(Sigma_H - np.eye(self.num_inducing), KR0T.T), 0))[:, None] + return mu_star[:, None], var else: raise NotImplementedError, "homoscedastic fitc not implemented" """ diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py index ba861831..e602a59a 100644 --- a/GPy/models/gplvm.py +++ b/GPy/models/gplvm.py @@ -6,7 +6,7 @@ import numpy as np import pylab as pb import sys, pdb from .. 
import kern -from ..core import model +from ..core import Model from ..util.linalg import pdinv, PCA from ..core import GP from ..likelihoods import Gaussian diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index 898b9ae1..b078fd27 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -3,7 +3,7 @@ Created on 10 Apr 2013 @author: Max Zwiessele ''' -from GPy.core import model +from GPy.core import Model from GPy.core import SparseGP from GPy.util.linalg import PCA import numpy @@ -12,7 +12,7 @@ import pylab from GPy.kern.kern import kern from GPy.models.bayesian_gplvm import BayesianGPLVM -class MRD(model): +class MRD(Model): """ Do MRD on given Datasets in Ylist. All Ys in likelihood_list are in [N x Dn], where Dn can be different per Yn, @@ -78,7 +78,7 @@ class MRD(model): self.NQ = self.num_data * self.input_dim self.MQ = self.num_inducing * self.input_dim - model.__init__(self) # @UndefinedVariable + Model.__init__(self) # @UndefinedVariable self._set_params(self._get_params()) @property diff --git a/GPy/testing/examples_tests.py b/GPy/testing/examples_tests.py index 0ff0d3de..55c783f8 100644 --- a/GPy/testing/examples_tests.py +++ b/GPy/testing/examples_tests.py @@ -12,21 +12,21 @@ from nose.tools import nottest import sys class ExamplesTests(unittest.TestCase): - def _checkgrad(self, model): - self.assertTrue(model.checkgrad()) + def _checkgrad(self, Model): + self.assertTrue(Model.checkgrad()) - def _model_instance(self, model): - self.assertTrue(isinstance(model, GPy.models)) + def _model_instance(self, Model): + self.assertTrue(isinstance(Model, GPy.models)) """ -def model_instance_generator(model): +def model_instance_generator(Model): def check_model_returned(self): - self._model_instance(model) + self._model_instance(Model) return check_model_returned -def checkgrads_generator(model): +def checkgrads_generator(Model): def model_checkgrads(self): - self._checkgrad(model) + self._checkgrad(Model) return model_checkgrads """ @@ -35,10 +35,9 @@ def 
model_checkgrads(model): #assert model.checkgrad() return model.checkgrad() - def model_instance(model): #assert isinstance(model, GPy.core.model) - return isinstance(model, GPy.core.model) + return isinstance(model, GPy.core.Model) @nottest def test_models(): @@ -69,12 +68,12 @@ def test_models(): # Create tests for instance check """ - test = model_instance_generator(model) + test = model_instance_generator(Model) test.__name__ = 'test_instance_%s' % example[0] setattr(ExamplesTests, test.__name__, test) #Create tests for checkgrads check - test = checkgrads_generator(model) + test = checkgrads_generator(Model) test.__name__ = 'test_checkgrads_%s' % example[0] setattr(ExamplesTests, test.__name__, test) """ diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 2d97c82c..98c75827 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -21,7 +21,7 @@ class KernelTests(unittest.TestCase): """ X = np.random.rand(30, 4) K = np.dot(X, X.T) - kernel = GPy.kern.fixed(4, K) + kernel = GPy.kern.Fixed(4, K) Y = np.ones((30,1)) m = GPy.models.GPRegression(X,Y,kernel=kernel) self.assertTrue(m.checkgrad()) diff --git a/GPy/testing/psi_stat_gradient_tests.py b/GPy/testing/psi_stat_gradient_tests.py index ce16bf9b..c110d270 100644 --- a/GPy/testing/psi_stat_gradient_tests.py +++ b/GPy/testing/psi_stat_gradient_tests.py @@ -8,9 +8,9 @@ import numpy import GPy import itertools -from GPy.core import model +from GPy.core import Model -class PsiStatModel(model): +class PsiStatModel(Model): def __init__(self, which, X, X_variance, Z, num_inducing, kernel): self.which = which self.X = X @@ -64,12 +64,9 @@ class DPsiStatTest(unittest.TestCase): def testPsi0(self): for k in self.kernels: - m = PsiStatModel('psi1', X=self.X, X_variance=self.X_var, Z=self.Z, - num_inducing=self.num_inducing, kernel=k) - try: - assert m.checkgrad(), "{} x psi0".format("+".join(map(lambda x: x.name, k.parts))) - except: - import ipdb;ipdb.set_trace() + m = 
PsiStatModel('psi0', X=self.X, X_variance=self.X_var, Z=self.Z, + num_inducing=self.num_inducing, kernel=k) + assert m.checkgrad(), "{} x psi0".format("+".join(map(lambda x: x.name, k.parts))) # def testPsi1(self): # for k in self.kernels: diff --git a/GPy/util/plot_latent.py b/GPy/util/plot_latent.py index c09b5c93..e147d840 100644 --- a/GPy/util/plot_latent.py +++ b/GPy/util/plot_latent.py @@ -2,9 +2,9 @@ import pylab as pb import numpy as np from .. import util -def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, marker='o', s=40): +def plot_latent(Model, labels=None, which_indices=None, resolution=50, ax=None, marker='o', s=40): """ - :param labels: a np.array of size model.N containing labels for the points (can be number, strings, etc) + :param labels: a np.array of size Model.N containing labels for the points (can be number, strings, etc) :param resolution: the resolution of the grid on which to evaluate the predictive variance """ if ax is None: @@ -12,26 +12,26 @@ def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, util.plot.Tango.reset() if labels is None: - labels = np.ones(model.N) + labels = np.ones(Model.N) if which_indices is None: - if model.input_dim==1: + if Model.input_dim==1: input_1 = 0 input_2 = None - if model.input_dim==2: + if Model.input_dim==2: input_1, input_2 = 0,1 else: try: - input_1, input_2 = np.argsort(model.input_sensitivity())[:2] + input_1, input_2 = np.argsort(Model.input_sensitivity())[:2] except: raise ValueError, "cannot Atomatically determine which dimensions to plot, please pass 'which_indices'" else: input_1, input_2 = which_indices #first, plot the output variance as a function of the latent space - Xtest, xx,yy,xmin,xmax = util.plot.x_frame2D(model.X[:,[input_1, input_2]],resolution=resolution) - Xtest_full = np.zeros((Xtest.shape[0], model.X.shape[1])) + Xtest, xx,yy,xmin,xmax = util.plot.x_frame2D(Model.X[:,[input_1, input_2]],resolution=resolution) + Xtest_full 
= np.zeros((Xtest.shape[0], Model.X.shape[1])) Xtest_full[:, :2] = Xtest - mu, var, low, up = model.predict(Xtest_full) + mu, var, low, up = Model.predict(Xtest_full) var = var[:, :1] ax.imshow(var.reshape(resolution, resolution).T, extent=[xmin[0], xmax[0], xmin[1], xmax[1]], cmap=pb.cm.binary,interpolation='bilinear',origin='lower') @@ -55,12 +55,12 @@ def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, m = marker index = np.nonzero(labels==ul)[0] - if model.input_dim==1: - x = model.X[index,input_1] + if Model.input_dim==1: + x = Model.X[index,input_1] y = np.zeros(index.size) else: - x = model.X[index,input_1] - y = model.X[index,input_2] + x = Model.X[index,input_1] + y = Model.X[index,input_2] ax.scatter(x, y, marker=m, s=s, color=util.plot.Tango.nextMedium(), label=this_label) ax.set_xlabel('latent dimension %i'%input_1) @@ -76,16 +76,16 @@ def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, return ax -def plot_latent_indices(model, which_indices=None, *args, **kwargs): +def plot_latent_indices(Model, which_indices=None, *args, **kwargs): if which_indices is None: try: - input_1, input_2 = np.argsort(model.input_sensitivity())[:2] + input_1, input_2 = np.argsort(Model.input_sensitivity())[:2] except: raise ValueError, "cannot Automatically determine which dimensions to plot, please pass 'which_indices'" else: input_1, input_2 = which_indices - ax = plot_latent(model, which_indices=[input_1, input_2], *args, **kwargs) + ax = plot_latent(Model, which_indices=[input_1, input_2], *args, **kwargs) # TODO: Here test if there are inducing points... 
- ax.plot(model.Z[:, input_1], model.Z[:, input_2], '^w') + ax.plot(Model.Z[:, input_1], Model.Z[:, input_2], '^w') return ax \ No newline at end of file diff --git a/GPy/util/visualize.py b/GPy/util/visualize.py index a3c10b97..66322c15 100644 --- a/GPy/util/visualize.py +++ b/GPy/util/visualize.py @@ -43,16 +43,16 @@ class vector_show(data_show): class lvm(data_show): - def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]): - """Visualize a latent variable model + def __init__(self, vals, Model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]): + """Visualize a latent variable Model - :param model: the latent variable model to visualize. + :param Model: the latent variable Model to visualize. :param data_visualize: the object used to visualize the data which has been modelled. :type data_visualize: visualize.data_show type. :param latent_axes: the axes where the latent visualization should be plotted. """ if vals == None: - vals = model.X[0] + vals = Model.X[0] data_show.__init__(self, vals, axes=latent_axes) @@ -68,13 +68,13 @@ class lvm(data_show): self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_enter_event', self.on_enter) self.data_visualize = data_visualize - self.model = model + self.Model = Model self.latent_axes = latent_axes self.sense_axes = sense_axes self.called = False self.move_on = False self.latent_index = latent_index - self.latent_dim = model.input_dim + self.latent_dim = Model.input_dim # The red cross which shows current latent point. 
self.latent_values = vals @@ -85,7 +85,7 @@ class lvm(data_show): def modify(self, vals): """When latent values are modified update the latent representation and ulso update the output visualization.""" self.vals = vals.copy() - y = self.model.predict(self.vals)[0] + y = self.Model.predict(self.vals)[0] self.data_visualize.modify(y) self.latent_handle.set_data(self.vals[self.latent_index[0]], self.vals[self.latent_index[1]]) self.axes.figure.canvas.draw() @@ -113,15 +113,15 @@ class lvm(data_show): # A click in the bar chart axis for selection a dimension. if self.sense_axes != None: self.sense_axes.cla() - self.sense_axes.bar(np.arange(self.model.input_dim),1./self.model.input_sensitivity(),color='b') + self.sense_axes.bar(np.arange(self.Model.input_dim),1./self.Model.input_sensitivity(),color='b') if self.latent_index[1] == self.latent_index[0]: - self.sense_axes.bar(np.array(self.latent_index[0]),1./self.model.input_sensitivity()[self.latent_index[0]],color='y') - self.sense_axes.bar(np.array(self.latent_index[1]),1./self.model.input_sensitivity()[self.latent_index[1]],color='y') + self.sense_axes.bar(np.array(self.latent_index[0]),1./self.Model.input_sensitivity()[self.latent_index[0]],color='y') + self.sense_axes.bar(np.array(self.latent_index[1]),1./self.Model.input_sensitivity()[self.latent_index[1]],color='y') else: - self.sense_axes.bar(np.array(self.latent_index[0]),1./self.model.input_sensitivity()[self.latent_index[0]],color='g') - self.sense_axes.bar(np.array(self.latent_index[1]),1./self.model.input_sensitivity()[self.latent_index[1]],color='r') + self.sense_axes.bar(np.array(self.latent_index[0]),1./self.Model.input_sensitivity()[self.latent_index[0]],color='g') + self.sense_axes.bar(np.array(self.latent_index[1]),1./self.Model.input_sensitivity()[self.latent_index[1]],color='r') self.sense_axes.figure.canvas.draw() @@ -131,21 +131,21 @@ class lvm_subplots(lvm): latent_axes is a np array of dimension np.ceil(input_dim/2), one for each pair of the 
latent dimensions. """ - def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None): - self.nplots = int(np.ceil(model.input_dim/2.))+1 + def __init__(self, vals, Model, data_visualize, latent_axes=None, sense_axes=None): + self.nplots = int(np.ceil(Model.input_dim/2.))+1 assert len(latent_axes)==self.nplots if vals==None: - vals = model.X[0, :] + vals = Model.X[0, :] self.latent_values = vals for i, axis in enumerate(latent_axes): if i == self.nplots-1: - if self.nplots*2!=model.input_dim: + if self.nplots*2!=Model.input_dim: latent_index = [i*2, i*2] - lvm.__init__(self, self.latent_vals, model, data_visualize, axis, sense_axes, latent_index=latent_index) + lvm.__init__(self, self.latent_vals, Model, data_visualize, axis, sense_axes, latent_index=latent_index) else: latent_index = [i*2, i*2+1] - lvm.__init__(self, self.latent_vals, model, data_visualize, axis, latent_index=latent_index) + lvm.__init__(self, self.latent_vals, Model, data_visualize, axis, latent_index=latent_index) @@ -158,7 +158,7 @@ class lvm_dimselect(lvm): GPy.examples.dimensionality_reduction.BGPVLM_oil() """ - def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0, 1]): + def __init__(self, vals, Model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0, 1]): if latent_axes==None and sense_axes==None: self.fig,(latent_axes,self.sense_axes) = plt.subplots(1,2) elif sense_axes==None: @@ -167,14 +167,14 @@ class lvm_dimselect(lvm): else: self.sense_axes = sense_axes - lvm.__init__(self,vals,model,data_visualize,latent_axes,sense_axes,latent_index) + lvm.__init__(self,vals,Model,data_visualize,latent_axes,sense_axes,latent_index) print "use left and right mouse butons to select dimensions" def on_click(self, event): if event.inaxes==self.sense_axes: - new_index = max(0,min(int(np.round(event.xdata-0.5)),self.model.input_dim-1)) + new_index = max(0,min(int(np.round(event.xdata-0.5)),self.Model.input_dim-1)) if 
event.button == 1: # Make it red if and y-axis (red=port=left) if it is a left button click self.latent_index[1] = new_index @@ -185,7 +185,7 @@ class lvm_dimselect(lvm): self.show_sensitivities() self.latent_axes.cla() - self.model.plot_latent(which_indices=self.latent_index, + self.Model.plot_latent(which_indices=self.latent_index, ax=self.latent_axes) self.latent_handle = self.latent_axes.plot([0],[0],'rx',mew=2)[0] self.modify(self.latent_values) @@ -199,7 +199,7 @@ class lvm_dimselect(lvm): def on_leave(self,event): latent_values = self.latent_values.copy() - y = self.model.predict(latent_values[None,:])[0] + y = self.Model.predict(latent_values[None,:])[0] self.data_visualize.modify(y)