diff --git a/GPy/core/fitc.py b/GPy/core/fitc.py index a6bfb3d5..670c0354 100644 --- a/GPy/core/fitc.py +++ b/GPy/core/fitc.py @@ -57,8 +57,8 @@ class FITC(SparseGP): def _set_params(self, p): self.Z = p[:self.M * self.input_dim].reshape(self.M, self.input_dim) - self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam]) - self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:]) + self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.num_params]) + self.likelihood._set_params(p[self.Z.size + self.kern.num_params:]) self._compute_kernel_matrices() self._computations() @@ -198,14 +198,14 @@ class FITC(SparseGP): aux_1 = self.likelihood.Y.T * np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1) aux_2 = np.dot(LBiLmipsi1.T,self._LBi_Lmi_psi1V) - dA_dnoise = 0.5 * self.D * (dbstar_dnoise/self.beta_star).sum() - 0.5 * self.D * np.sum(self.likelihood.Y**2 * dbstar_dnoise) + dA_dnoise = 0.5 * self.input_dim * (dbstar_dnoise/self.beta_star).sum() - 0.5 * self.input_dim * np.sum(self.likelihood.Y**2 * dbstar_dnoise) dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T) dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T) dD_dnoise_1 = mdot(self.V_star*LBiLmipsi1.T,LBiLmipsi1*dbstar_dnoise.T*self.likelihood.Y.T) alpha = mdot(LBiLmipsi1,self.V_star) alpha_ = mdot(LBiLmipsi1.T,alpha) - dD_dnoise_2 = -0.5 * self.D * np.sum(alpha_**2 * dbstar_dnoise ) + dD_dnoise_2 = -0.5 * self.input_dim * np.sum(alpha_**2 * dbstar_dnoise ) dD_dnoise_1 = mdot(self.V_star.T,self.psi1.T,self.Lmi.T,self.LBi.T,self.LBi,self.Lmi,self.psi1,dbstar_dnoise*self.likelihood.Y) dD_dnoise_2 = 0.5*mdot(self.V_star.T,self.psi1.T,Hi,self.psi1,dbstar_dnoise*self.psi1.T,Hi,self.psi1,self.V_star) @@ -270,8 +270,8 @@ class FITC(SparseGP): # q(u|f) = N(u| R0i*mu_u*f, R0i*C*R0i.T) # Ci = I + (RPT0)Di(RPT0).T - # C = I - [RPT0] * (D+[RPT0].T*[RPT0])^-1*[RPT0].T - # = I - [RPT0] * (D + self.Qnn)^-1 * [RPT0].T + # C = I - [RPT0] * (input_dim+[RPT0].T*[RPT0])^-1*[RPT0].T + # = I - [RPT0] * (input_dim + self.Qnn)^-1 * [RPT0].T # = I - [RPT0] * (U*U.T)^-1 * [RPT0].T # = I - V.T * V U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn) diff --git a/GPy/core/gp_base.py b/GPy/core/gp_base.py index 11b4726b..c236115e 100644 --- a/GPy/core/gp_base.py +++ b/GPy/core/gp_base.py @@ -1,12 +1,12 @@ import numpy as np -import model from .. 
import kern from ..util.plot import gpplot, Tango, x_frame1D, x_frame2D import pylab as pb +from GPy.core.model import Model -class GPBase(model.model): +class GPBase(Model): """ - Gaussian Process model for holding shared behaviour between + Gaussian Process Model for holding shared behaviour between sprase_GP and GP models """ @@ -28,7 +28,7 @@ class GPBase(model.model): self._Xmean = np.zeros((1, self.input_dim)) self._Xstd = np.ones((1, self.input_dim)) - model.model.__init__(self) + Model.__init__(self) # All leaf nodes should call self._set_params(self._get_params()) at # the end @@ -84,8 +84,8 @@ class GPBase(model.model): Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits, resolution) m, v = self._raw_predict(Xnew, which_parts=which_parts) m = m.reshape(resolution, resolution).T - ax.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) - ax.scatter(self.X[:, 0], self.X[:, 1], 40, self.likelihood.Y, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max()) + ax.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) # @UndefinedVariable + ax.scatter(self.X[:, 0], self.X[:, 1], 40, self.likelihood.Y, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max()) # @UndefinedVariable ax.set_xlim(xmin[0], xmax[0]) ax.set_ylim(xmin[1], xmax[1]) else: @@ -94,9 +94,9 @@ class GPBase(model.model): def plot(self, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, samples=0, fignum=None, ax=None): """ TODO: Docstrings! + :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure - """ # TODO include samples if which_data == 'all': @@ -111,7 +111,7 @@ class GPBase(model.model): Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits) - m, var, lower, upper = self.predict(Xnew, which_parts=which_parts) + m, _, lower, upper = self.predict(Xnew, which_parts=which_parts) for d in range(m.shape[1]): gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], axes=ax) ax.plot(Xu[which_data], self.likelihood.data[which_data, d], 'kx', mew=1.5) @@ -122,13 +122,13 @@ class GPBase(model.model): elif self.X.shape[1] == 2: # FIXME resolution = resolution or 50 - Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits, resolution) + Xnew, _, _, xmin, xmax = x_frame2D(self.X, plot_limits, resolution) x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution) - m, var, lower, upper = self.predict(Xnew, which_parts=which_parts) + m, _, lower, upper = self.predict(Xnew, which_parts=which_parts) m = m.reshape(resolution, resolution).T - ax.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) + ax.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) # @UndefinedVariable Yf = self.likelihood.Y.flatten() - ax.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) + ax.scatter(self.X[:, 0], self.X[:, 1], 40, Yf, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) # @UndefinedVariable ax.set_xlim(xmin[0], xmax[0]) ax.set_ylim(xmin[1], xmax[1]) diff --git a/GPy/core/model.py b/GPy/core/model.py index ac5f0dba..582d7313 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -6,37 +6,32 @@ from .. 
import likelihoods from ..inference import optimization from ..util.linalg import jitchol from GPy.util.misc import opt_wrapper -from parameterised import parameterised -from scipy import optimize +from parameterised import Parameterised import multiprocessing as mp import numpy as np -import priors -import re -import sys -import pdb from GPy.core.domains import POSITIVE, REAL # import numdifftools as ndt -class model(parameterised): +class Model(Parameterised): def __init__(self): - parameterised.__init__(self) + Parameterised.__init__(self) self.priors = None self.optimization_runs = [] self.sampling_runs = [] self.preferred_optimizer = 'scg' # self._set_params(self._get_params()) has been taken out as it should only be called on leaf nodes def _get_params(self): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError, "this needs to be implemented to use the Model class" def _set_params(self, x): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError, "this needs to be implemented to use the Model class" def log_likelihood(self): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError, "this needs to be implemented to use the Model class" def _log_likelihood_gradients(self): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError, "this needs to be implemented to use the Model class" def set_prior(self, regexp, what): """ - Sets priors on the model parameters. + Sets priors on the Model parameters. Arguments --------- @@ -95,7 +90,7 @@ class model(parameterised): def get_gradient(self, name, return_names=False): """ - Get model gradient(s) by name. The name is applied as a regular expression and all parameters that match that regular expression are returned. + Get Model gradient(s) by name. The name is applied as a regular expression and all parameters that match that regular expression are returned. """ matches = self.grep_param_names(name) if len(matches): @@ -135,7 +130,7 @@ class model(parameterised): def randomize(self): """ - Randomize the model. + Randomize the Model. Make this draw from the Prior if one exists, else draw from N(0,1) """ # first take care of all parameters (from N(0,1)) @@ -152,11 +147,11 @@ class model(parameterised): def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs): """ - Perform random restarts of the model, and set the model to the best + Perform random restarts of the Model, and set the Model to the best seen solution. If the robust flag is set, exceptions raised during optimizations will - be handled silently. If _all_ runs fail, the model is reset to the + be handled silently. If _all_ runs fail, the Model is reset to the existing parameter values. Notes @@ -218,7 +213,7 @@ class model(parameterised): Ensure that any variables which should clearly be positive have been constrained somehow. """ positive_strings = ['variance', 'lengthscale', 'precision', 'kappa'] - param_names = self._get_param_names() + # param_names = self._get_param_names() currently_constrained = self.all_constrained_indices() to_make_positive = [] for s in positive_strings: @@ -251,7 +246,7 @@ class model(parameterised): def optimize(self, optimizer=None, start=None, **kwargs): """ - Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. 
+ Optimize the Model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. kwargs are passed to the optimizer. They can be: :max_f_eval: maximum number of function evaluations @@ -274,7 +269,7 @@ class model(parameterised): def optimize_SGD(self, momentum=0.1, learning_rate=0.01, iterations=20, **kwargs): # assert self.Y.shape[1] > 1, "SGD only works with D > 1" - sgd = SGD.StochasticGD(self, iterations, learning_rate, momentum, **kwargs) + sgd = SGD.StochasticGD(self, iterations, learning_rate, momentum, **kwargs) # @UndefinedVariable sgd.run() self.optimization_runs.append(sgd) @@ -291,7 +286,7 @@ class model(parameterised): def f(x): self._set_params(x) return self.log_likelihood() - h = ndt.Hessian(f) + h = ndt.Hessian(f) # @UndefinedVariable A = -h(x) self._set_params(x) # check for almost zero components on the diagonal which screw up the cholesky @@ -300,7 +295,7 @@ class model(parameterised): return A def Laplace_evidence(self): - """Returns an estiamte of the model evidence based on the Laplace approximation. + """Returns an estiamte of the Model evidence based on the Laplace approximation. Uses a numerical estimate of the hessian if none is available analytically""" A = self.Laplace_covariance() try: @@ -310,7 +305,7 @@ class model(parameterised): return 0.5 * self._get_params().size * np.log(2 * np.pi) + self.log_likelihood() - hld def __str__(self): - s = parameterised.__str__(self).split('\n') + s = Parameterised.__str__(self).split('\n') # add priors to the string if self.priors is not None: strs = [str(p) if p is not None else '' for p in self.priors] @@ -336,7 +331,7 @@ class model(parameterised): def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3): """ - Check the gradient of the model by comparing to a numerical estimate. + Check the gradient of the Model by comparing to a numerical estimate. If the verbose flag is passed, invividual components are tested (and printed) :param verbose: If True, print a "full" checking of each parameter @@ -389,7 +384,7 @@ class model(parameterised): param_list = range(len(x)) else: param_list = self.grep_param_names(target_param, transformed=True, search=True) - if not param_list: + if not np.any(param_list): print "No free parameters to check" return @@ -419,15 +414,15 @@ class model(parameterised): def input_sensitivity(self): """ - return an array describing the sesitivity of the model to each input + return an array describing the sesitivity of the Model to each input NB. Right now, we're basing this on the lengthscales (or variances) of the kernel. TODO: proper sensitivity analysis - where we integrate across the model inputs and evaluate the - effect on the variance of the model output. """ + where we integrate across the Model inputs and evaluate the + effect on the variance of the Model output. """ if not hasattr(self, 'kern'): - raise ValueError, "this model has no kernel" + raise ValueError, "this Model has no kernel" k = [p for p in self.kern.parts if p.name in ['rbf', 'linear']] if (not len(k) == 1) or (not k[0].ARD): @@ -475,7 +470,7 @@ class model(parameterised): if ll_change < 0: self.likelihood = last_approximation # restore previous likelihood approximation - self._set_params(last_params) # restore model parameters + self._set_params(last_params) # restore Model parameters print "Log-likelihood decrement: %s \nLast likelihood update discarded." 
% ll_change stop = True else: diff --git a/GPy/core/parameterised.py b/GPy/core/parameterised.py index 1db7842b..b3a5712a 100644 --- a/GPy/core/parameterised.py +++ b/GPy/core/parameterised.py @@ -9,7 +9,7 @@ import cPickle import warnings import transformations -class parameterised(object): +class Parameterised(object): def __init__(self): """ This is the base class for model and kernel. Mostly just handles tieing and constraining of parameters @@ -34,7 +34,7 @@ class parameterised(object): """ Returns a **copy** of parameters in non transformed space - :see_also: :py:func:`GPy.core.parameterised.params_transformed` + :see_also: :py:func:`GPy.core.Parameterised.params_transformed` """ return self._get_params() @@ -47,7 +47,7 @@ class parameterised(object): """ Returns a **copy** of parameters in transformed space - :see_also: :py:func:`GPy.core.parameterised.params` + :see_also: :py:func:`GPy.core.Parameterised.params` """ return self._get_params_transformed() diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 91902e3d..8a10ca28 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -152,7 +152,7 @@ class SparseGP(GPBase): return A + B + C + D + self.likelihood.Z def _set_params(self, p): - self.Z = p[:self.num_inducing * self.output_dim].reshape(self.num_inducing, self.input_dim) + self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim) self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.num_params]) self.likelihood._set_params(p[self.Z.size + self.kern.num_params:]) self._compute_kernel_matrices() diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 1512e842..a6031640 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -10,16 +10,16 @@ import numpy as np import GPy -def toy_rbf_1d(max_nb_eval_optim=100): +def toy_rbf_1d(optimizer='tnc', max_nb_eval_optim=100): """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.""" data = GPy.util.datasets.toy_rbf_1d() - # create simple GP model + # create simple GP Model m = GPy.models.GPRegression(data['X'],data['Y']) # optimize m.ensure_default_constraints() - m.optimize(max_f_eval=max_nb_eval_optim) + m.optimize(optimizer, max_f_eval=max_nb_eval_optim) # plot m.plot() print(m) @@ -29,7 +29,7 @@ def rogers_girolami_olympics(optim_iters=100): """Run a standard Gaussian process regression on the Rogers and Girolami olympics data.""" data = GPy.util.datasets.rogers_girolami_olympics() - # create simple GP model + # create simple GP Model m = GPy.models.GPRegression(data['X'],data['Y']) #set the lengthscale to be something sensible (defaults to 1) @@ -48,7 +48,7 @@ def toy_rbf_1d_50(optim_iters=100): """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.""" data = GPy.util.datasets.toy_rbf_1d_50() - # create simple GP model + # create simple GP Model m = GPy.models.GPRegression(data['X'],data['Y']) # optimize @@ -64,7 +64,7 @@ def silhouette(optim_iters=100): """Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper.""" data = GPy.util.datasets.silhouette() - # create simple GP model + # create simple GP Model m = GPy.models.GPRegression(data['X'],data['Y']) # optimize @@ -244,18 +244,18 @@ def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf): lls = [] total_var = np.var(data['Y']) kernel = kernel_call(1, variance=1., lengthscale=1.) 
- model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel) + Model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel) for log_SNR in log_SNRs: SNR = 10.**log_SNR noise_var = total_var/(1.+SNR) signal_var = total_var - noise_var - model.kern['.*variance'] = signal_var - model['noise_variance'] = noise_var + Model.kern['.*variance'] = signal_var + Model['noise_variance'] = noise_var length_scale_lls = [] for length_scale in length_scales: - model['.*lengthscale'] = length_scale - length_scale_lls.append(model.log_likelihood()) + Model['.*lengthscale'] = length_scale + length_scale_lls.append(Model.log_likelihood()) lls.append(length_scale_lls) @@ -270,7 +270,7 @@ def sparse_GP_regression_1D(N = 400, M = 5, optim_iters=100): rbf = GPy.kern.rbf(1) noise = GPy.kern.white(1) kernel = rbf + noise - # create simple GP model + # create simple GP Model m = GPy.models.SparseGPRegression(X, Y, kernel, M=M) m.ensure_default_constraints() @@ -290,7 +290,7 @@ def sparse_GP_regression_2D(N = 400, M = 50, optim_iters=100): noise = GPy.kern.white(2) kernel = rbf + noise - # create simple GP model + # create simple GP Model m = GPy.models.SparseGPRegression(X,Y,kernel, M = M) # contrain all parameters to be positive (but not inducing inputs) @@ -318,7 +318,7 @@ def uncertain_inputs_sparse_regression(optim_iters=100): k = GPy.kern.rbf(1) + GPy.kern.white(1) - # create simple GP model - no input uncertainty on this one + # create simple GP Model - no input uncertainty on this one m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z) m.ensure_default_constraints() m.optimize('scg', messages=1, max_f_eval=optim_iters) @@ -326,7 +326,7 @@ def uncertain_inputs_sparse_regression(optim_iters=100): axes[0].set_title('no input uncertainty') - #the same model with uncertainty + #the same Model with uncertainty m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S) m.ensure_default_constraints() m.optimize('scg', messages=1, max_f_eval=optim_iters) diff --git a/GPy/inference/sgd.py b/GPy/inference/sgd.py index 2df09ec8..0002bb22 100644 --- a/GPy/inference/sgd.py +++ b/GPy/inference/sgd.py @@ -11,17 +11,17 @@ class opt_SGD(Optimizer): Optimize using stochastic gradient descent. 
*** Parameters *** - model: reference to the model object + Model: reference to the Model object iterations: number of iterations learning_rate: learning rate momentum: momentum """ - def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs): + def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, Model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs): self.opt_name = "Stochastic Gradient Descent" - self.model = model + self.Model = Model self.iterations = iterations self.momentum = momentum self.learning_rate = learning_rate @@ -42,17 +42,17 @@ class opt_SGD(Optimizer): self.learning_rate_0 = self.learning_rate.mean() self.schedule = schedule - # if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1: + # if len([p for p in self.Model.kern.parts if p.name == 'bias']) == 1: # self.param_traces.append(('bias',[])) - # if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1: + # if len([p for p in self.Model.kern.parts if p.name == 'linear']) == 1: # self.param_traces.append(('linear',[])) - # if len([p for p in self.model.kern.parts if p.name == 'rbf']) == 1: + # if len([p for p in self.Model.kern.parts if p.name == 'rbf']) == 1: # self.param_traces.append(('rbf_var',[])) self.param_traces = dict(self.param_traces) self.fopt_trace = [] - num_params = len(self.model._get_params()) + num_params = len(self.Model._get_params()) if isinstance(self.learning_rate, float): self.learning_rate = np.ones((num_params,)) * self.learning_rate @@ -84,7 +84,7 @@ class opt_SGD(Optimizer): return (np.isnan(data).sum(axis=1) == 0) def check_for_missing(self, data): - if sp.sparse.issparse(self.model.likelihood.Y): + if sp.sparse.issparse(self.Model.likelihood.Y): return True else: return np.isnan(data).sum() > 0 @@ -107,32 +107,32 @@ class opt_SGD(Optimizer): def shift_constraints(self, j): - constrained_indices = copy.deepcopy(self.model.constrained_indices) + constrained_indices = copy.deepcopy(self.Model.constrained_indices) for c, constraint in enumerate(constrained_indices): mask = (np.ones_like(constrained_indices[c]) == 1) for i in range(len(constrained_indices[c])): pos = np.where(j == constrained_indices[c][i])[0] if len(pos) == 1: - self.model.constrained_indices[c][i] = pos + self.Model.constrained_indices[c][i] = pos else: mask[i] = False - self.model.constrained_indices[c] = self.model.constrained_indices[c][mask] + self.Model.constrained_indices[c] = self.Model.constrained_indices[c][mask] return constrained_indices # back them up - # bounded_i = copy.deepcopy(self.model.constrained_bounded_indices) - # bounded_l = copy.deepcopy(self.model.constrained_bounded_lowers) - # bounded_u = copy.deepcopy(self.model.constrained_bounded_uppers) + # bounded_i = copy.deepcopy(self.Model.constrained_bounded_indices) + # bounded_l = copy.deepcopy(self.Model.constrained_bounded_lowers) + # bounded_u = copy.deepcopy(self.Model.constrained_bounded_uppers) # for b in range(len(bounded_i)): # for each group of constraints # for bc in range(len(bounded_i[b])): # pos = np.where(j == bounded_i[b][bc])[0] # if len(pos) == 1: - # pos2 = np.where(self.model.constrained_bounded_indices[b] == bounded_i[b][bc])[0][0] - # 
self.model.constrained_bounded_indices[b][pos2] = pos[0] + # pos2 = np.where(self.Model.constrained_bounded_indices[b] == bounded_i[b][bc])[0][0] + # self.Model.constrained_bounded_indices[b][pos2] = pos[0] # else: - # if len(self.model.constrained_bounded_indices[b]) == 1: + # if len(self.Model.constrained_bounded_indices[b]) == 1: # # if it's the last index to be removed # # the logic here is just a mess. If we remove the last one, then all the # # b-indices change and we have to iterate through everything to find our @@ -140,35 +140,35 @@ class opt_SGD(Optimizer): # raise NotImplementedError # else: # just remove it from the indices - # mask = self.model.constrained_bounded_indices[b] != bc - # self.model.constrained_bounded_indices[b] = self.model.constrained_bounded_indices[b][mask] + # mask = self.Model.constrained_bounded_indices[b] != bc + # self.Model.constrained_bounded_indices[b] = self.Model.constrained_bounded_indices[b][mask] # # here we shif the positive constraints. We cycle through each positive # # constraint - # positive = self.model.constrained_positive_indices.copy() + # positive = self.Model.constrained_positive_indices.copy() # mask = (np.ones_like(positive) == 1) # for p in range(len(positive)): # # we now check whether the constrained index appears in the j vector # # (the vector of the "active" indices) - # pos = np.where(j == self.model.constrained_positive_indices[p])[0] + # pos = np.where(j == self.Model.constrained_positive_indices[p])[0] # if len(pos) == 1: - # self.model.constrained_positive_indices[p] = pos + # self.Model.constrained_positive_indices[p] = pos # else: # mask[p] = False - # self.model.constrained_positive_indices = self.model.constrained_positive_indices[mask] + # self.Model.constrained_positive_indices = self.Model.constrained_positive_indices[mask] # return (bounded_i, bounded_l, bounded_u), positive def restore_constraints(self, c):#b, p): - # self.model.constrained_bounded_indices = b[0] - # self.model.constrained_bounded_lowers = b[1] - # self.model.constrained_bounded_uppers = b[2] - # self.model.constrained_positive_indices = p - self.model.constrained_indices = c + # self.Model.constrained_bounded_indices = b[0] + # self.Model.constrained_bounded_lowers = b[1] + # self.Model.constrained_bounded_uppers = b[2] + # self.Model.constrained_positive_indices = p + self.Model.constrained_indices = c def get_param_shapes(self, N = None, input_dim = None): - model_name = self.model.__class__.__name__ + model_name = self.Model.__class__.__name__ if model_name == 'GPLVM': return [(N, input_dim)] if model_name == 'Bayesian_GPLVM': @@ -179,37 +179,37 @@ class opt_SGD(Optimizer): def step_with_missing_data(self, f_fp, X, step, shapes): N, input_dim = X.shape - if not sp.sparse.issparse(self.model.likelihood.Y): - Y = self.model.likelihood.Y - samples = self.non_null_samples(self.model.likelihood.Y) - self.model.N = samples.sum() + if not sp.sparse.issparse(self.Model.likelihood.Y): + Y = self.Model.likelihood.Y + samples = self.non_null_samples(self.Model.likelihood.Y) + self.Model.N = samples.sum() Y = Y[samples] else: - samples = self.model.likelihood.Y.nonzero()[0] - self.model.N = len(samples) - Y = np.asarray(self.model.likelihood.Y[samples].todense(), dtype = np.float64) + samples = self.Model.likelihood.Y.nonzero()[0] + self.Model.N = len(samples) + Y = np.asarray(self.Model.likelihood.Y[samples].todense(), dtype = np.float64) - if self.model.N == 0 or Y.std() == 0.0: - return 0, step, self.model.N + if self.Model.N == 0 or Y.std() == 0.0: + 
return 0, step, self.Model.N - self.model.likelihood._offset = Y.mean() - self.model.likelihood._scale = Y.std() - self.model.likelihood.set_data(Y) - # self.model.likelihood.V = self.model.likelihood.Y*self.model.likelihood.precision + self.Model.likelihood._offset = Y.mean() + self.Model.likelihood._scale = Y.std() + self.Model.likelihood.set_data(Y) + # self.Model.likelihood.V = self.Model.likelihood.Y*self.Model.likelihood.precision - sigma = self.model.likelihood._variance - self.model.likelihood._variance = None # invalidate cache - self.model.likelihood._set_params(sigma) + sigma = self.Model.likelihood._variance + self.Model.likelihood._variance = None # invalidate cache + self.Model.likelihood._set_params(sigma) j = self.subset_parameter_vector(self.x_opt, samples, shapes) - self.model.X = X[samples] + self.Model.X = X[samples] - model_name = self.model.__class__.__name__ + model_name = self.Model.__class__.__name__ if model_name == 'Bayesian_GPLVM': - self.model.likelihood.YYT = np.dot(self.model.likelihood.Y, self.model.likelihood.Y.T) - self.model.likelihood.trYYT = np.trace(self.model.likelihood.YYT) + self.Model.likelihood.YYT = np.dot(self.Model.likelihood.Y, self.Model.likelihood.Y.T) + self.Model.likelihood.trYYT = np.trace(self.Model.likelihood.YYT) ci = self.shift_constraints(j) f, fp = f_fp(self.x_opt[j]) @@ -218,18 +218,18 @@ class opt_SGD(Optimizer): self.x_opt[j] -= step[j] self.restore_constraints(ci) - self.model.grads[j] = fp + self.Model.grads[j] = fp # restore likelihood _offset and _scale, otherwise when we call set_data(y) on # the next feature, it will get normalized with the mean and std of this one. - self.model.likelihood._offset = 0 - self.model.likelihood._scale = 1 + self.Model.likelihood._offset = 0 + self.Model.likelihood._scale = 1 - return f, step, self.model.N + return f, step, self.Model.N def adapt_learning_rate(self, t, D): if self.learning_rate_adaptation == 'adagrad': if t > 0: - g_k = self.model.grads + g_k = self.Model.grads self.s_k += np.square(g_k) t0 = 100.0 self.learning_rate = 0.1/(t0 + np.sqrt(self.s_k)) @@ -245,8 +245,8 @@ class opt_SGD(Optimizer): elif self.learning_rate_adaptation == 'semi_pesky': - if self.model.__class__.__name__ == 'Bayesian_GPLVM': - g_t = self.model.grads + if self.Model.__class__.__name__ == 'Bayesian_GPLVM': + g_t = self.Model.grads if t == 0: self.hbar_t = 0.0 self.tau_t = 100.0 @@ -259,28 +259,28 @@ class opt_SGD(Optimizer): def opt(self, f_fp=None, f=None, fp=None): - self.x_opt = self.model._get_params_transformed() + self.x_opt = self.Model._get_params_transformed() self.grads = [] - X, Y = self.model.X.copy(), self.model.likelihood.Y.copy() + X, Y = self.Model.X.copy(), self.Model.likelihood.Y.copy() - self.model.likelihood.YYT = 0 - self.model.likelihood.trYYT = 0 - self.model.likelihood._offset = 0.0 - self.model.likelihood._scale = 1.0 + self.Model.likelihood.YYT = 0 + self.Model.likelihood.trYYT = 0 + self.Model.likelihood._offset = 0.0 + self.Model.likelihood._scale = 1.0 - N, input_dim = self.model.X.shape - D = self.model.likelihood.Y.shape[1] - num_params = self.model._get_params() + N, input_dim = self.Model.X.shape + D = self.Model.likelihood.Y.shape[1] + num_params = self.Model._get_params() self.trace = [] - missing_data = self.check_for_missing(self.model.likelihood.Y) + missing_data = self.check_for_missing(self.Model.likelihood.Y) step = np.zeros_like(num_params) for it in range(self.iterations): if self.actual_iter != None: it = self.actual_iter - self.model.grads = 
np.zeros_like(self.x_opt) # TODO this is ugly + self.Model.grads = np.zeros_like(self.x_opt) # TODO this is ugly if it == 0 or self.self_paced is False: features = np.random.permutation(Y.shape[1]) @@ -292,29 +292,29 @@ class opt_SGD(Optimizer): NLL = [] import pylab as plt for count, j in enumerate(features): - self.model.input_dim = len(j) - self.model.likelihood.input_dim = len(j) - self.model.likelihood.set_data(Y[:, j]) - # self.model.likelihood.V = self.model.likelihood.Y*self.model.likelihood.precision + self.Model.input_dim = len(j) + self.Model.likelihood.input_dim = len(j) + self.Model.likelihood.set_data(Y[:, j]) + # self.Model.likelihood.V = self.Model.likelihood.Y*self.Model.likelihood.precision - sigma = self.model.likelihood._variance - self.model.likelihood._variance = None # invalidate cache - self.model.likelihood._set_params(sigma) + sigma = self.Model.likelihood._variance + self.Model.likelihood._variance = None # invalidate cache + self.Model.likelihood._set_params(sigma) if missing_data: shapes = self.get_param_shapes(N, input_dim) f, step, Nj = self.step_with_missing_data(f_fp, X, step, shapes) else: - self.model.likelihood.YYT = np.dot(self.model.likelihood.Y, self.model.likelihood.Y.T) - self.model.likelihood.trYYT = np.trace(self.model.likelihood.YYT) + self.Model.likelihood.YYT = np.dot(self.Model.likelihood.Y, self.Model.likelihood.Y.T) + self.Model.likelihood.trYYT = np.trace(self.Model.likelihood.YYT) Nj = N f, fp = f_fp(self.x_opt) - self.model.grads = fp.copy() + self.Model.grads = fp.copy() step = self.momentum * step + self.learning_rate * fp self.x_opt -= step if self.messages == 2: - noise = self.model.likelihood._variance + noise = self.Model.likelihood._variance status = "evaluating {feature: 5d}/{tot: 5d} \t f: {f: 2.3f} \t non-missing: {nm: 4d}\t noise: {noise: 2.4f}\r".format(feature = count, tot = len(features), f = f, nm = Nj, noise = noise) sys.stdout.write(status) sys.stdout.flush() @@ -328,19 +328,19 @@ class opt_SGD(Optimizer): # plt.plot(self.param_traces['noise']) # for k in self.param_traces.keys(): - # self.param_traces[k].append(self.model.get(k)[0]) - self.grads.append(self.model.grads.tolist()) + # self.param_traces[k].append(self.Model.get(k)[0]) + self.grads.append(self.Model.grads.tolist()) # should really be a sum(), but earlier samples in the iteration will have a very crappy ll self.f_opt = np.mean(NLL) - self.model.N = N - self.model.X = X - self.model.input_dim = D - self.model.likelihood.N = N - self.model.likelihood.input_dim = D - self.model.likelihood.Y = Y - sigma = self.model.likelihood._variance - self.model.likelihood._variance = None # invalidate cache - self.model.likelihood._set_params(sigma) + self.Model.N = N + self.Model.X = X + self.Model.input_dim = D + self.Model.likelihood.N = N + self.Model.likelihood.input_dim = D + self.Model.likelihood.Y = Y + sigma = self.Model.likelihood._variance + self.Model.likelihood._variance = None # invalidate cache + self.Model.likelihood._set_params(sigma) self.trace.append(self.f_opt) if self.iteration_file is not None: diff --git a/GPy/kern/Brownian.py b/GPy/kern/Brownian.py index dea963bf..76e103af 100644 --- a/GPy/kern/Brownian.py +++ b/GPy/kern/Brownian.py @@ -2,14 +2,14 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np def theta(x): """Heavisdie step function""" return np.where(x>=0.,1.,0.) -class Brownian(kernpart): +class Brownian(Kernpart): """ Brownian Motion kernel. 
diff --git a/GPy/kern/Matern32.py b/GPy/kern/Matern32.py index e81692d0..60f0b6e9 100644 --- a/GPy/kern/Matern32.py +++ b/GPy/kern/Matern32.py @@ -2,13 +2,11 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -import hashlib -from ..util.linalg import pdinv,mdot from scipy import integrate -class Matern32(kernpart): +class Matern32(Kernpart): """ Matern 3/2 kernel: @@ -28,7 +26,7 @@ class Matern32(kernpart): """ - def __init__(self,input_dim,variance=1.,lengthscale=None,ARD=False): + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False): self.input_dim = input_dim self.ARD = ARD if ARD == False: @@ -47,13 +45,13 @@ class Matern32(kernpart): assert lengthscale.size == self.input_dim, "bad number of lengthscales" else: lengthscale = np.ones(self.input_dim) - self._set_params(np.hstack((variance,lengthscale.flatten()))) + self._set_params(np.hstack((variance, lengthscale.flatten()))) def _get_params(self): """return the value of the parameters.""" - return np.hstack((self.variance,self.lengthscale)) + return np.hstack((self.variance, self.lengthscale)) - def _set_params(self,x): + def _set_params(self, x): """set the value of the parameters.""" assert x.size == self.num_params self.variance = x[0] @@ -62,54 +60,54 @@ class Matern32(kernpart): def _get_param_names(self): """return parameter names.""" if self.num_params == 2: - return ['variance','lengthscale'] + return ['variance', 'lengthscale'] else: - return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscale.size)] + return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)] - def K(self,X,X2,target): + def K(self, X, X2, target): """Compute the covariance matrix between X and X2.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1)) - np.add(self.variance*(1+np.sqrt(3.)*dist)*np.exp(-np.sqrt(3.)*dist), target,target) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1)) + np.add(self.variance * (1 + np.sqrt(3.) * dist) * np.exp(-np.sqrt(3.) * dist), target, target) - def Kdiag(self,X,target): + def Kdiag(self, X, target): """Compute the diagonal of the covariance matrix associated to X.""" - np.add(target,self.variance,target) + np.add(target, self.variance, target) - def dK_dtheta(self,dL_dK,X,X2,target): + def dK_dtheta(self, dL_dK, X, X2, target): """derivative of the covariance matrix with respect to the parameters.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1)) - dvar = (1+np.sqrt(3.)*dist)*np.exp(-np.sqrt(3.)*dist) - invdist = 1./np.where(dist!=0.,dist,np.inf) - dist2M = np.square(X[:,None,:]-X2[None,:,:])/self.lengthscale**3 - #dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis] - target[0] += np.sum(dvar*dL_dK) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1)) + dvar = (1 + np.sqrt(3.) * dist) * np.exp(-np.sqrt(3.) * dist) + invdist = 1. 
/ np.where(dist != 0., dist, np.inf) + dist2M = np.square(X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 3 + # dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis] + target[0] += np.sum(dvar * dL_dK) if self.ARD == True: - dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist))[:,:,np.newaxis] * dist2M*invdist[:,:,np.newaxis] - #dl = self.variance*dvar[:,:,None]*dist2M*invdist[:,:,None] - target[1:] += (dl*dL_dK[:,:,None]).sum(0).sum(0) + dl = (self.variance * 3 * dist * np.exp(-np.sqrt(3.) * dist))[:, :, np.newaxis] * dist2M * invdist[:, :, np.newaxis] + # dl = self.variance*dvar[:,:,None]*dist2M*invdist[:,:,None] + target[1:] += (dl * dL_dK[:, :, None]).sum(0).sum(0) else: - dl = (self.variance* 3 * dist * np.exp(-np.sqrt(3.)*dist)) * dist2M.sum(-1)*invdist - #dl = self.variance*dvar*dist2M.sum(-1)*invdist - target[1] += np.sum(dl*dL_dK) + dl = (self.variance * 3 * dist * np.exp(-np.sqrt(3.) * dist)) * dist2M.sum(-1) * invdist + # dl = self.variance*dvar*dist2M.sum(-1)*invdist + target[1] += np.sum(dl * dL_dK) - def dKdiag_dtheta(self,dL_dKdiag,X,target): + def dKdiag_dtheta(self, dL_dKdiag, X, target): """derivative of the diagonal of the covariance matrix with respect to the parameters.""" target[0] += np.sum(dL_dKdiag) - def dK_dX(self,dL_dK,X,X2,target): + def dK_dX(self, dL_dK, X, X2, target): """derivative of the covariance matrix with respect to X.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1))[:,:,None] - ddist_dX = (X[:,None,:]-X2[None,:,:])/self.lengthscale**2/np.where(dist!=0.,dist,np.inf) - dK_dX = - np.transpose(3*self.variance*dist*np.exp(-np.sqrt(3)*dist)*ddist_dX,(1,0,2)) - target += np.sum(dK_dX*dL_dK.T[:,:,None],0) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))[:, :, None] + ddist_dX = (X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf) + dK_dX = -np.transpose(3 * self.variance * dist * np.exp(-np.sqrt(3) * dist) * ddist_dX, (1, 0, 2)) + target += np.sum(dK_dX * dL_dK.T[:, :, None], 0) - def dKdiag_dX(self,dL_dKdiag,X,target): + def dKdiag_dX(self, dL_dKdiag, X, target): pass - def Gram_matrix(self,F,F1,F2,lower,upper): + def Gram_matrix(self, F, F1, F2, lower, upper): """ Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1. @@ -123,15 +121,15 @@ class Matern32(kernpart): :type lower,upper: floats """ assert self.input_dim == 1 - def L(x,i): - return(3./self.lengthscale**2*F[i](x) + 2*np.sqrt(3)/self.lengthscale*F1[i](x) + F2[i](x)) + def L(x, i): + return(3. 
/ self.lengthscale ** 2 * F[i](x) + 2 * np.sqrt(3) / self.lengthscale * F1[i](x) + F2[i](x)) n = F.shape[0] - G = np.zeros((n,n)) + G = np.zeros((n, n)) for i in range(n): - for j in range(i,n): - G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0] - Flower = np.array([f(lower) for f in F])[:,None] - F1lower = np.array([f(lower) for f in F1])[:,None] - #print "OLD \n", np.dot(F1lower,F1lower.T), "\n \n" - #return(G) - return(self.lengthscale**3/(12.*np.sqrt(3)*self.variance) * G + 1./self.variance*np.dot(Flower,Flower.T) + self.lengthscale**2/(3.*self.variance)*np.dot(F1lower,F1lower.T)) + for j in range(i, n): + G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0] + Flower = np.array([f(lower) for f in F])[:, None] + F1lower = np.array([f(lower) for f in F1])[:, None] + # print "OLD \n", np.dot(F1lower,F1lower.T), "\n \n" + # return(G) + return(self.lengthscale ** 3 / (12.*np.sqrt(3) * self.variance) * G + 1. / self.variance * np.dot(Flower, Flower.T) + self.lengthscale ** 2 / (3.*self.variance) * np.dot(F1lower, F1lower.T)) diff --git a/GPy/kern/Matern52.py b/GPy/kern/Matern52.py index 45210cb4..e02cb9bf 100644 --- a/GPy/kern/Matern52.py +++ b/GPy/kern/Matern52.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib from scipy import integrate -class Matern52(kernpart): +class Matern52(Kernpart): """ Matern 5/2 kernel: diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py index a71d38f4..97c1d88f 100644 --- a/GPy/kern/__init__.py +++ b/GPy/kern/__init__.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from constructors import rbf, Matern32, Matern52, exponential, linear, white, bias, finite_dimensional, spline, Brownian, periodic_exponential, periodic_Matern32, periodic_Matern52, prod, symmetric, Coregionalise, rational_quadratic, fixed, rbfcos, independent_outputs +from constructors import rbf, Matern32, Matern52, exponential, linear, white, bias, finite_dimensional, spline, Brownian, periodic_exponential, periodic_Matern32, periodic_Matern52, prod, symmetric, Coregionalise, rational_quadratic, Fixed, rbfcos, IndependentOutputs try: from constructors import rbf_sympy, sympykern # these depend on sympy except: diff --git a/GPy/kern/bias.py b/GPy/kern/bias.py index e3905c16..8ec3741d 100644 --- a/GPy/kern/bias.py +++ b/GPy/kern/bias.py @@ -2,11 +2,11 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib -class bias(kernpart): +class bias(Kernpart): def __init__(self,input_dim,variance=1.): """ :param input_dim: the number of input dimensions diff --git a/GPy/kern/constructors.py b/GPy/kern/constructors.py index a6ed1145..520c931b 100644 --- a/GPy/kern/constructors.py +++ b/GPy/kern/constructors.py @@ -12,7 +12,7 @@ from exponential import exponential as exponentialpart from Matern32 import Matern32 as Matern32part from Matern52 import Matern52 as Matern52part from bias import bias as biaspart -from fixed import fixed as fixedpart +from fixed import Fixed as fixedpart from finite_dimensional import finite_dimensional as finite_dimensionalpart from spline import spline as splinepart from Brownian import Brownian as Brownianpart @@ -24,7 +24,7 @@ from symmetric import symmetric as symmetric_part from coregionalise import Coregionalise as coregionalise_part from rational_quadratic import 
rational_quadratic as rational_quadraticpart from rbfcos import rbfcos as rbfcospart -from independent_outputs import independent_outputs as independent_output_part +from independent_outputs import IndependentOutputs as independent_output_part #TODO these s=constructors are not as clean as we'd like. Tidy the code up #using meta-classes to make the objects construct properly wthout them. @@ -294,9 +294,9 @@ def rational_quadratic(D,variance=1., lengthscale=1., power=1.): part = rational_quadraticpart(D,variance, lengthscale, power) return kern(D, [part]) -def fixed(D, K, variance=1.): +def Fixed(D, K, variance=1.): """ - Construct a fixed effect kernel. + Construct a Fixed effect kernel. Arguments --------- @@ -314,7 +314,7 @@ def rbfcos(D,variance=1.,frequencies=None,bandwidths=None,ARD=False): part = rbfcospart(D,variance,frequencies,bandwidths,ARD) return kern(D,[part]) -def independent_outputs(k): +def IndependentOutputs(k): """ Construct a kernel with independent outputs from an existing kernel """ diff --git a/GPy/kern/coregionalise.py b/GPy/kern/coregionalise.py index 7ac17dbe..cfe8a35a 100644 --- a/GPy/kern/coregionalise.py +++ b/GPy/kern/coregionalise.py @@ -1,13 +1,13 @@ # Copyright (c) 2012, James Hensman and Ricardo Andrade # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np from GPy.util.linalg import mdot, pdinv import pdb from scipy import weave -class Coregionalise(kernpart): +class Coregionalise(Kernpart): """ Kernel for Intrinsic Corregionalization Models """ diff --git a/GPy/kern/exponential.py b/GPy/kern/exponential.py index 9e50712b..5cb0f584 100644 --- a/GPy/kern/exponential.py +++ b/GPy/kern/exponential.py @@ -2,21 +2,20 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -import hashlib from scipy import integrate -class exponential(kernpart): +class exponential(Kernpart): """ Exponential kernel (aka Ornstein-Uhlenbeck or Matern 1/2) .. 
math:: - k(r) = \sigma^2 \exp(- r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^D \\frac{(x_i-y_i)^2}{\ell_i^2} } + k(r) = \sigma^2 \exp(- r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} } - :param D: the number of input dimensions - :type D: int + :param input_dim: the number of input dimensions + :type input_dim: int :param variance: the variance :math:`\sigma^2` :type variance: float :param lengthscale: the vector of lengthscale :math:`\ell_i` @@ -26,11 +25,11 @@ class exponential(kernpart): :rtype: kernel object """ - def __init__(self,D,variance=1.,lengthscale=None,ARD=False): - self.D = D + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False): + self.input_dim = input_dim self.ARD = ARD if ARD == False: - self.Nparam = 2 + self.num_params = 2 self.name = 'exp' if lengthscale is not None: lengthscale = np.asarray(lengthscale) @@ -38,76 +37,76 @@ class exponential(kernpart): else: lengthscale = np.ones(1) else: - self.Nparam = self.D + 1 + self.num_params = self.input_dim + 1 self.name = 'exp' if lengthscale is not None: lengthscale = np.asarray(lengthscale) - assert lengthscale.size == self.D, "bad number of lengthscales" + assert lengthscale.size == self.input_dim, "bad number of lengthscales" else: - lengthscale = np.ones(self.D) - self._set_params(np.hstack((variance,lengthscale.flatten()))) + lengthscale = np.ones(self.input_dim) + self._set_params(np.hstack((variance, lengthscale.flatten()))) def _get_params(self): """return the value of the parameters.""" - return np.hstack((self.variance,self.lengthscale)) + return np.hstack((self.variance, self.lengthscale)) - def _set_params(self,x): + def _set_params(self, x): """set the value of the parameters.""" - assert x.size == self.Nparam + assert x.size == self.num_params self.variance = x[0] self.lengthscale = x[1:] def _get_param_names(self): """return parameter names.""" - if self.Nparam == 2: - return ['variance','lengthscale'] + if self.num_params == 2: + return ['variance', 'lengthscale'] else: - return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscale.size)] + return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)] - def K(self,X,X2,target): + def K(self, X, X2, target): """Compute the covariance matrix between X and X2.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1)) - np.add(self.variance*np.exp(-dist), target,target) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1)) + np.add(self.variance * np.exp(-dist), target, target) - def Kdiag(self,X,target): + def Kdiag(self, X, target): """Compute the diagonal of the covariance matrix associated to X.""" - np.add(target,self.variance,target) + np.add(target, self.variance, target) - def dK_dtheta(self,dL_dK,X,X2,target): + def dK_dtheta(self, dL_dK, X, X2, target): """derivative of the covariance matrix with respect to the parameters.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1)) - invdist = 1./np.where(dist!=0.,dist,np.inf) - dist2M = np.square(X[:,None,:]-X2[None,:,:])/self.lengthscale**3 + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1)) + invdist = 1. 
/ np.where(dist != 0., dist, np.inf) + dist2M = np.square(X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 3 dvar = np.exp(-dist) - target[0] += np.sum(dvar*dL_dK) + target[0] += np.sum(dvar * dL_dK) if self.ARD == True: - dl = self.variance*dvar[:,:,None]*dist2M*invdist[:,:,None] - target[1:] += (dl*dL_dK[:,:,None]).sum(0).sum(0) + dl = self.variance * dvar[:, :, None] * dist2M * invdist[:, :, None] + target[1:] += (dl * dL_dK[:, :, None]).sum(0).sum(0) else: - dl = self.variance*dvar*dist2M.sum(-1)*invdist - target[1] += np.sum(dl*dL_dK) + dl = self.variance * dvar * dist2M.sum(-1) * invdist + target[1] += np.sum(dl * dL_dK) - def dKdiag_dtheta(self,dL_dKdiag,X,target): + def dKdiag_dtheta(self, dL_dKdiag, X, target): """derivative of the diagonal of the covariance matrix with respect to the parameters.""" - #NB: derivative of diagonal elements wrt lengthscale is 0 + # NB: derivative of diagonal elements wrt lengthscale is 0 target[0] += np.sum(dL_dKdiag) - def dK_dX(self,dL_dK,X,X2,target): + def dK_dX(self, dL_dK, X, X2, target): """derivative of the covariance matrix with respect to X.""" if X2 is None: X2 = X - dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1))[:,:,None] - ddist_dX = (X[:,None,:]-X2[None,:,:])/self.lengthscale**2/np.where(dist!=0.,dist,np.inf) - dK_dX = - np.transpose(self.variance*np.exp(-dist)*ddist_dX,(1,0,2)) - target += np.sum(dK_dX*dL_dK.T[:,:,None],0) + dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))[:, :, None] + ddist_dX = (X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf) + dK_dX = -np.transpose(self.variance * np.exp(-dist) * ddist_dX, (1, 0, 2)) + target += np.sum(dK_dX * dL_dK.T[:, :, None], 0) - def dKdiag_dX(self,dL_dKdiag,X,target): + def dKdiag_dX(self, dL_dKdiag, X, target): pass - def Gram_matrix(self,F,F1,lower,upper): + def Gram_matrix(self, F, F1, lower, upper): """ - Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to D=1. + Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1. :param F: vector of functions :type F: np.array @@ -116,13 +115,13 @@ class exponential(kernpart): :param lower,upper: boundaries of the input domain :type lower,upper: floats """ - assert self.D == 1 - def L(x,i): - return(1./self.lengthscale*F[i](x) + F1[i](x)) + assert self.input_dim == 1 + def L(x, i): + return(1. / self.lengthscale * F[i](x) + F1[i](x)) n = F.shape[0] - G = np.zeros((n,n)) + G = np.zeros((n, n)) for i in range(n): - for j in range(i,n): - G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0] - Flower = np.array([f(lower) for f in F])[:,None] - return(self.lengthscale/2./self.variance * G + 1./self.variance * np.dot(Flower,Flower.T)) + for j in range(i, n): + G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0] + Flower = np.array([f(lower) for f in F])[:, None] + return(self.lengthscale / 2. / self.variance * G + 1. 
/ self.variance * np.dot(Flower, Flower.T)) diff --git a/GPy/kern/finite_dimensional.py b/GPy/kern/finite_dimensional.py index 36afd1dc..b23ddb16 100644 --- a/GPy/kern/finite_dimensional.py +++ b/GPy/kern/finite_dimensional.py @@ -2,21 +2,21 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np from ..util.linalg import pdinv,mdot -class finite_dimensional(kernpart): - def __init__(self, D, F, G, variance=1., weights=None): +class finite_dimensional(Kernpart): + def __init__(self, input_dim, F, G, variance=1., weights=None): """ Argumnents ---------- - D: int - the number of input dimensions + input_dim: int - the number of input dimensions F: np.array of functions with shape (n,) - the n basis functions G: np.array with shape (n,n) - the Gram matrix associated to F weights : np.ndarray with shape (n,) """ - self.D = D + self.input_dim = input_dim self.F = F self.G = G self.G_1 ,L,Li,logdet = pdinv(G) @@ -25,14 +25,14 @@ class finite_dimensional(kernpart): assert weights.shape==(self.n,) else: weights = np.ones(self.n) - self.Nparam = self.n + 1 + self.num_params = self.n + 1 self.name = 'finite_dim' self._set_params(np.hstack((variance,weights))) def _get_params(self): return np.hstack((self.variance,self.weights)) def _set_params(self,x): - assert x.size == (self.Nparam) + assert x.size == (self.num_params) self.variance = x[0] self.weights = x[1:] def _get_param_names(self): @@ -48,7 +48,7 @@ class finite_dimensional(kernpart): product = self.variance * mdot(FX,np.diag(np.sqrt(self.weights)),self.G_1,np.diag(np.sqrt(self.weights)),FX2.T) np.add(product,target,target) def Kdiag(self,X,target): - product = np.diag(self.K(X,X2)) + product = np.diag(self.K(X, X)) np.add(target,product,target) def dK_dtheta(self,X,X2,target): """Return shape is NxMx(Ntheta)""" diff --git a/GPy/kern/fixed.py b/GPy/kern/fixed.py index 8732ec8f..9e8f6226 100644 --- a/GPy/kern/fixed.py +++ b/GPy/kern/fixed.py @@ -1,42 +1,41 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -import hashlib -class fixed(kernpart): - def __init__(self,D,K,variance=1.): +class Fixed(Kernpart): + def __init__(self, input_dim, K, variance=1.): """ - :param D: the number of input dimensions - :type D: int + :param input_dim: the number of input dimensions + :type input_dim: int :param variance: the variance of the kernel :type variance: float """ - self.D = D + self.input_dim = input_dim self.fixed_K = K - self.Nparam = 1 - self.name = 'fixed' + self.num_params = 1 + self.name = 'Fixed' self._set_params(np.array([variance]).flatten()) def _get_params(self): return self.variance - def _set_params(self,x): - assert x.shape==(1,) + def _set_params(self, x): + assert x.shape == (1,) self.variance = x def _get_param_names(self): return ['variance'] - def K(self,X,X2,target): + def K(self, X, X2, target): target += self.variance * self.fixed_K - def dK_dtheta(self,partial,X,X2,target): + def dK_dtheta(self, partial, X, X2, target): target += (partial * self.fixed_K).sum() - def dK_dX(self, partial,X, X2, target): + def dK_dX(self, partial, X, X2, target): pass - def dKdiag_dX(self,partial,X,target): + def dKdiag_dX(self, partial, X, target): pass diff --git a/GPy/kern/independent_outputs.py b/GPy/kern/independent_outputs.py index b94202d7..f88b0ff5 100644 --- a/GPy/kern/independent_outputs.py +++ b/GPy/kern/independent_outputs.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np def index_to_slices(index): @@ -31,7 +31,7 @@ def index_to_slices(index): [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))] return ret -class independent_outputs(kernpart): +class IndependentOutputs(Kernpart): """ A kernel part shich can reopresent several independent functions. this kernel 'switches off' parts of the matrix where the output indexes are different. @@ -41,8 +41,8 @@ class independent_outputs(kernpart): """ def __init__(self,k): - self.D = k.D + 1 - self.Nparam = k.Nparam + self.input_dim = k.input_dim + 1 + self.num_params = k.num_params self.name = 'iops('+ k.name + ')' self.k = k diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py index 3d5c0609..5de9fbb4 100644 --- a/GPy/kern/kern.py +++ b/GPy/kern/kern.py @@ -4,23 +4,22 @@ import numpy as np import pylab as pb -from ..core.parameterised import parameterised -from kernpart import kernpart +from ..core.parameterised import Parameterised +from kernpart import Kernpart import itertools from prod import prod -from ..util.linalg import symmetrify -class kern(parameterised): +class kern(Parameterised): def __init__(self, input_dim, parts=[], input_slices=None): """ This is the main kernel class for GPy. It handles multiple (additive) kernel functions, and keeps track of variaous things like which parameters live where. The technical code for kernels is divided into _parts_ (see e.g. rbf.py). This obnject contains a list of parts, which are computed additively. For multiplication, special _prod_ parts are used. 
- :param input_dim: The dimensioality of the kernel's input space + :param input_dim: The dimensionality of the kernel's input space :type input_dim: int :param parts: the 'parts' (PD functions) of the kernel - :type parts: list of kernpart objects + :type parts: list of Kernpart objects :param input_slices: the slices on the inputs which apply to each kernel :type input_slices: list of slice objects, or list of bools @@ -39,11 +38,11 @@ class kern(parameterised): self.input_slices = [sl if type(sl) is slice else slice(None) for sl in input_slices] for p in self.parts: - assert isinstance(p, kernpart), "bad kernel part" + assert isinstance(p, Kernpart), "bad kernel part" self.compute_param_slices() - parameterised.__init__(self) + Parameterised.__init__(self) def plot_ARD(self, fignum=None, ax=None): @@ -327,8 +326,8 @@ class kern(parameterised): p2.psi1(Z, mu, S, tmp2) prod = np.multiply(tmp1, tmp2) - crossterms += prod[:,:,None] + prod[:, None, :] - + crossterms += prod[:, :, None] + prod[:, None, :] + target += crossterms return target @@ -345,14 +344,14 @@ class kern(parameterised): tmp = np.zeros((mu.shape[0], Z.shape[0])) p1.psi1(Z, mu, S, tmp) - p2.dpsi1_dtheta((tmp[:,None,:]*dL_dpsi2).sum(1)*2., Z, mu, S, target[ps2]) + p2.dpsi1_dtheta((tmp[:, None, :] * dL_dpsi2).sum(1) * 2., Z, mu, S, target[ps2]) return self._transform_gradients(target) def dpsi2_dZ(self, dL_dpsi2, Z, mu, S): target = np.zeros_like(Z) [p.dpsi2_dZ(dL_dpsi2, Z[:, i_s], mu[:, i_s], S[:, i_s], target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)] - #target *= 2 + # target *= 2 # compute the "cross" terms # TODO: we need input_slices here. @@ -362,7 +361,7 @@ class kern(parameterised): tmp = np.zeros((mu.shape[0], Z.shape[0])) p1.psi1(Z, mu, S, tmp) tmp2 = np.zeros_like(target) - p2.dpsi1_dZ((tmp[:,None,:]*dL_dpsi2).sum(1).T, Z, mu, S, tmp2) + p2.dpsi1_dZ((tmp[:, None, :] * dL_dpsi2).sum(1).T, Z, mu, S, tmp2) target += tmp2 return target * 2 @@ -379,7 +378,7 @@ class kern(parameterised): tmp = np.zeros((mu.shape[0], Z.shape[0])) p1.psi1(Z, mu, S, tmp) - p2.dpsi1_dmuS((tmp[:,None,:]*dL_dpsi2).sum(1).T*2., Z, mu, S, target_mu, target_S) + p2.dpsi1_dmuS((tmp[:, None, :] * dL_dpsi2).sum(1).T * 2., Z, mu, S, target_mu, target_S) return target_mu, target_S @@ -430,7 +429,7 @@ class kern(parameterised): Xnew = np.vstack((xx.flatten(), yy.flatten())).T Kx = self.K(Xnew, x, which_parts) Kx = Kx.reshape(resolution, resolution).T - pb.contour(xg, yg, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) + pb.contour(xg, yg, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) # @UndefinedVariable pb.xlim(xmin[0], xmax[0]) pb.ylim(xmin[1], xmax[1]) pb.xlabel("x1") diff --git a/GPy/kern/kernpart.py b/GPy/kern/kernpart.py index 92dc637a..2ed56d66 100644 --- a/GPy/kern/kernpart.py +++ b/GPy/kern/kernpart.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -class kernpart(object): +class Kernpart(object): def __init__(self,input_dim): """ The base class for a kernpart: a positive definite function which forms part of a kernel diff --git a/GPy/kern/linear.py b/GPy/kern/linear.py index 65c2a355..a6ff2278 100644 --- a/GPy/kern/linear.py +++ b/GPy/kern/linear.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np from ..util.linalg import tdot from scipy import weave -class linear(kernpart): +class linear(Kernpart): """ Linear kernel diff --git 
a/GPy/kern/periodic_Matern32.py b/GPy/kern/periodic_Matern32.py index cf5945b6..52c8bede 100644 --- a/GPy/kern/periodic_Matern32.py +++ b/GPy/kern/periodic_Matern32.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -from GPy.util.linalg import mdot, pdinv +from GPy.util.linalg import mdot from GPy.util.decorators import silence_errors -class periodic_Matern32(kernpart): +class periodic_Matern32(Kernpart): """ Kernel of the periodic subspace (up to a given frequency) of a Matern 3/2 RKHS. Only defined for input_dim=1. @@ -25,7 +25,7 @@ class periodic_Matern32(kernpart): """ - def __init__(self,input_dim=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi): + def __init__(self, input_dim=1, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi): assert input_dim==1, "Periodic kernels are only defined for input_dim=1" self.name = 'periodic_Mat32' self.input_dim = input_dim diff --git a/GPy/kern/periodic_Matern52.py b/GPy/kern/periodic_Matern52.py index c2a366e1..02c12e21 100644 --- a/GPy/kern/periodic_Matern52.py +++ b/GPy/kern/periodic_Matern52.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -from GPy.util.linalg import mdot, pdinv +from GPy.util.linalg import mdot from GPy.util.decorators import silence_errors -class periodic_Matern52(kernpart): +class periodic_Matern52(Kernpart): """ Kernel of the periodic subspace (up to a given frequency) of a Matern 5/2 RKHS. Only defined for input_dim=1. @@ -209,7 +209,7 @@ class periodic_Matern52(kernpart): F2lower = np.array(self._cos(self.basis_alpha*self.basis_omega**2,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None] #dK_dvar - dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T) + dK_dvar = 1. / self.variance * mdot(FX, self.Gi, FX.T) #dK_dlen da_dlen = [-3*self.a[0]/self.lengthscale, -2*self.a[1]/self.lengthscale, -self.a[2]/self.lengthscale, 0.] diff --git a/GPy/kern/periodic_exponential.py b/GPy/kern/periodic_exponential.py index 4ed724ef..124eeb2a 100644 --- a/GPy/kern/periodic_exponential.py +++ b/GPy/kern/periodic_exponential.py @@ -2,12 +2,12 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -from GPy.util.linalg import mdot, pdinv +from GPy.util.linalg import mdot from GPy.util.decorators import silence_errors -class periodic_exponential(kernpart): +class periodic_exponential(Kernpart): """ Kernel of the periodic subspace (up to a given frequency) of a exponential (Matern 1/2) RKHS. Only defined for input_dim=1. @@ -25,7 +25,7 @@ class periodic_exponential(kernpart): """ - def __init__(self,input_dim=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi): + def __init__(self, input_dim=1, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi): assert input_dim==1, "Periodic kernels are only defined for input_dim=1" self.name = 'periodic_exp' self.input_dim = input_dim diff --git a/GPy/kern/prod.py b/GPy/kern/prod.py index b88ffbc0..493129c6 100644 --- a/GPy/kern/prod.py +++ b/GPy/kern/prod.py @@ -1,16 +1,16 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib -class prod(kernpart): +class prod(Kernpart): """ Computes the product of 2 kernels :param k1, k2: the kernels to multiply - :type k1, k2: kernpart + :type k1, k2: Kernpart :param tensor: The kernels are either multiply as functions defined on the same input space (default) or on the product of the input spaces :type tensor: Boolean :rtype: kernel object diff --git a/GPy/kern/prod_orthogonal.py b/GPy/kern/prod_orthogonal.py index 334803a0..237c9557 100644 --- a/GPy/kern/prod_orthogonal.py +++ b/GPy/kern/prod_orthogonal.py @@ -1,17 +1,17 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib #from scipy import integrate # This may not be necessary (Nicolas, 20th Feb) -class prod_orthogonal(kernpart): +class prod_orthogonal(Kernpart): """ Computes the product of 2 kernels :param k1, k2: the kernels to multiply - :type k1, k2: kernpart + :type k1, k2: Kernpart :rtype: kernel object """ diff --git a/GPy/kern/rational_quadratic.py b/GPy/kern/rational_quadratic.py index c555330e..d1e7a7e3 100644 --- a/GPy/kern/rational_quadratic.py +++ b/GPy/kern/rational_quadratic.py @@ -2,10 +2,10 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -class rational_quadratic(kernpart): +class rational_quadratic(Kernpart): """ rational quadratic kernel @@ -21,7 +21,7 @@ class rational_quadratic(kernpart): :type lengthscale: float :param power: the power :math:`\\alpha` :type power: float - :rtype: kernpart object + :rtype: Kernpart object """ def __init__(self,input_dim,variance=1.,lengthscale=1.,power=1.): diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py index 8047a9ad..08af8964 100644 --- a/GPy/kern/rbf.py +++ b/GPy/kern/rbf.py @@ -2,13 +2,13 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib from scipy import weave from ..util.linalg import tdot -class rbf(kernpart): +class rbf(Kernpart): """ Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel: diff --git a/GPy/kern/rbfcos.py b/GPy/kern/rbfcos.py index 1450e01a..b1e99d3c 100644 --- a/GPy/kern/rbfcos.py +++ b/GPy/kern/rbfcos.py @@ -3,10 +3,10 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -class rbfcos(kernpart): +class rbfcos(Kernpart): def __init__(self,input_dim,variance=1.,frequencies=None,bandwidths=None,ARD=False): self.input_dim = input_dim self.name = 'rbfcos' diff --git a/GPy/kern/spline.py b/GPy/kern/spline.py index 9471e2c8..f2802180 100644 --- a/GPy/kern/spline.py +++ b/GPy/kern/spline.py @@ -2,14 +2,14 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np import hashlib def theta(x): """Heaviside step function""" return np.where(x>=0.,1.,0.) 
-class spline(kernpart): +class spline(Kernpart): """ Spline kernel diff --git a/GPy/kern/symmetric.py b/GPy/kern/symmetric.py index 246e34cd..c7099a6f 100644 --- a/GPy/kern/symmetric.py +++ b/GPy/kern/symmetric.py @@ -1,18 +1,18 @@ # Copyright (c) 2012 James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -class symmetric(kernpart): +class symmetric(Kernpart): """ Symmetrical kernels :param k: the kernel to symmetrify - :type k: kernpart + :type k: Kernpart :param transform: the transform to use in symmetrification (allows symmetry on specified axes) :type transform: A numpy array (input_dim x input_dim) specifiying the transform - :rtype: kernpart + :rtype: Kernpart """ def __init__(self,k,transform=None): diff --git a/GPy/kern/sympykern.py b/GPy/kern/sympykern.py index 21796fc6..9e1cdd54 100644 --- a/GPy/kern/sympykern.py +++ b/GPy/kern/sympykern.py @@ -9,9 +9,9 @@ import sys current_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) import tempfile import pdb -from kernpart import kernpart +from kernpart import Kernpart -class spkern(kernpart): +class spkern(Kernpart): """ A kernel object, where all the hard work in done by sympy. diff --git a/GPy/kern/white.py b/GPy/kern/white.py index 6944db13..41f075c3 100644 --- a/GPy/kern/white.py +++ b/GPy/kern/white.py @@ -2,9 +2,9 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernpart import kernpart +from kernpart import Kernpart import numpy as np -class white(kernpart): +class white(Kernpart): """ White noise kernel. diff --git a/GPy/models/generalized_fitc.py b/GPy/models/generalized_fitc.py index a96609cc..1960d0e5 100644 --- a/GPy/models/generalized_fitc.py +++ b/GPy/models/generalized_fitc.py @@ -2,21 +2,14 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -import pylab as pb -from ..util.linalg import mdot, jitchol, chol_inv, pdinv, trace_dot -from ..util.plot import gpplot -from .. 
import kern -from scipy import stats, linalg -<<<<<<< HEAD:GPy/models/generalized_FITC.py -from sparse_GP import sparse_GP -======= -from ..core import SparseGP ->>>>>>> 7040b26f41f382edfdca3d3f7b689b9bbfc1a54f:GPy/models/generalized_fitc.py +from scipy import linalg +from GPy.core.sparse_gp import SparseGP +from GPy.util.linalg import mdot -def backsub_both_sides(L,X): +def backsub_both_sides(L, X): """ Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky""" - tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1) - return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T + tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1) + return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T class GeneralizedFITC(SparseGP): @@ -45,17 +38,13 @@ class GeneralizedFITC(SparseGP): self.num_inducing = self.Z.shape[0] self.true_precision = likelihood.precision -<<<<<<< HEAD:GPy/models/generalized_FITC.py - sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False) -======= super(GeneralizedFITC, self).__init__(X, likelihood, kernel=kernel, Z=self.Z, X_variance=X_variance, normalize_X=normalize_X) self._set_params(self._get_params()) ->>>>>>> 7040b26f41f382edfdca3d3f7b689b9bbfc1a54f:GPy/models/generalized_fitc.py def _set_params(self, p): - self.Z = p[:self.num_inducing*self.input_dim].reshape(self.num_inducing, self.input_dim) - self.kern._set_params(p[self.Z.size:self.Z.size+self.kern.Nparam]) - self.likelihood._set_params(p[self.Z.size+self.kern.Nparam:]) + self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim) + self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.num_params]) + self.likelihood._set_params(p[self.Z.size + self.kern.num_params:]) self._compute_kernel_matrices() self._computations() self._FITC_computations() @@ -73,9 +62,9 @@ class GeneralizedFITC(SparseGP): if self.has_uncertain_inputs: raise NotImplementedError, "FITC approximation not implemented for uncertain inputs" else: - self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0) + self.likelihood.fit_FITC(self.Kmm, self.psi1, self.psi0) self.true_precision = self.likelihood.precision # Save the true precision - self.likelihood.precision = self.true_precision/(1. + self.true_precision*self.Diag0[:,None]) # Add the diagonal element of the FITC approximation + self.likelihood.precision = self.true_precision / (1. + self.true_precision * self.Diag0[:, None]) # Add the diagonal element of the FITC approximation self._set_params(self._get_params()) # update the GP def _FITC_computations(self): @@ -87,37 +76,37 @@ class GeneralizedFITC(SparseGP): - removes the extra terms computed in the SparseGP approximation - computes the likelihood gradients wrt the true precision. 
""" - #NOTE the true precison is now 'true_precision' not 'precision' + # NOTE the true precison is now 'true_precision' not 'precision' if self.likelihood.is_heteroscedastic: # Compute generalized FITC's diagonal term of the covariance - self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.num_inducing),lower=1) - Lmipsi1 = np.dot(self.Lmi,self.psi1) - self.Qnn = np.dot(Lmipsi1.T,Lmipsi1) - #self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm) - #self.Qnn = mdot(self.psi1.T,self.Kmmi,self.psi1) - #a = kj + self.Lmi, info = linalg.lapack.flapack.dtrtrs(self.Lm, np.eye(self.num_inducing), lower=1) + Lmipsi1 = np.dot(self.Lmi, self.psi1) + self.Qnn = np.dot(Lmipsi1.T, Lmipsi1) + # self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm) + # self.Qnn = mdot(self.psi1.T,self.Kmmi,self.psi1) + # a = kj self.Diag0 = self.psi0 - np.diag(self.Qnn) - Iplus_Dprod_i = 1./(1.+ self.Diag0 * self.true_precision.flatten()) + Iplus_Dprod_i = 1. / (1. + self.Diag0 * self.true_precision.flatten()) self.Diag = self.Diag0 * Iplus_Dprod_i - self.P = Iplus_Dprod_i[:,None] * self.psi1.T - self.RPT0 = np.dot(self.Lmi,self.psi1) - self.L = np.linalg.cholesky(np.eye(self.num_inducing) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T)) - self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1) - self.RPT = np.dot(self.R,self.P.T) - self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT) + self.P = Iplus_Dprod_i[:, None] * self.psi1.T + self.RPT0 = np.dot(self.Lmi, self.psi1) + self.L = np.linalg.cholesky(np.eye(self.num_inducing) + np.dot(self.RPT0, ((1. - Iplus_Dprod_i) / self.Diag0)[:, None] * self.RPT0.T)) + self.R, info = linalg.lapack.dtrtrs(self.L, self.Lmi, lower=1) + self.RPT = np.dot(self.R, self.P.T) + self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T, self.RPT) self.w = self.Diag * self.likelihood.v_tilde - self.Gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde)) - self.mu = self.w + np.dot(self.P,self.Gamma) + self.Gamma = np.dot(self.R.T, np.dot(self.RPT, self.likelihood.v_tilde)) + self.mu = self.w + np.dot(self.P, self.Gamma) # Remove extra term from dL_dpsi1 - self.dL_dpsi1 -= mdot(self.Lmi.T,Lmipsi1*self.likelihood.precision.flatten().reshape(1,self.N)) - #self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm) - #self.dL_dpsi1 -= mdot(self.Kmmi,self.psi1*self.likelihood.precision.flatten().reshape(1,self.N)) #dB + self.dL_dpsi1 -= mdot(self.Lmi.T, Lmipsi1 * self.likelihood.precision.flatten().reshape(1, self.N)) + # self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm) + # self.dL_dpsi1 -= mdot(self.Kmmi,self.psi1*self.likelihood.precision.flatten().reshape(1,self.N)) #dB #########333333 - #self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B) + # self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B) #########333333 @@ -125,54 +114,54 @@ class GeneralizedFITC(SparseGP): else: raise NotImplementedError, "homoscedastic fitc not implemented" # Remove extra term from dL_dpsi1 - #self.dL_dpsi1 += -mdot(self.Kmmi,self.psi1*self.likelihood.precision) #dB + # self.dL_dpsi1 += -mdot(self.Kmmi,self.psi1*self.likelihood.precision) #dB sf = self.scale_factor - sf2 = sf**2 + sf2 = sf ** 2 # Remove extra term from dL_dKmm - self.dL_dKmm += 0.5 * self.input_dim * mdot(self.Lmi.T, self.A, self.Lmi)*sf2 # dB + self.dL_dKmm += 0.5 * self.input_dim * mdot(self.Lmi.T, self.A, self.Lmi) * sf2 # dB self.dL_dpsi0 = None - #the partial derivative vector for the likelihood + # the partial derivative vector for the likelihood if self.likelihood.Nparams == 0: 
self.partial_for_likelihood = None elif self.likelihood.is_heteroscedastic: raise NotImplementedError, "heteroscedastic derivates not implemented" else: raise NotImplementedError, "homoscedastic derivatives not implemented" - #likelihood is not heterscedatic - #self.partial_for_likelihood = - 0.5 * self.N*self.input_dim*self.likelihood.precision + 0.5 * np.sum(np.square(self.likelihood.Y))*self.likelihood.precision**2 - #self.partial_for_likelihood += 0.5 * self.input_dim * trace_dot(self.Bi,self.A)*self.likelihood.precision - #self.partial_for_likelihood += self.likelihood.precision*(0.5*trace_dot(self.psi2_beta_scaled,self.E*sf2) - np.trace(self.Cpsi1VVpsi1)) - #TODO partial derivative vector for the likelihood not implemented + # likelihood is not heterscedatic + # self.partial_for_likelihood = - 0.5 * self.N*self.input_dim*self.likelihood.precision + 0.5 * np.sum(np.square(self.likelihood.Y))*self.likelihood.precision**2 + # self.partial_for_likelihood += 0.5 * self.input_dim * trace_dot(self.Bi,self.A)*self.likelihood.precision + # self.partial_for_likelihood += self.likelihood.precision*(0.5*trace_dot(self.psi2_beta_scaled,self.E*sf2) - np.trace(self.Cpsi1VVpsi1)) + # TODO partial derivative vector for the likelihood not implemented def dL_dtheta(self): """ Compute and return the derivative of the log marginal likelihood wrt the parameters of the kernel """ - dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm,self.Z) + dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm, self.Z) if self.has_uncertain_inputs: raise NotImplementedError, "heteroscedatic derivates not implemented" else: - #NOTE in SparseGP this would include the gradient wrt psi0 - dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1,self.Z,self.X) + # NOTE in SparseGP this would include the gradient wrt psi0 + dL_dtheta += self.kern.dK_dtheta(self.dL_dpsi1, self.Z, self.X) return dL_dtheta def log_likelihood(self): """ Compute the (lower bound on the) log marginal likelihood """ - sf2 = self.scale_factor**2 + sf2 = self.scale_factor ** 2 if self.likelihood.is_heteroscedastic: - A = -0.5*self.N*self.input_dim*np.log(2.*np.pi) +0.5*np.sum(np.log(self.likelihood.precision)) -0.5*np.sum(self.V*self.likelihood.Y) + A = -0.5 * self.N * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.V * self.likelihood.Y) else: - A = -0.5*self.N*self.input_dim*(np.log(2.*np.pi) + np.log(self.likelihood._variance)) -0.5*self.likelihood.precision*self.likelihood.trYYT - C = -self.input_dim * (np.sum(np.log(np.diag(self.LB))) + 0.5*self.num_inducing*np.log(sf2)) - #C = -0.5*self.input_dim * (self.B_logdet + self.num_inducing*np.log(sf2)) - D = 0.5*np.sum(np.square(self._LBi_Lmi_psi1V)) - #self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T) - #D_ = 0.5*np.trace(self.Cpsi1VVpsi1) - return A+C+D + A = -0.5 * self.N * self.input_dim * (np.log(2.*np.pi) + np.log(self.likelihood._variance)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT + C = -self.input_dim * (np.sum(np.log(np.diag(self.LB))) + 0.5 * self.num_inducing * np.log(sf2)) + # C = -0.5*self.input_dim * (self.B_logdet + self.num_inducing*np.log(sf2)) + D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V)) + # self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T) + # D_ = 0.5*np.trace(self.Cpsi1VVpsi1) + return A + C + D def _raw_predict(self, Xnew, which_parts, full_cov=False): if self.likelihood.is_heteroscedastic: @@ -191,30 +180,30 @@ class GeneralizedFITC(SparseGP): # = I - [RPT0] * (U*U.T)^-1 * [RPT0].T # = I - V.T * V U = 
np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn) - V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1) - C = np.eye(self.num_inducing) - np.dot(V.T,V) - mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:]) - #self.C = C - #self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T - #self.mu_u = mu_u - #self.U = U + V, info = linalg.lapack.dtrtrs(U, self.RPT0.T, lower=1) + C = np.eye(self.num_inducing) - np.dot(V.T, V) + mu_u = np.dot(C, self.RPT0) * (1. / self.Diag0[None, :]) + # self.C = C + # self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T + # self.mu_u = mu_u + # self.U = U # q(u|y) = N(u| R0i*mu_H,R0i*Sigma_H*R0i.T) - mu_H = np.dot(mu_u,self.mu) + mu_H = np.dot(mu_u, self.mu) self.mu_H = mu_H - Sigma_H = C + np.dot(mu_u,np.dot(self.Sigma,mu_u.T)) + Sigma_H = C + np.dot(mu_u, np.dot(self.Sigma, mu_u.T)) # q(f_star|y) = N(f_star|mu_star,sigma2_star) Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts) - KR0T = np.dot(Kx.T,self.Lmi.T) - mu_star = np.dot(KR0T,mu_H) + KR0T = np.dot(Kx.T, self.Lmi.T) + mu_star = np.dot(KR0T, mu_H) if full_cov: - Kxx = self.kern.K(Xnew,which_parts=which_parts) - var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.num_inducing),KR0T.T)) + Kxx = self.kern.K(Xnew, which_parts=which_parts) + var = Kxx + np.dot(KR0T, np.dot(Sigma_H - np.eye(self.num_inducing), KR0T.T)) else: - Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts) - Kxx_ = self.kern.K(Xnew,which_parts=which_parts) # TODO: RA, is this line needed? - var_ = Kxx_ + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.num_inducing),KR0T.T)) # TODO: RA, is this line needed? - var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.num_inducing),KR0T.T),0))[:,None] - return mu_star[:,None],var + Kxx = self.kern.Kdiag(Xnew, which_parts=which_parts) + Kxx_ = self.kern.K(Xnew, which_parts=which_parts) # TODO: RA, is this line needed? + var_ = Kxx_ + np.dot(KR0T, np.dot(Sigma_H - np.eye(self.num_inducing), KR0T.T)) # TODO: RA, is this line needed? + var = (Kxx + np.sum(KR0T.T * np.dot(Sigma_H - np.eye(self.num_inducing), KR0T.T), 0))[:, None] + return mu_star[:, None], var else: raise NotImplementedError, "homoscedastic fitc not implemented" """ diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py index 5589304a..4bd37022 100644 --- a/GPy/models/gplvm.py +++ b/GPy/models/gplvm.py @@ -6,7 +6,7 @@ import numpy as np import pylab as pb import sys, pdb from .. import kern -from ..core import model +from ..core import Model from ..util.linalg import pdinv, PCA from ..core import GP from ..likelihoods import Gaussian diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index 4300f113..eeae94f4 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -3,7 +3,7 @@ Created on 10 Apr 2013 @author: Max Zwiessele ''' -from GPy.core import model +from GPy.core import Model from GPy.core import SparseGP from GPy.util.linalg import PCA import numpy @@ -12,7 +12,7 @@ import pylab from GPy.kern.kern import kern from GPy.models.bayesian_gplvm import BayesianGPLVM -class MRD(model): +class MRD(Model): """ Do MRD on given Datasets in Ylist. 
All Ys in likelihood_list are in [N x Dn], where Dn can be different per Yn, @@ -78,7 +78,7 @@ class MRD(model): self.NQ = self.N * self.input_dim self.MQ = self.num_inducing * self.input_dim - model.__init__(self) # @UndefinedVariable + Model.__init__(self) # @UndefinedVariable self._set_params(self._get_params()) @property diff --git a/GPy/testing/examples_tests.py b/GPy/testing/examples_tests.py index 51131fc3..14fa5593 100644 --- a/GPy/testing/examples_tests.py +++ b/GPy/testing/examples_tests.py @@ -12,31 +12,31 @@ from nose.tools import nottest import sys class ExamplesTests(unittest.TestCase): - def _checkgrad(self, model): - self.assertTrue(model.checkgrad()) + def _checkgrad(self, Model): + self.assertTrue(Model.checkgrad()) - def _model_instance(self, model): - self.assertTrue(isinstance(model, GPy.models)) + def _model_instance(self, Model): + self.assertTrue(isinstance(Model, GPy.models)) """ -def model_instance_generator(model): +def model_instance_generator(Model): def check_model_returned(self): - self._model_instance(model) + self._model_instance(Model) return check_model_returned -def checkgrads_generator(model): +def checkgrads_generator(Model): def model_checkgrads(self): - self._checkgrad(model) + self._checkgrad(Model) return model_checkgrads """ -def model_checkgrads(model): - model.randomize() - assert model.checkgrad() +def model_checkgrads(Model): + Model.randomize() + assert Model.checkgrad() -def model_instance(model): - assert isinstance(model, GPy.core.model) +def model_instance(Model): + assert isinstance(Model, GPy.core.Model) @nottest def test_models(): @@ -57,25 +57,25 @@ def test_models(): continue print "Testing example: ", example[0] - # Generate model - model = example[1]() - print model + # Generate Model + Model = example[1]() + print Model # Create tests for instance check """ - test = model_instance_generator(model) + test = model_instance_generator(Model) test.__name__ = 'test_instance_%s' % example[0] setattr(ExamplesTests, test.__name__, test) #Create tests for checkgrads check - test = checkgrads_generator(model) + test = checkgrads_generator(Model) test.__name__ = 'test_checkgrads_%s' % example[0] setattr(ExamplesTests, test.__name__, test) """ model_checkgrads.description = 'test_checkgrads_%s' % example[0] - yield model_checkgrads, model + yield model_checkgrads, Model model_instance.description = 'test_instance_%s' % example[0] - yield model_instance, model + yield model_instance, Model if __name__ == "__main__": print "Running unit tests, please be (very) patient..." 
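Note on the examples test above: test_models relies on nose's generator-test protocol, yielding (callable, Model) pairs that nose runs as individual tests, with the callable's description attribute shown as the test name. A minimal sketch of the same pattern, using a hypothetical toy object in place of a real GPy example (nothing below touches GPy):

    import numpy as np

    def make_toy(seed):
        # Hypothetical stand-in for one GPy example model; only the interface matters here.
        class Toy(object):
            def randomize(self):
                self.x = np.random.RandomState(seed).randn(3)
            def checkgrad(self):
                return True  # a real model would compare analytic and numerical gradients
        return Toy()

    def check_grads(m):
        m.randomize()
        assert m.checkgrad()

    def test_toy_models():
        for seed in (0, 1, 2):
            check_grads.description = 'test_checkgrads_%i' % seed  # reported by nose as the test name
            yield check_grads, make_toy(seed)  # nose runs each yielded (callable, arg) pair as one test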
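Also for reference, backsub_both_sides in generalized_fitc.py above forms L^-T * X * L^-1 with two triangular solves instead of building an explicit inverse. The sketch below reproduces the idea with scipy.linalg.solve_triangular and checks it against an explicit-inverse reference; the test matrices are arbitrary and only illustrate the identity, they are not taken from the model:

    import numpy as np
    from scipy.linalg import solve_triangular

    def backsub_both_sides_ref(L, X):
        """Return L^{-T} X L^{-1} for symmetric X and lower-triangular L."""
        tmp = solve_triangular(L, X, lower=True, trans='T')          # L^{-T} X
        return solve_triangular(L, tmp.T, lower=True, trans='T').T   # (L^{-T} X^T L^{-1})^T = L^{-T} X L^{-1}

    rng = np.random.RandomState(0)
    A = rng.randn(6, 6)
    X = A + A.T                                           # symmetric test matrix
    L = np.linalg.cholesky(A.dot(A.T) + 6. * np.eye(6))   # some lower Cholesky factor

    Li = np.linalg.inv(L)
    assert np.allclose(backsub_both_sides_ref(L, X), Li.T.dot(X).dot(Li))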
diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 2d97c82c..98c75827 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -21,7 +21,7 @@ class KernelTests(unittest.TestCase): """ X = np.random.rand(30, 4) K = np.dot(X, X.T) - kernel = GPy.kern.fixed(4, K) + kernel = GPy.kern.Fixed(4, K) Y = np.ones((30,1)) m = GPy.models.GPRegression(X,Y,kernel=kernel) self.assertTrue(m.checkgrad()) diff --git a/GPy/testing/psi_stat_gradient_tests.py b/GPy/testing/psi_stat_gradient_tests.py index 2257d16c..7bdfe48d 100644 --- a/GPy/testing/psi_stat_gradient_tests.py +++ b/GPy/testing/psi_stat_gradient_tests.py @@ -8,9 +8,9 @@ import numpy import GPy import itertools -from GPy.core import model +from GPy.core import Model -class PsiStatModel(model): +class PsiStatModel(Model): def __init__(self, which, X, X_variance, Z, M, kernel): self.which = which self.X = X diff --git a/GPy/testing/unit_tests.py b/GPy/testing/unit_tests.py index 020f3890..1507381c 100644 --- a/GPy/testing/unit_tests.py +++ b/GPy/testing/unit_tests.py @@ -5,7 +5,6 @@ import unittest import numpy as np import GPy -from GPy.likelihoods.likelihood_functions import Binomial class GradientTests(unittest.TestCase): def setUp(self): @@ -144,7 +143,7 @@ class GradientTests(unittest.TestCase): def test_GPLVM_rbf_bias_white_kern_2D(self): """ Testing GPLVM with rbf + bias and white kernel """ - N, input_dim, D = 50, 1, 2 + N, input_dim = 50, 1 X = np.random.rand(N, input_dim) k = GPy.kern.rbf(input_dim, 0.5, 0.9 * np.ones((1,))) + GPy.kern.bias(input_dim, 0.1) + GPy.kern.white(input_dim, 0.05) K = k.K(X) @@ -155,7 +154,7 @@ class GradientTests(unittest.TestCase): def test_GPLVM_rbf_linear_white_kern_2D(self): """ Testing GPLVM with rbf + bias and white kernel """ - N, input_dim, D = 50, 1, 2 + N, input_dim = 50, 1 X = np.random.rand(N, input_dim) k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim, 0.1) + GPy.kern.white(input_dim, 0.05) K = k.K(X) @@ -194,12 +193,12 @@ class GradientTests(unittest.TestCase): N = 20 X = np.hstack([np.random.rand(N / 2) + 1, np.random.rand(N / 2) - 1])[:, None] k = GPy.kern.rbf(1) + GPy.kern.white(1) - Y = np.hstack([np.ones(N/2),-np.ones(N/2)])[:,None] + Y = np.hstack([np.ones(N / 2), -np.ones(N / 2)])[:, None] - distribution = GPy.likelihoods.likelihood_functions.binomial() + distribution = GPy.likelihoods.likelihood_functions.Binomial() likelihood = GPy.likelihoods.EP(Y, distribution) - #likelihood = GPy.inference.likelihoods.binomial(Y) - m = GPy.models.generalized_FITC(X,likelihood,k,inducing=4) + # likelihood = GPy.inference.likelihoods.binomial(Y) + m = GPy.models.generalized_fitc(X, likelihood, k, inducing=4) m.constrain_positive('(var|len)') m.approximate_likelihood() self.assertTrue(m.checkgrad()) diff --git a/GPy/util/plot_latent.py b/GPy/util/plot_latent.py index c09b5c93..e147d840 100644 --- a/GPy/util/plot_latent.py +++ b/GPy/util/plot_latent.py @@ -2,9 +2,9 @@ import pylab as pb import numpy as np from .. 
import util -def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, marker='o', s=40): +def plot_latent(Model, labels=None, which_indices=None, resolution=50, ax=None, marker='o', s=40): """ - :param labels: a np.array of size model.N containing labels for the points (can be number, strings, etc) + :param labels: a np.array of size Model.N containing labels for the points (can be number, strings, etc) :param resolution: the resolution of the grid on which to evaluate the predictive variance """ if ax is None: @@ -12,26 +12,26 @@ def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, util.plot.Tango.reset() if labels is None: - labels = np.ones(model.N) + labels = np.ones(Model.N) if which_indices is None: - if model.input_dim==1: + if Model.input_dim==1: input_1 = 0 input_2 = None - if model.input_dim==2: + if Model.input_dim==2: input_1, input_2 = 0,1 else: try: - input_1, input_2 = np.argsort(model.input_sensitivity())[:2] + input_1, input_2 = np.argsort(Model.input_sensitivity())[:2] except: raise ValueError, "cannot Atomatically determine which dimensions to plot, please pass 'which_indices'" else: input_1, input_2 = which_indices #first, plot the output variance as a function of the latent space - Xtest, xx,yy,xmin,xmax = util.plot.x_frame2D(model.X[:,[input_1, input_2]],resolution=resolution) - Xtest_full = np.zeros((Xtest.shape[0], model.X.shape[1])) + Xtest, xx,yy,xmin,xmax = util.plot.x_frame2D(Model.X[:,[input_1, input_2]],resolution=resolution) + Xtest_full = np.zeros((Xtest.shape[0], Model.X.shape[1])) Xtest_full[:, :2] = Xtest - mu, var, low, up = model.predict(Xtest_full) + mu, var, low, up = Model.predict(Xtest_full) var = var[:, :1] ax.imshow(var.reshape(resolution, resolution).T, extent=[xmin[0], xmax[0], xmin[1], xmax[1]], cmap=pb.cm.binary,interpolation='bilinear',origin='lower') @@ -55,12 +55,12 @@ def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, m = marker index = np.nonzero(labels==ul)[0] - if model.input_dim==1: - x = model.X[index,input_1] + if Model.input_dim==1: + x = Model.X[index,input_1] y = np.zeros(index.size) else: - x = model.X[index,input_1] - y = model.X[index,input_2] + x = Model.X[index,input_1] + y = Model.X[index,input_2] ax.scatter(x, y, marker=m, s=s, color=util.plot.Tango.nextMedium(), label=this_label) ax.set_xlabel('latent dimension %i'%input_1) @@ -76,16 +76,16 @@ def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, return ax -def plot_latent_indices(model, which_indices=None, *args, **kwargs): +def plot_latent_indices(Model, which_indices=None, *args, **kwargs): if which_indices is None: try: - input_1, input_2 = np.argsort(model.input_sensitivity())[:2] + input_1, input_2 = np.argsort(Model.input_sensitivity())[:2] except: raise ValueError, "cannot Automatically determine which dimensions to plot, please pass 'which_indices'" else: input_1, input_2 = which_indices - ax = plot_latent(model, which_indices=[input_1, input_2], *args, **kwargs) + ax = plot_latent(Model, which_indices=[input_1, input_2], *args, **kwargs) # TODO: Here test if there are inducing points... 
- ax.plot(model.Z[:, input_1], model.Z[:, input_2], '^w') + ax.plot(Model.Z[:, input_1], Model.Z[:, input_2], '^w') return ax \ No newline at end of file diff --git a/GPy/util/visualize.py b/GPy/util/visualize.py index a3c10b97..66322c15 100644 --- a/GPy/util/visualize.py +++ b/GPy/util/visualize.py @@ -43,16 +43,16 @@ class vector_show(data_show): class lvm(data_show): - def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]): - """Visualize a latent variable model + def __init__(self, vals, Model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]): + """Visualize a latent variable Model - :param model: the latent variable model to visualize. + :param Model: the latent variable Model to visualize. :param data_visualize: the object used to visualize the data which has been modelled. :type data_visualize: visualize.data_show type. :param latent_axes: the axes where the latent visualization should be plotted. """ if vals == None: - vals = model.X[0] + vals = Model.X[0] data_show.__init__(self, vals, axes=latent_axes) @@ -68,13 +68,13 @@ class lvm(data_show): self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_enter_event', self.on_enter) self.data_visualize = data_visualize - self.model = model + self.Model = Model self.latent_axes = latent_axes self.sense_axes = sense_axes self.called = False self.move_on = False self.latent_index = latent_index - self.latent_dim = model.input_dim + self.latent_dim = Model.input_dim # The red cross which shows current latent point. self.latent_values = vals @@ -85,7 +85,7 @@ class lvm(data_show): def modify(self, vals): """When latent values are modified update the latent representation and ulso update the output visualization.""" self.vals = vals.copy() - y = self.model.predict(self.vals)[0] + y = self.Model.predict(self.vals)[0] self.data_visualize.modify(y) self.latent_handle.set_data(self.vals[self.latent_index[0]], self.vals[self.latent_index[1]]) self.axes.figure.canvas.draw() @@ -113,15 +113,15 @@ class lvm(data_show): # A click in the bar chart axis for selection a dimension. 
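Aside on plot_latent above: when which_indices is not supplied, the two latent dimensions are chosen as np.argsort(Model.input_sensitivity())[:2]. With a made-up sensitivity vector this picks the two smallest values, as the toy lines below show; whether the smallest or the largest sensitivities are the intended pair is left exactly as in the code above:

    import numpy as np

    # Made-up sensitivities for a 5-dimensional latent space (not from any real model).
    sensitivity = np.array([0.8, 0.1, 3.2, 0.05, 1.1])

    input_1, input_2 = np.argsort(sensitivity)[:2]
    print input_1, input_2   # -> 3 1 : the two least sensitive dimensions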
if self.sense_axes != None: self.sense_axes.cla() - self.sense_axes.bar(np.arange(self.model.input_dim),1./self.model.input_sensitivity(),color='b') + self.sense_axes.bar(np.arange(self.Model.input_dim),1./self.Model.input_sensitivity(),color='b') if self.latent_index[1] == self.latent_index[0]: - self.sense_axes.bar(np.array(self.latent_index[0]),1./self.model.input_sensitivity()[self.latent_index[0]],color='y') - self.sense_axes.bar(np.array(self.latent_index[1]),1./self.model.input_sensitivity()[self.latent_index[1]],color='y') + self.sense_axes.bar(np.array(self.latent_index[0]),1./self.Model.input_sensitivity()[self.latent_index[0]],color='y') + self.sense_axes.bar(np.array(self.latent_index[1]),1./self.Model.input_sensitivity()[self.latent_index[1]],color='y') else: - self.sense_axes.bar(np.array(self.latent_index[0]),1./self.model.input_sensitivity()[self.latent_index[0]],color='g') - self.sense_axes.bar(np.array(self.latent_index[1]),1./self.model.input_sensitivity()[self.latent_index[1]],color='r') + self.sense_axes.bar(np.array(self.latent_index[0]),1./self.Model.input_sensitivity()[self.latent_index[0]],color='g') + self.sense_axes.bar(np.array(self.latent_index[1]),1./self.Model.input_sensitivity()[self.latent_index[1]],color='r') self.sense_axes.figure.canvas.draw() @@ -131,21 +131,21 @@ class lvm_subplots(lvm): latent_axes is a np array of dimension np.ceil(input_dim/2), one for each pair of the latent dimensions. """ - def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None): - self.nplots = int(np.ceil(model.input_dim/2.))+1 + def __init__(self, vals, Model, data_visualize, latent_axes=None, sense_axes=None): + self.nplots = int(np.ceil(Model.input_dim/2.))+1 assert len(latent_axes)==self.nplots if vals==None: - vals = model.X[0, :] + vals = Model.X[0, :] self.latent_values = vals for i, axis in enumerate(latent_axes): if i == self.nplots-1: - if self.nplots*2!=model.input_dim: + if self.nplots*2!=Model.input_dim: latent_index = [i*2, i*2] - lvm.__init__(self, self.latent_vals, model, data_visualize, axis, sense_axes, latent_index=latent_index) + lvm.__init__(self, self.latent_vals, Model, data_visualize, axis, sense_axes, latent_index=latent_index) else: latent_index = [i*2, i*2+1] - lvm.__init__(self, self.latent_vals, model, data_visualize, axis, latent_index=latent_index) + lvm.__init__(self, self.latent_vals, Model, data_visualize, axis, latent_index=latent_index) @@ -158,7 +158,7 @@ class lvm_dimselect(lvm): GPy.examples.dimensionality_reduction.BGPVLM_oil() """ - def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0, 1]): + def __init__(self, vals, Model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0, 1]): if latent_axes==None and sense_axes==None: self.fig,(latent_axes,self.sense_axes) = plt.subplots(1,2) elif sense_axes==None: @@ -167,14 +167,14 @@ class lvm_dimselect(lvm): else: self.sense_axes = sense_axes - lvm.__init__(self,vals,model,data_visualize,latent_axes,sense_axes,latent_index) + lvm.__init__(self,vals,Model,data_visualize,latent_axes,sense_axes,latent_index) print "use left and right mouse butons to select dimensions" def on_click(self, event): if event.inaxes==self.sense_axes: - new_index = max(0,min(int(np.round(event.xdata-0.5)),self.model.input_dim-1)) + new_index = max(0,min(int(np.round(event.xdata-0.5)),self.Model.input_dim-1)) if event.button == 1: # Make it red if and y-axis (red=port=left) if it is a left button click self.latent_index[1] = new_index 
@@ -185,7 +185,7 @@ class lvm_dimselect(lvm): self.show_sensitivities() self.latent_axes.cla() - self.model.plot_latent(which_indices=self.latent_index, + self.Model.plot_latent(which_indices=self.latent_index, ax=self.latent_axes) self.latent_handle = self.latent_axes.plot([0],[0],'rx',mew=2)[0] self.modify(self.latent_values) @@ -199,7 +199,7 @@ class lvm_dimselect(lvm): def on_leave(self,event): latent_values = self.latent_values.copy() - y = self.model.predict(latent_values[None,:])[0] + y = self.Model.predict(latent_values[None,:])[0] self.data_visualize.modify(y)
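For reference, the diagonal correction built in _FITC_computations above is Diag0 = psi0 - diag(Qnn), with Qnn = psi1^T Kmm^-1 psi1 obtained through a triangular solve against the Cholesky factor of Kmm. The standalone sketch below computes the same quantity with a plain RBF covariance standing in for kern.K; none of it calls GPy, and the sizes, lengthscale and precision values are arbitrary:

    import numpy as np
    from scipy.linalg import solve_triangular

    rng = np.random.RandomState(1)
    N, M = 40, 7                       # data points, inducing points
    X = rng.randn(N, 1)
    Z = rng.randn(M, 1)

    def rbf(A, B):
        # unit-variance, unit-lengthscale RBF covariance, standing in for kern.K
        r2 = (A ** 2) + (B ** 2).T - 2. * A.dot(B.T)
        return np.exp(-0.5 * r2)

    Kmm = rbf(Z, Z) + 1e-6 * np.eye(M)
    psi1 = rbf(Z, X)                   # M x N, oriented as in the sparse GP code above
    psi0 = np.ones(N)                  # diag of Knn for a unit-variance RBF

    Lm = np.linalg.cholesky(Kmm)
    Lmipsi1 = solve_triangular(Lm, psi1, lower=True)   # Lm^{-1} psi1
    Qnn_diag = np.sum(Lmipsi1 ** 2, axis=0)            # diag(psi1^T Kmm^{-1} psi1)
    Diag0 = psi0 - Qnn_diag                            # FITC correction; nonnegative up to jitter

    # and the precision rescaling from approximate_likelihood, with a made-up true precision:
    true_precision = 4. * np.ones(N)
    precision = true_precision / (1. + true_precision * Diag0)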
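Similarly, _set_params in generalized_fitc.py above unpacks a single flat parameter vector in a fixed order: the inducing inputs Z, then kern.num_params kernel parameters (the attribute this diff renames from Nparam), then the likelihood parameters. A toy illustration of that slicing with hypothetical sizes:

    import numpy as np

    num_inducing, input_dim = 5, 2
    num_kern_params, num_lik_params = 3, 1   # hypothetical counts

    p = np.arange(num_inducing * input_dim + num_kern_params + num_lik_params, dtype=float)

    Z = p[:num_inducing * input_dim].reshape(num_inducing, input_dim)   # inducing inputs
    kern_params = p[Z.size:Z.size + num_kern_params]                    # passed to kern._set_params
    lik_params = p[Z.size + num_kern_params:]                           # passed to likelihood._set_params

    assert Z.size + kern_params.size + lik_params.size == p.size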
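Finally, the renamed Fixed kernel part is exercised in kernel_tests.py above. The snippet below simply mirrors that test against the API as it stands in this diff (build a precomputed covariance, wrap it in GPRegression, run checkgrad); it is a usage illustration, not additional test coverage:

    import numpy as np
    import GPy

    X = np.random.rand(30, 4)
    K = np.dot(X, X.T)                 # precomputed covariance over the 30 inputs
    kernel = GPy.kern.Fixed(4, K)      # variance * K, with a single variance parameter
    Y = np.ones((30, 1))
    m = GPy.models.GPRegression(X, Y, kernel=kernel)
    print m.checkgrad()                # the unit test above asserts this is True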