From db5fd17609346b56c14ce07b32fa1268abcdd007 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 7 Mar 2014 16:59:41 +0000 Subject: [PATCH 001/116] slicing support for kernel input dimension --- GPy/core/gp.py | 5 +- GPy/core/parameterization/array_core.py | 2 +- GPy/core/parameterization/parameter_core.py | 6 +- GPy/core/parameterization/variational.py | 16 ++- GPy/core/sparse_gp.py | 8 +- .../exact_gaussian_inference.py | 3 - GPy/kern/_src/add.py | 54 +++++---- GPy/kern/_src/kern.py | 106 ++++++++++++++++-- GPy/kern/_src/stationary.py | 8 +- GPy/util/caching.py | 35 +++--- 10 files changed, 178 insertions(+), 65 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 1add8268..6441561b 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -48,7 +48,7 @@ class GP(Model): self.Y_metadata = None assert isinstance(kernel, kern.Kern) - assert self.input_dim == kernel.input_dim + #assert self.input_dim == kernel.input_dim self.kern = kernel assert isinstance(likelihood, likelihoods.Likelihood) @@ -68,8 +68,9 @@ class GP(Model): def parameters_changed(self): self.posterior, self._log_marginal_likelihood, grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, Y_metadata=self.Y_metadata) + self.likelihood.update_gradients(np.diag(grad_dict['dL_dK'])) self.kern.update_gradients_full(grad_dict['dL_dK'], self.X) - + def log_likelihood(self): return self._log_marginal_likelihood diff --git a/GPy/core/parameterization/array_core.py b/GPy/core/parameterization/array_core.py index 27801e23..9ce0e8f6 100644 --- a/GPy/core/parameterization/array_core.py +++ b/GPy/core/parameterization/array_core.py @@ -16,7 +16,7 @@ class ObservableArray(np.ndarray, Observable): __array_priority__ = -1 # Never give back ObservableArray def __new__(cls, input_array): if not isinstance(input_array, ObservableArray): - obj = np.atleast_1d(input_array).view(cls) + obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['W', 'C'])).view(cls) else: obj = input_array cls.__name__ = "ObservableArray\n " return obj diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index a78cf02d..351eacef 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -15,7 +15,6 @@ Observable Pattern for patameterization from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED import numpy as np -import itertools __updated__ = '2013-12-16' @@ -43,6 +42,7 @@ class Observable(object): _updated = True def __init__(self, *args, **kwargs): self._observer_callables_ = [] + def __del__(self, *args, **kwargs): del self._observer_callables_ @@ -551,8 +551,8 @@ class OptimizationHandlable(Constrainable, Observable): return p def _set_params_transformed(self, p): - if p is self._param_array_: - p = p.copy() + #if p is self._param_array_: + p = p.copy() if self._has_fixes(): self._param_array_[self._fixes_] = p else: self._param_array_[:] = p self.untransform() diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py index 8bc7ca59..4c929cc8 100644 --- a/GPy/core/parameterization/variational.py +++ b/GPy/core/parameterization/variational.py @@ -66,10 +66,10 @@ class VariationalPosterior(Parameterized): def __init__(self, means=None, variances=None, name=None, **kw): super(VariationalPosterior, self).__init__(name=name, **kw) self.mean = Param("mean", means) - self.ndim = self.mean.ndim - self.shape = 
self.mean.shape self.variance = Param("variance", variances, Logexp()) self.add_parameters(self.mean, self.variance) + self.ndim = self.mean.ndim + self.shape = self.mean.shape self.num_data, self.input_dim = self.mean.shape if self.has_uncertain_inputs(): assert self.variance.shape == self.mean.shape, "need one variance per sample and dimenion" @@ -77,6 +77,18 @@ class VariationalPosterior(Parameterized): def has_uncertain_inputs(self): return not self.variance is None + def __getitem__(self, s): + import copy + n = self.__new__(self.__class__) + dc = copy.copy(self.__dict__) + dc['mean'] = dc['mean'][s] + dc['variance'] = dc['variance'][s] + dc['shape'] = dc['mean'].shape + dc['ndim'] = dc['ndim'] + dc['num_data'], dc['input_dim'] = self.mean.shape[0], self.mean.shape[1] if dc['ndim'] > 1 else 1 + n.__dict__ = dc + return n + class NormalPosterior(VariationalPosterior): ''' diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index f4f34a5e..16b66676 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -64,8 +64,8 @@ class SparseGP(GP): self.kern.gradient += target #gradients wrt Z - self.Z.gradient = self.kern.gradients_X(dL_dKmm, self.Z) - self.Z.gradient += self.kern.gradients_Z_expectations( + self.Z.gradient[:,self.kern.active_dims] = self.kern.gradients_X(dL_dKmm, self.Z) + self.Z.gradient[:,self.kern.active_dims] += self.kern.gradients_Z_expectations( self.grad_dict['dL_dpsi1'], self.grad_dict['dL_dpsi2'], Z=self.Z, variational_posterior=self.X) else: #gradients wrt kernel @@ -77,8 +77,8 @@ class SparseGP(GP): self.kern.gradient += target #gradients wrt Z - self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z) - self.Z.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'].T, self.Z, self.X) + self.Z.gradient[:,self.kern.active_dims] = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z) + self.Z.gradient[:,self.kern.active_dims] += self.kern.gradients_X(self.grad_dict['dL_dKnm'].T, self.Z, self.X) def _raw_predict(self, Xnew, full_cov=False): """ diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py index 922b52f4..47f6ea09 100644 --- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py +++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py @@ -49,9 +49,6 @@ class ExactGaussianInference(object): dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi) - #TODO: does this really live here? - likelihood.update_gradients(np.diag(dL_dK)) - return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK} diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 77fe057d..87dda365 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -1,12 +1,10 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-import sys
 import numpy as np
 import itertools
-from linear import Linear
 from ...core.parameterization import Parameterized
-from ...core.parameterization.param import Param
+from ...util.caching import Cache_this
 from kern import Kern
 
 class Add(Kern):
     def __init__(self, subkerns, tensor=False):
         assert all([isinstance(k, Kern) for k in subkerns])
         if tensor:
             input_dim = sum([k.input_dim for k in subkerns])
-            self.input_slices = []
+            self.active_dims = []
             n = 0
             for k in subkerns:
-                self.input_slices.append(slice(n, n+k.input_dim))
+                self.active_dims.append(slice(n, n+k.input_dim))
                 n += k.input_dim
         else:
-            assert all([k.input_dim == subkerns[0].input_dim for k in subkerns])
-            input_dim = subkerns[0].input_dim
-            self.input_slices = [slice(None) for k in subkerns]
+            #assert all([k.input_dim == subkerns[0].input_dim for k in subkerns])
+            #input_dim = subkerns[0].input_dim
+            #self.input_slices = [slice(None) for k in subkerns]
+            input_dim = reduce(np.union1d, map(lambda x: np.r_[x.active_dims], subkerns))
         super(Add, self).__init__(input_dim, 'add')
         self.add_parameters(*subkerns)
-
-
+
+    @property
+    def parts(self):
+        return self._parameters_
+
+    @Cache_this(limit=1, force_kwargs=('which_parts',))
-    def K(self, X, X2=None):
+    def K(self, X, X2=None, which_parts=None):
         """
         Compute the kernel function.
@@ -37,13 +40,19 @@
         handles this as X2 == X.
         """
         assert X.shape[1] == self.input_dim
-        if X2 is None:
-            return sum([p.K(X[:, i_s], None) for p, i_s in zip(self._parameters_, self.input_slices)])
-        else:
-            return sum([p.K(X[:, i_s], X2[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)])
+        if which_parts is None:
+            which_parts = self.parts
+        elif not isinstance(which_parts, (list, tuple)):
+            # if only one part is given
+            which_parts = [which_parts]
+        return sum([p.K(X, X2) for p in which_parts])
 
-    def update_gradients_full(self, dL_dK, X):
-        [p.update_gradients_full(dL_dK, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
+    def update_gradients_full(self, dL_dK, X, X2=None):
+        [p.update_gradients_full(dL_dK, X, X2) for p in self.parts]
+
+    def update_gradients_diag(self, dL_dK, X):
+        [p.update_gradients_diag(dL_dK, X) for p in self.parts]
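NOTE: the `which_parts` keyword added to `K` above, combined with
`Cache_this(limit=1, force_kwargs=('which_parts',))`, lets a caller evaluate
only a subset of the summands without corrupting the cache: any call that
actually passes `which_parts` bypasses the cache entirely. A minimal sketch of
the intended call pattern (the RBF/Bias constructors are ordinary GPy kernels,
used here purely for illustration):

    import numpy as np
    import GPy

    k = GPy.kern.RBF(1) + GPy.kern.Bias(1)    # builds an Add kernel
    X = np.random.randn(10, 1)
    K_full = k.K(X)                            # cached: sum over all parts
    K_rbf = k.K(X, which_parts=k.parts[0])     # bypasses the cache: RBF part only
    assert np.allclose(K_full, K_rbf + k.parts[1].K(X))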
     def gradients_X(self, dL_dK, X, X2=None):
         """Compute the gradient of the objective function with respect to X.
@@ -55,16 +64,17 @@
         :param X2: Observed data inputs (optional, defaults to X)
         :type X2: np.ndarray (num_inducing x input_dim)"""
 
-        target = np.zeros_like(X)
-        if X2 is None:
-            [np.add(target[:,i_s], p.gradients_X(dL_dK, X[:, i_s], None), target[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
-        else:
-            [np.add(target[:,i_s], p.gradients_X(dL_dK, X[:, i_s], X2[:,i_s]), target[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
+        target = np.zeros(X.shape)
+        for p in self.parts:
+            target[:, p.active_dims] += p.gradients_X(dL_dK, X, X2)
         return target
 
-    def Kdiag(self, X):
+    def Kdiag(self, X, which_parts=None):
         assert X.shape[1] == self.input_dim
-        return sum([p.Kdiag(X[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)])
+        if which_parts is None:
+            which_parts = self.parts
+        return sum([p.Kdiag(X) for p in which_parts])
 
     def psi0(self, Z, variational_posterior):
diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py
index 47166156..33b9ff08 100644
--- a/GPy/kern/_src/kern.py
+++ b/GPy/kern/_src/kern.py
@@ -2,13 +2,22 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import sys
-import numpy as np
-import itertools
-from ...core.parameterization import Parameterized
-from ...core.parameterization.param import Param
-
+from ...core.parameterization.parameterized import ParametersChangedMeta, Parameterized
+from ...util.caching import Cache_this
 
+class KernCallsViaSlicerMeta(ParametersChangedMeta):
+    def __call__(self, *args, **kw):
+        instance = super(KernCallsViaSlicerMeta, self).__call__(*args, **kw)
+        instance.K = instance._slice_wrapper(instance.K)
+        instance.Kdiag = instance._slice_wrapper(instance.Kdiag, True)
+        instance.update_gradients_full = instance._slice_wrapper(instance.update_gradients_full, False, True)
+        instance.update_gradients_diag = instance._slice_wrapper(instance.update_gradients_diag, True, True)
+        instance.gradients_X = instance._slice_wrapper(instance.gradients_X, False, True)
+        instance.gradients_X_diag = instance._slice_wrapper(instance.gradients_X_diag, True, True)
+        return instance
+
 class Kern(Parameterized):
+    __metaclass__ = KernCallsViaSlicerMeta
     def __init__(self, input_dim, name, *a, **kw):
         """
         The base class for a kernel: a positive definite function
@@ -20,11 +29,83 @@
         Do not instantiate.
         """
         super(Kern, self).__init__(name=name, *a, **kw)
-        self.input_dim = input_dim
-
+        if isinstance(input_dim, int):
+            self.active_dims = slice(0, input_dim)
+            self.input_dim = input_dim
+        else:
+            self.active_dims = input_dim
+            self.input_dim = len(self.active_dims)
+        self._sliced_X = False
+        self._sliced_X2 = False
+
+    @Cache_this(limit=10, ignore_args = (0,))
+    def _slice_X(self, X):
+        return X[:, self.active_dims]
+
+    def _slice_wrapper(self, operation, diag=False, derivative=False):
+        """
+        This method wraps the functions in kernel to make sure all kernels always see their respective input dimension.
+ The different switches are: + diag: if X2 exists + derivative: if firest arg is dL_dK + """ + if derivative: + if diag: + def x_slice_wrapper(dL_dK, X, *args, **kw): + X = self._slice_X(X) if not self._sliced_X else X + self._sliced_X = True + try: + ret = operation(dL_dK, X, *args, **kw) + except: raise + finally: + self._sliced_X = False + return ret + else: + def x_slice_wrapper(dL_dK, X, X2=None, *args, **kw): + X, X2 = self._slice_X(X) if not self._sliced_X else X, self._slice_X(X2) if X2 is not None and not self._sliced_X2 else X2 + self._sliced_X = True + self._sliced_X2 = True + try: + ret = operation(dL_dK, X, X2, *args, **kw) + except: raise + finally: + self._sliced_X = False + self._sliced_X2 = False + return ret + else: + if diag: + def x_slice_wrapper(X, *args, **kw): + X = self._slice_X(X) if not self._sliced_X else X + self._sliced_X = True + try: + ret = operation(X, *args, **kw) + except: raise + finally: + self._sliced_X = False + return ret + else: + def x_slice_wrapper(X, X2=None, *args, **kw): + X, X2 = self._slice_X(X) if not self._sliced_X else X, self._slice_X(X2) if X2 is not None and not self._sliced_X2 else X2 + self._sliced_X = True + self._sliced_X2 = True + try: + ret = operation(X, X2, *args, **kw) + except: raise + finally: + self._sliced_X = False + self._sliced_X2 = False + return ret + x_slice_wrapper._operation = operation + x_slice_wrapper.__name__ = ("slicer("+operation.__name__ + +(","+str(bool(diag)) if diag else'') + +(','+str(bool(derivative)) if derivative else '') + +')') + x_slice_wrapper.__doc__ = "**sliced**\n\n" + (operation.__doc__ or "") + return x_slice_wrapper + def K(self, X, X2): raise NotImplementedError - def Kdiag(self, Xa): + def Kdiag(self, X): raise NotImplementedError def psi0(self, Z, variational_posterior): raise NotImplementedError @@ -34,13 +115,16 @@ class Kern(Parameterized): raise NotImplementedError def gradients_X(self, dL_dK, X, X2): raise NotImplementedError - def gradients_X_diag(self, dL_dK, X): + def gradients_X_diag(self, dL_dKdiag, X): raise NotImplementedError - + def update_gradients_full(self, dL_dK, X, X2): """Set the gradients of all parameters when doing full (N) inference.""" raise NotImplementedError - + def update_gradients_diag(self, dL_dKdiag, X): + """Set the gradients for all parameters for the derivative of the diagonal of the covariance w.r.t the kernel parameters.""" + raise NotImplementedError + def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): """ Set the gradients of all parameters when doing inference with diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index 44e17d8a..725f8660 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -57,7 +57,7 @@ class Stationary(Kern): if lengthscale.size != input_dim: lengthscale = np.ones(input_dim)*lengthscale else: - lengthscale = np.ones(self.input_dim) + lengthscale = np.ones(self.input_dim) self.lengthscale = Param('lengthscale', lengthscale, Logexp()) self.variance = Param('variance', variance, Logexp()) assert self.variance.size==1 @@ -85,12 +85,14 @@ class Stationary(Kern): Compute the Euclidean distance between each row of X and X2, or between each pair of rows of X if X2 is None. """ + #X, = self._slice_X(X) if X2 is None: Xsq = np.sum(np.square(X),1) r2 = -2.*tdot(X) + (Xsq[:,None] + Xsq[None,:]) util.diag.view(r2)[:,]= 0. 
# force diagnoal to be zero: sometime numerically a little negative return np.sqrt(r2) else: + #X2, = self._slice_X(X2) X1sq = np.sum(np.square(X),1) X2sq = np.sum(np.square(X2),1) return np.sqrt(-2.*np.dot(X, X2.T) + (X1sq[:,None] + X2sq[None,:])) @@ -124,7 +126,6 @@ class Stationary(Kern): self.lengthscale.gradient = 0. def update_gradients_full(self, dL_dK, X, X2=None): - self.variance.gradient = np.einsum('ij,ij,i', self.K(X, X2), dL_dK, 1./self.variance) #now the lengthscale gradient(s) @@ -136,7 +137,7 @@ class Stationary(Kern): #self.lengthscale.gradient = -((dL_dr*rinv)[:,:,None]*x_xl3).sum(0).sum(0)/self.lengthscale**3 tmp = dL_dr*self._inv_dist(X, X2) if X2 is None: X2 = X - self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)]) + self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(self._slice_X(X)[:,q:q+1] - self._slice_X(X2)[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)]) else: r = self._scaled_dist(X, X2) self.lengthscale.gradient = -np.sum(dL_dr*r)/self.lengthscale @@ -176,7 +177,6 @@ class Stationary(Kern): ret = np.empty(X.shape, dtype=np.float64) [np.einsum('ij,ij->i', tmp, X[:,q][:,None]-X2[:,q][None,:], out=ret[:,q]) for q in xrange(self.input_dim)] ret /= self.lengthscale**2 - return ret def gradients_X_diag(self, dL_dKdiag, X): diff --git a/GPy/util/caching.py b/GPy/util/caching.py index 250efe11..0b6f7234 100644 --- a/GPy/util/caching.py +++ b/GPy/util/caching.py @@ -9,24 +9,27 @@ class Cacher(object): """ - def __init__(self, operation, limit=5, ignore_args=()): + def __init__(self, operation, limit=5, ignore_args=(), force_kwargs=()): self.limit = int(limit) self.ignore_args = ignore_args + self.force_kwargs = force_kwargs self.operation=operation self.cached_inputs = [] self.cached_outputs = [] self.inputs_changed = [] - def __call__(self, *args): + def __call__(self, *args, **kw): """ A wrapper function for self.operation, """ #ensure that specified arguments are ignored + items = sorted(kw.items(), key=lambda x: x[0]) + oa_all = args + tuple(a for _,a in items) if len(self.ignore_args) != 0: - oa = [a for i,a in enumerate(args) if i not in self.ignore_args] + oa = [a for i,a in itertools.chain(enumerate(args), items) if i not in self.ignore_args and i not in self.force_kwargs] else: - oa = args + oa = oa_all # this makes sure we only add an observer once, and that None can be in args observable_args = [] @@ -37,8 +40,13 @@ class Cacher(object): #make sure that all the found argument really are observable: #otherswise don't cache anything, pass args straight though if not all([isinstance(arg, Observable) for arg in observable_args]): - return self.operation(*args) + return self.operation(*args, **kw) + if len(self.force_kwargs) != 0: + # check if there are force args, which force reloading + for k in self.force_kwargs: + if k in kw and kw[k] is not None: + return self.operation(*args, **kw) # TODO: WARNING !!! Cache OFFSWITCH !!! 
WARNING # return self.operation(*args) @@ -48,7 +56,7 @@ class Cacher(object): i = state.index(True) if self.inputs_changed[i]: #(elements of) the args have changed since we last computed: update - self.cached_outputs[i] = self.operation(*args) + self.cached_outputs[i] = self.operation(*args, **kw) self.inputs_changed[i] = False return self.cached_outputs[i] else: @@ -62,11 +70,11 @@ class Cacher(object): self.cached_outputs.pop(0) #compute - self.cached_inputs.append(args) - self.cached_outputs.append(self.operation(*args)) + self.cached_inputs.append(oa_all) + self.cached_outputs.append(self.operation(*args, **kw)) self.inputs_changed.append(False) [a.add_observer(self, self.on_cache_changed) for a in observable_args] - return self.cached_outputs[-1]#Max says return. + return self.cached_outputs[-1]#return def on_cache_changed(self, arg): """ @@ -90,15 +98,16 @@ class Cache_this(object): """ A decorator which can be applied to bound methods in order to cache them """ - def __init__(self, limit=5, ignore_args=()): + def __init__(self, limit=5, ignore_args=(), force_kwargs=()): self.limit = limit self.ignore_args = ignore_args + self.force_args = force_kwargs self.c = None def __call__(self, f): - def f_wrap(*args): + def f_wrap(*args, **kw): if self.c is None: - self.c = Cacher(f, self.limit, ignore_args=self.ignore_args) - return self.c(*args) + self.c = Cacher(f, self.limit, ignore_args=self.ignore_args, force_kwargs=self.force_args) + return self.c(*args, **kw) f_wrap._cacher = self f_wrap.__doc__ = "**cached**\n\n" + (f.__doc__ or "") return f_wrap From fab136392a9117a16eb6b07d4257ce01bb6ea351 Mon Sep 17 00:00:00 2001 From: Neil Lawrence Date: Fri, 7 Mar 2014 17:26:27 +0000 Subject: [PATCH 002/116] Part written addition to datasets for loading in google trends. --- GPy/kern/_src/sympykern.py | 1 + GPy/util/datasets.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/GPy/kern/_src/sympykern.py b/GPy/kern/_src/sympykern.py index 0688682a..91f8c28b 100644 --- a/GPy/kern/_src/sympykern.py +++ b/GPy/kern/_src/sympykern.py @@ -117,6 +117,7 @@ class Sympykern(Kern): if self.output_dim > 1: self.arg_list += self._sp_theta_i + self._sp_theta_j self.diag_arg_list += self._sp_theta_i + # psi_stats aren't yet implemented. if False: self.compute_psi_stats() diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 23f5d0c8..3c44703a 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -274,7 +274,27 @@ def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None): Y = Y[:, None] return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set) +def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends'): + # Inspired by this notebook: + # http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb + # quote the query terms. + for i, element in enumerate(query_terms): + query_terms[i] = urllib2.quote(element) + query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(query_terms) + data = urllib2.urlopen(query).read() + + # We need to do some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD. 
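+    # (On the translation below: the response is JSONP, so the fixed Javascript
+    # wrapper is stripped first; the embedded new Date(y,m,d) literals use
+    # 0-based months, which is why the regular expression adds 1 to the month
+    # group, e.g. new Date(2013,0,6) -> "2013-01-06".)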
+ header = """// Data table response\ngoogle.visualization.Query.setResponse(""" + data = data[len(header):-2] + data = re.sub('new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1+int(m.group(2)), int(m.group(3)))), data) + timeseries = json.loads(data) + import pandas as pd + columns = [k['label'] for k in timeseries['table']['cols']] + rows = map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows']) + df = pd.DataFrame(rows, columns=columns) + df.set_index('Date', inplace=True) + df.plot(figsize=(16, 8)) # The data sets def oil(data_set='three_phase_oil_flow'): From 4ce2eb2ac6f193a0f00e616b810537b1ee909b07 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 10 Mar 2014 08:17:02 +0000 Subject: [PATCH 003/116] mrd for new parameterize --- GPy/models/mrd.py | 361 ++++++++++------------------------------------ 1 file changed, 79 insertions(+), 282 deletions(-) diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index dd1c44ba..b547f2d1 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -5,15 +5,15 @@ import numpy as np import itertools import pylab -from ..core import Model, SparseGP +from ..core import Model from ..util.linalg import PCA from ..kern import Kern -from bayesian_gplvm import BayesianGPLVM from ..core.parameterization.variational import NormalPosterior, NormalPrior -from ..inference.latent_function_inference.var_dtc import VarDTCMissingData -from ..likelihoods.gaussian import Gaussian +from ..core.parameterization import Param, Parameterized +from ..inference.latent_function_inference.var_dtc import VarDTCMissingData, VarDTC +from ..likelihoods import Gaussian -class MRD2(Model): +class MRD(Model): """ Apply MRD to all given datasets Y in Ylist. @@ -43,61 +43,110 @@ class MRD2(Model): :param :class:`~GPy.inference.latent_function_inference inference_method: the inference method to use :param :class:`~GPy.likelihoods.likelihood.Likelihood` likelihood: the likelihood to use :param str name: the name of this model + :param [str] Ynames: the names for the datasets given, must be of equal length as Ylist or None """ def __init__(self, Ylist, input_dim, X=None, X_variance=None, initx = 'PCA', initz = 'permute', num_inducing=10, Z=None, kernel=None, - inference_method=None, likelihood=None, name='mrd'): - super(MRD2, self).__init__(name) + inference_method=None, likelihood=None, name='mrd', Ynames=None): + super(MRD, self).__init__(name) # sort out the kernels if kernel is None: from ..kern import RBF - self.kern = [RBF(input_dim, ARD=1, name='Y_{}'.format(i)) for i in range(len(Ylist))] + self.kern = [RBF(input_dim, ARD=1, name='rbf'.format(i)) for i in range(len(Ylist))] elif isinstance(kernel, Kern): - self.kern = [kernel.copy(name='Y_{}'.format(i)) for i in range(len(Ylist))] + self.kern = [kernel.copy(name='{}'.format(kernel.name, i)) for i in range(len(Ylist))] else: assert len(kernel) == len(Ylist), "need one kernel per output" assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!" 
- + self.kern = kernel self.input_dim = input_dim self.num_inducing = num_inducing - + + self.Ylist = Ylist self._in_init_ = True X = self._init_X(initx, Ylist) - self.Z = self._init_Z(initz, X) + self.Z = Param('inducing inputs', self._init_Z(initz, X)) self.num_inducing = self.Z.shape[0] # ensure M==N if M>N if X_variance is None: - X_variance = np.random.uniform(0,.2,X.shape) + X_variance = np.random.uniform(0, .2, X.shape) self.variational_prior = NormalPrior() self.X = NormalPosterior(X, X_variance) if likelihood is None: - likelihood = Gaussian() + self.likelihood = [Gaussian(name='Gaussian_noise'.format(i)) for i in range(len(Ylist))] + else: self.likelihood = likelihood if inference_method is None: - if any(np.any(np.isnan(y)) for y in Ylist): - self.inference_method = VarDTCMissingData(limit=len(Ylist)) + self.inference_method= [] + for y in Ylist: + if np.any(np.isnan(y)): + self.inference_method.append(VarDTCMissingData(limit=1)) + else: + self.inference_method.append(VarDTC(limit=1)) + else: + self.inference_method = inference_method + self.inference_method.set_limit(len(Ylist)) - self.Ylist = Ylist - + self.add_parameters(self.X, self.Z) + + if Ynames is None: + Ynames = ['Y{}'.format(i) for i in range(len(Ylist))] + + for i, n, k, l in itertools.izip(itertools.count(), Ynames, self.kern, self.likelihood): + p = Parameterized(name=n) + p.add_parameter(k) + p.add_parameter(l) + setattr(self, 'Y{}'.format(i), p) + self.add_parameter(p) + self._in_init_ = False + def parameters_changed(self): - for y in self.Ylist: - pass + self._log_marginal_likelihood = 0 + self.posteriors = [] + self.Z.gradient = 0. + self.X.mean.gradient = 0. + self.X.variance.gradient = 0. + + for y, k, l, i in itertools.izip(self.Ylist, self.kern, self.likelihood, self.inference_method): + posterior, lml, grad_dict = i.inference(k, self.X, self.Z, l, y) + + self.posteriors.append(posterior) + self._log_marginal_likelihood += lml + + # likelihood gradients + l.update_gradients(grad_dict.pop('partial_for_likelihood')) + + #gradients wrt kernel + dL_dKmm = grad_dict.pop('dL_dKmm') + k.update_gradients_full(dL_dKmm, self.Z, None) + target = k.gradient.copy() + k.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, **grad_dict) + k.gradient += target - def _init_X(self, init='PCA', likelihood_list=None): - if likelihood_list is None: - likelihood_list = self.likelihood_list - Ylist = [] - for likelihood_or_Y in likelihood_list: - if type(likelihood_or_Y) is np.ndarray: - Ylist.append(likelihood_or_Y) - else: - Ylist.append(likelihood_or_Y.Y) - del likelihood_list + #gradients wrt Z + self.Z.gradient += k.gradients_X(dL_dKmm, self.Z) + self.Z.gradient += k.gradients_Z_expectations( + grad_dict['dL_dpsi1'], grad_dict['dL_dpsi2'], Z=self.Z, variational_posterior=self.X) + + dL_dmean, dL_dS = k.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **grad_dict) + self.X.mean.gradient += dL_dmean + self.X.variance.gradient += dL_dS + + # update for the KL divergence + self.variational_prior.update_gradients_KL(self.X) + self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X) + + def log_likelihood(self): + return self._log_marginal_likelihood + + def _init_X(self, init='PCA', Ylist=None): + if Ylist is None: + Ylist = self.Ylist if init in "PCA_concat": X = PCA(np.hstack(Ylist), self.input_dim)[0] elif init in "PCA_single": @@ -106,7 +155,6 @@ class MRD2(Model): X[:, qs] = PCA(Y, len(qs))[0] else: # init == 'random': X = np.random.randn(Ylist[0].shape[0], 
self.input_dim) - self.X = X return X def _init_Z(self, init="permute", X=None): @@ -116,259 +164,8 @@ class MRD2(Model): Z = np.random.permutation(X.copy())[:self.num_inducing] elif init in "random": Z = np.random.randn(self.num_inducing, self.input_dim) * X.var() - self.Z = Z return Z -class MRD(Model): - """ - Do MRD on given Datasets in Ylist. - All Ys in likelihood_list are in [N x Dn], where Dn can be different per Yn, - N must be shared across datasets though. - - :param likelihood_list: list of observed datasets (:py:class:`~GPy.likelihoods.gaussian.Gaussian` if not supplied directly) - :type likelihood_list: [:py:class:`~GPy.likelihoods.likelihood.likelihood` | :py:class:`ndarray`] - :param names: names for different gplvm models - :type names: [str] - :param input_dim: latent dimensionality - :type input_dim: int - :param initx: initialisation method for the latent space : - - * 'concat' - PCA on concatenation of all datasets - * 'single' - Concatenation of PCA on datasets, respectively - * 'random' - Random draw from a normal - - :type initx: ['concat'|'single'|'random'] - :param initz: initialisation method for inducing inputs - :type initz: 'permute'|'random' - :param X: Initial latent space - :param X_variance: Initial latent space variance - :param Z: initial inducing inputs - :param num_inducing: number of inducing inputs to use - :param kernels: list of kernels or kernel shared for all BGPLVMS - :type kernels: [GPy.kern.kern] | GPy.kern.kern | None (default) - - """ - def __init__(self, likelihood_or_Y_list, input_dim, num_inducing=10, names=None, - kernels=None, initx='PCA', - initz='permute', _debug=False, **kw): - if names is None: - self.names = ["{}".format(i) for i in range(len(likelihood_or_Y_list))] - - # sort out the kernels - if kernels is None: - kernels = [None] * len(likelihood_or_Y_list) - elif isinstance(kernels, Kern): - kernels = [kernels.copy() for i in range(len(likelihood_or_Y_list))] - else: - assert len(kernels) == len(likelihood_or_Y_list), "need one kernel per output" - assert all([isinstance(k, Kern) for k in kernels]), "invalid kernel object detected!" 
- assert not ('kernel' in kw), "pass kernels through `kernels` argument" - - self.input_dim = input_dim - self._debug = _debug - self.num_inducing = num_inducing - - self._in_init_ = True - X = self._init_X(initx, likelihood_or_Y_list) - Z = self._init_Z(initz, X) - self.num_inducing = Z.shape[0] # ensure M==N if M>N - - self.bgplvms = [BayesianGPLVM(l, input_dim=input_dim, kernel=k, X=X, Z=Z, num_inducing=self.num_inducing, **kw) for l, k in zip(likelihood_or_Y_list, kernels)] - del self._in_init_ - - self.gref = self.bgplvms[0] - nparams = np.array([0] + [SparseGP._get_params(g).size - g.Z.size for g in self.bgplvms]) - self.nparams = nparams.cumsum() - - self.num_data = self.gref.num_data - - self.NQ = self.num_data * self.input_dim - self.MQ = self.num_inducing * self.input_dim - - Model.__init__(self) - self.ensure_default_constraints() - - def _getstate(self): - return Model._getstate(self) + [self.names, - self.bgplvms, - self.gref, - self.nparams, - self.input_dim, - self.num_inducing, - self.num_data, - self.NQ, - self.MQ] - - def _setstate(self, state): - self.MQ = state.pop() - self.NQ = state.pop() - self.num_data = state.pop() - self.num_inducing = state.pop() - self.input_dim = state.pop() - self.nparams = state.pop() - self.gref = state.pop() - self.bgplvms = state.pop() - self.names = state.pop() - Model._setstate(self, state) - - @property - def X(self): - return self.gref.X - @X.setter - def X(self, X): - try: - self.propagate_param(X=X) - except AttributeError: - if not self._in_init_: - raise AttributeError("bgplvm list not initialized") - @property - def Z(self): - return self.gref.Z - @Z.setter - def Z(self, Z): - try: - self.propagate_param(Z=Z) - except AttributeError: - if not self._in_init_: - raise AttributeError("bgplvm list not initialized") - @property - def X_variance(self): - return self.gref.X_variance - @X_variance.setter - def X_variance(self, X_var): - try: - self.propagate_param(X_variance=X_var) - except AttributeError: - if not self._in_init_: - raise AttributeError("bgplvm list not initialized") - @property - def likelihood_list(self): - return [g.likelihood.Y for g in self.bgplvms] - @likelihood_list.setter - def likelihood_list(self, likelihood_list): - for g, Y in itertools.izip(self.bgplvms, likelihood_list): - g.likelihood.Y = Y - - @property - def auto_scale_factor(self): - """ - set auto_scale_factor for all gplvms - :param b: auto_scale_factor - :type b: - """ - return self.gref.auto_scale_factor - @auto_scale_factor.setter - def auto_scale_factor(self, b): - self.propagate_param(auto_scale_factor=b) - - def propagate_param(self, **kwargs): - for key, val in kwargs.iteritems(): - for g in self.bgplvms: - g.__setattr__(key, val) - - def randomize(self, initx='concat', initz='permute', *args, **kw): - super(MRD, self).randomize(*args, **kw) - self._init_X(initx, self.likelihood_list) - self._init_Z(initz, self.X) - - #def _get_latent_param_names(self): - def _get_param_names(self): - n1 = self.gref._get_param_names() - n1var = n1[:self.NQ * 2 + self.MQ] - # return n1var - # - #def _get_kernel_names(self): - map_names = lambda ns, name: map(lambda x: "{1}_{0}".format(*x), - itertools.izip(ns, - itertools.repeat(name))) - return list(itertools.chain(n1var, *(map_names(\ - SparseGP._get_param_names(g)[self.MQ:], n) \ - for g, n in zip(self.bgplvms, self.names)))) - # kernel_names = (map_names(SparseGP._get_param_names(g)[self.MQ:], n) for g, n in zip(self.bgplvms, self.names)) - # return kernel_names - - #def _get_param_names(self): - # X_names = 
sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], []) - # S_names = sum([['X_variance_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], []) - # n1var = self._get_latent_param_names() - # kernel_names = self._get_kernel_names() - # return list(itertools.chain(n1var, *kernel_names)) - - #def _get_print_names(self): - # return list(itertools.chain(*self._get_kernel_names())) - - def _get_params(self): - """ - return parameter list containing private and shared parameters as follows: - - ================================================================= - | mu | S | Z || theta1 | theta2 | .. | thetaN | - ================================================================= - """ - X = self.gref.X.ravel() - X_var = self.gref.X_variance.ravel() - Z = self.gref.Z.ravel() - thetas = [SparseGP._get_params(g)[g.Z.size:] for g in self.bgplvms] - params = np.hstack([X, X_var, Z, np.hstack(thetas)]) - return params - -# def _set_var_params(self, g, X, X_var, Z): -# g.X = X.reshape(self.num_data, self.input_dim) -# g.X_variance = X_var.reshape(self.num_data, self.input_dim) -# g.Z = Z.reshape(self.num_inducing, self.input_dim) -# -# def _set_kern_params(self, g, p): -# g.kern._set_params(p[:g.kern.num_params]) -# g.likelihood._set_params(p[g.kern.num_params:]) - - def _set_params(self, x): - start = 0; end = self.NQ - X = x[start:end] - start = end; end += start - X_var = x[start:end] - start = end; end += self.MQ - Z = x[start:end] - thetas = x[end:] - - # set params for all: - for g, s, e in itertools.izip(self.bgplvms, self.nparams, self.nparams[1:]): - g._set_params(np.hstack([X, X_var, Z, thetas[s:e]])) -# self._set_var_params(g, X, X_var, Z) -# self._set_kern_params(g, thetas[s:e].copy()) -# g._compute_kernel_matrices() -# if self.auto_scale_factor: -# g.scale_factor = np.sqrt(g.psi2.sum(0).mean() * g.likelihood.precision) -# # self.scale_factor = np.sqrt(self.psi2.sum(0).mean() * self.likelihood.precision) -# g._computations() - - - def update_likelihood_approximation(self): # TODO: object oriented vs script base - for bgplvm in self.bgplvms: - bgplvm.update_likelihood_approximation() - - def log_likelihood(self): - ll = -self.gref.KL_divergence() - for g in self.bgplvms: - ll += SparseGP.log_likelihood(g) - return ll - - def _log_likelihood_gradients(self): - dLdmu, dLdS = reduce(lambda a, b: [a[0] + b[0], a[1] + b[1]], (g.dL_dmuS() for g in self.bgplvms)) - dKLmu, dKLdS = self.gref.dKL_dmuS() - dLdmu -= dKLmu - dLdS -= dKLdS - dLdmuS = np.hstack((dLdmu.flatten(), dLdS.flatten())).flatten() - dldzt1 = reduce(lambda a, b: a + b, (SparseGP._log_likelihood_gradients(g)[:self.MQ] for g in self.bgplvms)) - - return np.hstack((dLdmuS, - dldzt1, - np.hstack([np.hstack([g.dL_dtheta(), - g.likelihood._gradients(\ - partial=g.partial_for_likelihood)]) \ - for g in self.bgplvms]))) - - - def _handle_plotting(self, fignum, axes, plotf, sharex=False, sharey=False): if axes is None: fig = pylab.figure(num=fignum) From a9a285c7906c38b98c00343d759a570dbe98a81d Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 10 Mar 2014 08:17:19 +0000 Subject: [PATCH 004/116] likelihood test --- GPy/testing/likelihood_tests.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py index d2a236dd..631f2ec2 100644 --- a/GPy/testing/likelihood_tests.py +++ b/GPy/testing/likelihood_tests.py @@ -651,7 +651,7 @@ class LaplaceTests(unittest.TestCase): 
         m2['.*white'].constrain_fixed(1e-6)
         m2['.*rbf.variance'].constrain_bounded(1e-4, 10)
         m2.randomize()
-
+
         if debug:
             print m1
             print m2
@@ -663,9 +663,8 @@
         if debug:
             print m1
             print m2
-
-        m2.parameters_changed()
-        #m2._set_params(m1._get_params())
+
+        m2[:] = m1[:]
 
         #Predict for training points to get posterior mean and variance
         post_mean, post_var, _, _ = m1.predict(X)
@@ -701,8 +700,8 @@
         np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
         #Check marginals are the same with random
         m1.randomize()
-        #m2._set_params(m1._get_params())
-        m2.parameters_changed()
+        m2[:] = m1[:]
+
         np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
         #Check they are checkgradding

From f7223ea3770e48f8be3373ba306e87a090929b96 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Mon, 10 Mar 2014 08:20:45 +0000
Subject: [PATCH 005/116] constant jitter

---
 .../latent_function_inference/exact_gaussian_inference.py | 2 +-
 GPy/inference/latent_function_inference/var_dtc.py        | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
index 922b52f4..cf74aa17 100644
--- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py
+++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
@@ -46,7 +46,7 @@ class ExactGaussianInference(object):
         alpha, _ = dpotrs(LW, YYT_factor, lower=1)
 
         log_marginal = 0.5*(-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor))
-
+
         dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)
 
         #TODO: does this really live here?
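NOTE: the "constant jitter" introduced in the next hunk protects the Cholesky
factorisation of Kmm: the kernel matrix is positive semi-definite only in exact
arithmetic, and rounding can push its smallest eigenvalues slightly negative.
Adding a small constant to the diagonal before factorising is the standard
remedy. A self-contained sketch of the effect (the 1e-6 value is illustrative;
the actual constant lives in `self.const_jitter`, set elsewhere in VarDTC):

    import numpy as np
    from GPy.util.linalg import jitchol

    Z = np.random.randn(10, 2)
    Kmm = Z.dot(Z.T)                    # rank-2 PSD matrix: plain Cholesky can fail
    Kmm += np.eye(Kmm.shape[0]) * 1e-6  # constant diagonal jitter makes it PD
    L = jitchol(Kmm)
    assert np.allclose(L.dot(L.T), Kmm)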
diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index 1d998fcb..0be8f74c 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -77,7 +77,8 @@ class VarDTC(object): num_inducing = Z.shape[0] num_data = Y.shape[0] # kernel computations, using BGPLVM notation - Kmm = kern.K(Z) + + Kmm = kern.K(Z) +np.eye(Z.shape[0]) * self.const_jitter Lm = jitchol(Kmm) From 546d5dfff3461723c5c16a1111879c54c37217dd Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 10 Mar 2014 08:21:13 +0000 Subject: [PATCH 006/116] all parameters in memory --- GPy/core/parameterization/array_core.py | 14 ++-- GPy/core/parameterization/param.py | 8 +-- GPy/core/parameterization/parameter_core.py | 60 +++++++++++------ GPy/core/parameterization/parameterized.py | 31 +++++---- GPy/core/parameterization/variational.py | 30 +++++++-- GPy/examples/dimensionality_reduction.py | 16 ++--- GPy/kern/_src/add.py | 71 ++++++++------------- GPy/kern/_src/kern.py | 2 +- GPy/kern/_src/static.py | 6 +- 9 files changed, 135 insertions(+), 103 deletions(-) diff --git a/GPy/core/parameterization/array_core.py b/GPy/core/parameterization/array_core.py index 27801e23..3850971a 100644 --- a/GPy/core/parameterization/array_core.py +++ b/GPy/core/parameterization/array_core.py @@ -6,6 +6,12 @@ __updated__ = '2013-12-16' import numpy as np from parameter_core import Observable +class _Array(np.ndarray): + def __init__(self, dtype=float, buffer=None, offset=0, + strides=None, order=None, *args, **kwargs): + super(_Array, self).__init__(dtype=dtype, buffer=buffer, offset=offset, + strides=strides, order=order, *args, **kwargs) + class ObservableArray(np.ndarray, Observable): """ An ndarray which reports changes to its observers. @@ -14,16 +20,14 @@ class ObservableArray(np.ndarray, Observable): takes exactly one argument, which is this array itself. 
""" __array_priority__ = -1 # Never give back ObservableArray - def __new__(cls, input_array): + def __new__(cls, input_array, *a, **kw): if not isinstance(input_array, ObservableArray): - obj = np.atleast_1d(input_array).view(cls) + obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['C', 'W'])).view(cls) else: obj = input_array cls.__name__ = "ObservableArray\n " + super(ObservableArray, obj).__init__(*a, **kw) return obj - def __init__(self, *a, **kw): - super(ObservableArray, self).__init__(*a, **kw) - def __array_finalize__(self, obj): # see InfoArray.__array_finalize__ for comments if obj is None: return diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index 3ebeb566..a2dc9514 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -3,7 +3,7 @@ import itertools import numpy -from parameter_core import OptimizationHandlable, Gradcheckable, adjust_name_for_printing +from parameter_core import OptimizationHandlable, adjust_name_for_printing from array_core import ObservableArray ###### printing @@ -43,13 +43,12 @@ class Param(OptimizationHandlable, ObservableArray): _fixes_ = None _parameters_ = [] def __new__(cls, name, input_array, default_constraint=None): - obj = numpy.atleast_1d(super(Param, cls).__new__(cls, input_array=input_array)) + obj = numpy.atleast_1d(super(Param, cls).__new__(cls, input_array=input_array, name=name, default_constraint=default_constraint)) cls.__name__ = "Param" obj._current_slice_ = (slice(obj.shape[0]),) obj._realshape_ = obj.shape obj._realsize_ = obj.size obj._realndim_ = obj.ndim - obj._updated_ = False from lists_and_dicts import SetDict obj._tied_to_me_ = SetDict() obj._tied_to_ = [] @@ -86,7 +85,6 @@ class Param(OptimizationHandlable, ObservableArray): self._realshape_ = getattr(obj, '_realshape_', None) self._realsize_ = getattr(obj, '_realsize_', None) self._realndim_ = getattr(obj, '_realndim_', None) - self._updated_ = getattr(obj, '_updated_', None) self._original_ = getattr(obj, '_original_', None) self._name = getattr(obj, 'name', None) self._gradient_array_ = getattr(obj, '_gradient_array_', None) @@ -121,14 +119,12 @@ class Param(OptimizationHandlable, ObservableArray): self._realndim_, self._tied_to_me_, self._tied_to_, - self._updated_, ) ) def __setstate__(self, state): super(Param, self).__setstate__(state[0]) state = list(state[1]) - self._updated_ = state.pop() self._tied_to_ = state.pop() self._tied_to_me_ = state.pop() self._realndim_ = state.pop() diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index a78cf02d..3917ed09 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -40,11 +40,9 @@ class Observable(object): as an observer. Every time the observable changes, it sends a notification with self as only argument to all its observers. """ - _updated = True def __init__(self, *args, **kwargs): + super(Observable, self).__init__() self._observer_callables_ = [] - def __del__(self, *args, **kwargs): - del self._observer_callables_ def add_observer(self, observer, callble, priority=0): self._insert_sorted(priority, observer, callble) @@ -161,7 +159,9 @@ class Parentable(object): """ _parent_ = None _parent_index_ = None - + def __init__(self, *args, **kwargs): + super(Parentable, self).__init__() + def has_parent(self): """ Return whether this parentable object currently has a parent. 
@@ -205,6 +205,7 @@ class Gradcheckable(Parentable): """ def __init__(self, *a, **kw): super(Gradcheckable, self).__init__(*a, **kw) + def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3): """ Check the gradient of this parameter with respect to the highest parent's @@ -272,6 +273,9 @@ class Indexable(object): Enable enraveled indexes and offsets for this object. The raveled index of an object is the index for its parameters in a flattened int array. """ + def __init__(self, *a, **kw): + super(Indexable, self).__init__() + def _raveled_index(self): """ Flattened array of ints, specifying the index of this object. @@ -314,7 +318,7 @@ class Constrainable(Nameable, Indexable): :func:`constrain()` and :func:`unconstrain()` are main methods here """ def __init__(self, name, default_constraint=None, *a, **kw): - super(Constrainable, self).__init__(name=name, *a, **kw) + super(Constrainable, self).__init__(name=name, default_constraint=default_constraint, *a, **kw) self._default_constraint_ = default_constraint from index_operations import ParameterIndexOperations self.constraints = ParameterIndexOperations() @@ -534,8 +538,11 @@ class OptimizationHandlable(Constrainable, Observable): """ This enables optimization handles on an Object as done in GPy 0.4. - transformed: make sure the transformations and constraints etc are handled + `..._transformed`: make sure the transformations and constraints etc are handled """ + def __init__(self, name, default_constraint=None, *a, **kw): + super(OptimizationHandlable, self).__init__(name, default_constraint=default_constraint, *a, **kw) + def transform(self): [np.put(self._param_array_, ind, c.finv(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] @@ -625,6 +632,24 @@ class OptimizationHandlable(Constrainable, Observable): [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None] self._set_params_transformed(x) # makes sure all of the tied parameters get the same init (since there's only one prior object...) + #=========================================================================== + # For shared memory arrays. 
This does nothing in Param, but sets the memory + # for all parameterized objects + #=========================================================================== + def _propagate_param_grad(self, parray, garray): + pi_old_size = 0 + for pi in self._parameters_: + pislice = slice(pi_old_size, pi_old_size+pi.size) + + self._param_array_[pislice] = pi._param_array_.ravel()#, requirements=['C', 'W']).flat + self._gradient_array_[pislice] = pi._gradient_array_.ravel()#, requirements=['C', 'W']).flat + + pi._param_array_.data = parray[pislice].data + pi._gradient_array_.data = garray[pislice].data + + pi._propagate_param_grad(parray[pislice], garray[pislice]) + pi_old_size += pi.size + class Parameterizable(OptimizationHandlable): def __init__(self, *args, **kwargs): super(Parameterizable, self).__init__(*args, **kwargs) @@ -811,25 +836,24 @@ class Parameterizable(OptimizationHandlable): p._parent_index_ = i pslice = slice(old_size, old_size+p.size) - pi_old_size = old_size - for pi in p.flattened_parameters: - pislice = slice(pi_old_size, pi_old_size+pi.size) - - self._param_array_[pislice] = pi._param_array_.flat - self._gradient_array_[pislice] = pi._gradient_array_.flat - - pi._param_array_.data = self._param_array_[pislice].data - pi._gradient_array_.data = self._gradient_array_[pislice].data - - pi_old_size += pi.size + # first connect all children + p._propagate_param_grad(self._param_array_[pslice], self._gradient_array_[pslice]) + + # then connect children to self + self._param_array_[pslice] = p._param_array_.ravel()#, requirements=['C', 'W']).ravel(order='C') + self._gradient_array_[pslice] = p._gradient_array_.ravel()#, requirements=['C', 'W']).ravel(order='C') + + if not p._param_array_.flags['C_CONTIGUOUS']: + import ipdb;ipdb.set_trace() p._param_array_.data = self._param_array_[pslice].data p._gradient_array_.data = self._gradient_array_[pslice].data self._param_slices_.append(pslice) + self._add_parameter_name(p, ignore_added_names=ignore_added_names) old_size += p.size - + #=========================================================================== # notification system #=========================================================================== diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index 6d06018a..a98f0098 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -65,8 +65,8 @@ class Parameterized(Parameterizable, Pickleable): # **Never** call parameters_changed() yourself __metaclass__ = ParametersChangedMeta #=========================================================================== - def __init__(self, name=None, *a, **kw): - super(Parameterized, self).__init__(name=name, parent=None, parent_index=None, *a, **kw) + def __init__(self, name=None, parameters=[], *a, **kw): + super(Parameterized, self).__init__(name=name, *a, **kw) self._in_init_ = True self._parameters_ = ArrayList() self.size = sum(p.size for p in self._parameters_) @@ -76,6 +76,7 @@ class Parameterized(Parameterizable, Pickleable): self._param_slices_ = [] self._connect_parameters() del self._in_init_ + self.add_parameters(*parameters) def build_pydot(self, G=None): import pydot # @UnresolvedImport @@ -205,25 +206,29 @@ class Parameterized(Parameterizable, Pickleable): return found_params def __getitem__(self, name, paramlist=None): - if paramlist is None: - paramlist = self.grep_param_names(name) - if len(paramlist) < 1: raise AttributeError, name - if len(paramlist) == 1: - if 
isinstance(paramlist[-1], Parameterized): - paramlist = paramlist[-1].flattened_parameters - if len(paramlist) != 1: - return ParamConcatenation(paramlist) - return paramlist[-1] - return ParamConcatenation(paramlist) + if isinstance(name, (int, slice, tuple, np.ndarray)): + return self._param_array_[name] + else: + if paramlist is None: + paramlist = self.grep_param_names(name) + if len(paramlist) < 1: raise AttributeError, name + if len(paramlist) == 1: + if isinstance(paramlist[-1], Parameterized): + paramlist = paramlist[-1].flattened_parameters + if len(paramlist) != 1: + return ParamConcatenation(paramlist) + return paramlist[-1] + return ParamConcatenation(paramlist) def __setitem__(self, name, value, paramlist=None): if isinstance(name, (slice, tuple, np.ndarray)): self._param_array_[name] = value + self.notify_observers() else: try: param = self.__getitem__(name, paramlist) except AttributeError as a: raise a param[:] = value - + def __setattr__(self, name, val): # override the default behaviour, if setting a param, so broadcasting can by used if hasattr(self, '_parameters_'): diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py index 71921ab1..87b291a7 100644 --- a/GPy/core/parameterization/variational.py +++ b/GPy/core/parameterization/variational.py @@ -21,7 +21,7 @@ class VariationalPrior(Parameterized): updates the gradients for mean and variance **in place** """ raise NotImplementedError, "override this for variational inference of latent space" - + class NormalPrior(VariationalPrior): def KL_divergence(self, variational_posterior): var_mean = np.square(variational_posterior.mean).sum() @@ -63,20 +63,38 @@ class SpikeAndSlabPrior(VariationalPrior): class VariationalPosterior(Parameterized): - def __init__(self, means=None, variances=None, name=None, **kw): - super(VariationalPosterior, self).__init__(name=name, **kw) + def __init__(self, means=None, variances=None, name=None, *a, **kw): + super(VariationalPosterior, self).__init__(name=name, *a, **kw) self.mean = Param("mean", means) + self.variance = Param("variance", variances, Logexp()) self.ndim = self.mean.ndim self.shape = self.mean.shape - self.variance = Param("variance", variances, Logexp()) - self.add_parameters(self.mean, self.variance) self.num_data, self.input_dim = self.mean.shape + self.add_parameters(self.mean, self.variance) if self.has_uncertain_inputs(): assert self.variance.shape == self.mean.shape, "need one variance per sample and dimenion" def has_uncertain_inputs(self): return not self.variance is None + def __getitem__(self, s): + if isinstance(s, (int, slice, tuple, list, np.ndarray)): + import copy + n = self.__new__(self.__class__, self.name) + dc = self.__dict__.copy() + dc['mean'] = self.mean[s] + dc['variance'] = self.variance[s] + dc['_parameters_'] = copy.copy(self._parameters_) + n.__dict__.update(dc) + n._parameters_[dc['mean']._parent_index_] = dc['mean'] + n._parameters_[dc['variance']._parent_index_] = dc['variance'] + n.ndim = n.mean.ndim + n.shape = n.mean.shape + n.num_data = n.mean.shape[0] + n.input_dim = n.mean.shape[1] if n.ndim != 1 else 1 + return n + else: + return super(VariationalPrior, self).__getitem__(s) class NormalPosterior(VariationalPosterior): ''' @@ -107,7 +125,7 @@ class SpikeAndSlabPosterior(VariationalPosterior): super(SpikeAndSlabPosterior, self).__init__(means, variances, name) self.gamma = Param("binary_prob",binary_prob, Logistic(1e-10,1.-1e-10)) self.add_parameter(self.gamma) - + def plot(self, *args): """ Plot 
latent space X in 1D: diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index 9ebb54a2..0e440ab7 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -324,14 +324,14 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw): D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5 _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) - likelihood_list = [Gaussian(x, normalize=True) for x in Ylist] - - k = kern.Linear(Q, ARD=True) + kern.Bias(Q, _np.exp(-2)) + kern.White(Q, _np.exp(-2)) - m = MRD(likelihood_list, input_dim=Q, num_inducing=num_inducing, kernels=k, initx="", initz='permute', **kw) - m.ensure_default_constraints() - - for i, bgplvm in enumerate(m.bgplvms): - m['{}_noise'.format(i)] = bgplvm.likelihood.Y.var() / 500. + + #Ylist = [Ylist[0]] + k = [kern.Linear(Q, ARD=True) + kern.White(Q, 1e-4) for _ in range(len(Ylist))] + m = MRD(Ylist, input_dim=Q, num_inducing=num_inducing, kernel=k, initx="", initz='permute', **kw) + + m['.*noise'] = [Y.var()/500. for Y in Ylist] + #for i, Y in enumerate(Ylist): + # m['.*Y_{}.*Gaussian.*noise'.format(i)] = Y.var(1) / 500. if optimize: print "Optimizing Model:" diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 77fe057d..6498664a 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -1,12 +1,9 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -import sys import numpy as np import itertools -from linear import Linear from ...core.parameterization import Parameterized -from ...core.parameterization.param import Param from kern import Kern class Add(Kern): @@ -42,8 +39,11 @@ class Add(Kern): else: return sum([p.K(X[:, i_s], X2[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]) - def update_gradients_full(self, dL_dK, X): - [p.update_gradients_full(dL_dK, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] + def update_gradients_full(self, dL_dK, X, X2=None): + if X2 is None: + [p.update_gradients_full(dL_dK, X[:,i_s], X2) for p, i_s in zip(self._parameters_, self.input_slices)] + else: + [p.update_gradients_full(dL_dK, X[:,i_s], X2[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X. 
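NOTE: the "cross" terms handled in the following hunks come from squaring a sum
of kernels. For k = k_a + k_b, the second psi-statistic is

    psi2[n, m, m'] = E_{q(x_n)}[ k(x_n, z_m) k(x_n, z_m') ]
                   = psi2_a + psi2_b
                     + E[ k_a(x_n, z_m) k_b(x_n, z_m') ]
                     + E[ k_b(x_n, z_m) k_a(x_n, z_m') ]

so the per-part statistics alone are not sufficient. The code below implements
the cross expectations only for the easy cases: a White part contributes
nothing, and a Bias part is the constant `variance`, so its cross term reduces
to `variance * (psi1[:, :, None] + psi1[:, None, :])`; any other combination
raises NotImplementedError.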
@@ -68,19 +68,18 @@
 
 
     def psi0(self, Z, variational_posterior):
-        return np.sum([p.psi0(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)],0)
+        return np.sum([p.psi0(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)],0)
 
     def psi1(self, Z, variational_posterior):
-        return np.sum([p.psi1(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
+        return np.sum([p.psi1(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
 
     def psi2(self, Z, variational_posterior):
-        psi2 = np.sum([p.psi2(Z[:, i_s], mu[:, i_s], S[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
+        psi2 = np.sum([p.psi2(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0)
 
         # compute the "cross" terms
-        from white import White
+        from static import White, Bias
         from rbf import RBF
         #from rbf_inv import RBFInv
-        from bias import Bias
         from linear import Linear
         #from fixed import Fixed
 
 
             # rbf X bias
             #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)):
             elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
-                tmp = p2.psi1(Z[:,i2], mu[:,i2], S[:,i2])
+                tmp = p2.psi1(Z[:,i2], variational_posterior[:, i2])
                 psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
             #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)):
             elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
-                tmp = p1.psi1(Z[:,i1], mu[:,i1], S[:,i1])
+                tmp = p1.psi1(Z[:,i1], variational_posterior[:, i1])
                 psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return psi2
 
     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from white import White
-        from rbf import RBF
-        #from rbf_inv import RBFInv
-        #from bias import Bias
-        from linear import Linear
-        #ffrom fixed import Fixed
-
+        from static import White, Bias
+        mu, S = variational_posterior.mean, variational_posterior.variance
+
         for p1, is1 in zip(self._parameters_, self.input_slices):
 
             #compute the effective dL_dpsi1. Extra terms appear because of the cross terms in psi2!
             eff_dL_dpsi1 = dL_dpsi1.copy()
             for p2, is2 in zip(self._parameters_, self.input_slices):
                 if p2 is p1:
                     continue
                 if isinstance(p2, White):
                     continue
                 elif isinstance(p2, Bias):
                     eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
                 else:
-                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], mu[:,is2], S[:,is2]) * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is2]) * 2.
 
-            p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
+            p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1])
 
     def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from white import White
-        from rbf import RBF
-        #from rbf_inv import rbfinv
-        from bias import Bias
-        from linear import Linear
-        #ffrom fixed import fixed
-
+        from static import White, Bias
+
         target = np.zeros(Z.shape)
 
         for p1, is1 in zip(self._parameters_, self.input_slices):
             #compute the effective dL_dpsi1. extra terms appear because of the cross terms in psi2!
             eff_dL_dpsi1 = dL_dpsi1.copy()
             for p2, is2 in zip(self._parameters_, self.input_slices):
                 if p2 is p1:
                     continue
                 if isinstance(p2, White):
                     continue
                 elif isinstance(p2, Bias):
                     eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
                 else:
-                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], mu[:,is2], S[:,is2]) * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is2]) * 2.
- target += p1.gradients_z_variational(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1]) + target += p1.gradients_Z_expectations(eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1]) return target def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): - from white import white - from rbf import rbf - #from rbf_inv import rbfinv - #from bias import bias - from linear import linear - #ffrom fixed import fixed - - target_mu = np.zeros(mu.shape) - target_S = np.zeros(S.shape) + from static import White, Bias + + target_mu = np.zeros(variational_posterior.shape) + target_S = np.zeros(variational_posterior.shape) for p1, is1 in zip(self._parameters_, self.input_slices): #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2! @@ -171,15 +156,15 @@ class Add(Kern): for p2, is2 in zip(self._parameters_, self.input_slices): if p2 is p1: continue - if isinstance(p2, white): + if isinstance(p2, White): continue - elif isinstance(p2, bias): + elif isinstance(p2, Bias): eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2. else: - eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(z[:,is2], mu[:,is2], s[:,is2]) * 2. + eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is2]) * 2. - a, b = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], s[:,is1], z[:,is1]) + a, b = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1]) target_mu += a target_S += b return target_mu, target_S diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 47166156..e1106275 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -89,7 +89,7 @@ class Kern(Parameterized): """ Returns the sensitivity for each dimension of this kernel. """ - return self.kern.input_sensitivity() + return np.zeros(self.input_dim) def __add__(self, other): """ Overloading of the '+' operator. 
for more control, see self.add """ diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py index 135e3f9e..f344357c 100644 --- a/GPy/kern/_src/static.py +++ b/GPy/kern/_src/static.py @@ -55,7 +55,7 @@ class White(Static): def psi2(self, Z, variational_posterior): return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64) - def update_gradients_full(self, dL_dK, X): + def update_gradients_full(self, dL_dK, X, X2=None): self.variance.gradient = np.trace(dL_dK) def update_gradients_diag(self, dL_dKdiag, X): @@ -79,10 +79,10 @@ class Bias(Static): self.variance.gradient = dL_dK.sum() def update_gradients_diag(self, dL_dKdiag, X): - self.variance.gradient = dL_dK.sum() + self.variance.gradient = dL_dKdiag.sum() def psi2(self, Z, variational_posterior): - ret = np.empty((mu.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64) + ret = np.empty((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64) ret[:] = self.variance**2 return ret From 38b05e571cdb69f2e6096fc35ebd7ac7cf54f53b Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 10 Mar 2014 09:49:42 +0000 Subject: [PATCH 007/116] whitespaces --- GPy/core/parameterization/array_core.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/GPy/core/parameterization/array_core.py b/GPy/core/parameterization/array_core.py index 3850971a..93cf4a94 100644 --- a/GPy/core/parameterization/array_core.py +++ b/GPy/core/parameterization/array_core.py @@ -27,12 +27,12 @@ class ObservableArray(np.ndarray, Observable): cls.__name__ = "ObservableArray\n " super(ObservableArray, obj).__init__(*a, **kw) return obj - + def __array_finalize__(self, obj): # see InfoArray.__array_finalize__ for comments if obj is None: return self._observer_callables_ = getattr(obj, '_observer_callables_', None) - + def __array_wrap__(self, out_arr, context=None): return out_arr.view(np.ndarray) @@ -54,10 +54,10 @@ class ObservableArray(np.ndarray, Observable): if self._s_not_empty(s): super(ObservableArray, self).__setitem__(s, val) self.notify_observers(self[s]) - + def __getslice__(self, start, stop): return self.__getitem__(slice(start, stop)) - + def __setslice__(self, start, stop, val): return self.__setitem__(slice(start, stop), val) @@ -89,7 +89,7 @@ class ObservableArray(np.ndarray, Observable): self.notify_observers() return r - + def __ifloordiv__(self, *args, **kwargs): r = np.ndarray.__ifloordiv__(self, *args, **kwargs) self.notify_observers() From 603733c6f7a0915761d0e1e1c92e77cc1d179e2c Mon Sep 17 00:00:00 2001 From: James Hensman Date: Mon, 10 Mar 2014 11:14:19 +0000 Subject: [PATCH 008/116] added update_gradients_diag to the add and base kernels --- GPy/kern/_src/add.py | 3 +++ GPy/kern/_src/kern.py | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 6498664a..a5ca9a59 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -45,6 +45,9 @@ class Add(Kern): else: [p.update_gradients_full(dL_dK, X[:,i_s], X2[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] + def update_gradients_diag(self, dL_dKdiag, X): + [p.update_gradients_diag(dL_dK, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] + def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X.
diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index e1106275..b8e428dc 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -37,6 +37,10 @@ class Kern(Parameterized): def gradients_X_diag(self, dL_dK, X): raise NotImplementedError + def update_gradients_diag(self, dL_dKdiag, X): + """ update the gradients of all parameters when using only the diagonal elements of the covariance matrix""" + raise NotImplementedError + def update_gradients_full(self, dL_dK, X, X2): """Set the gradients of all parameters when doing full (N) inference.""" raise NotImplementedError From 73cb20afb04a95f6d4e68e30d6c93ce113d2ff87 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Mon, 10 Mar 2014 11:19:57 +0000 Subject: [PATCH 009/116] bugfix --- GPy/kern/_src/add.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index a5ca9a59..28250e06 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -46,7 +46,7 @@ class Add(Kern): [p.update_gradients_full(dL_dK, X[:,i_s], X2[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] def update_gradients_diag(self, dL_dKdiag, X): - [p.update_gradients_diag(dL_dK, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] + [p.update_gradients_diag(dL_dKdiag, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X. From 7e9078b0f9f58a539a1153622b24874a235e21b6 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 10 Mar 2014 16:01:32 +0000 Subject: [PATCH 010/116] merged params here --- GPy/kern/_src/add.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 3514a224..604ed103 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -24,6 +24,9 @@ class Add(Kern): super(Add, self).__init__(input_dim, 'add') self.add_parameters(*subkerns) + @property + def parts(self): + return self._parameters_ def K(self, X, X2=None): """ @@ -107,8 +110,6 @@ class Add(Kern): def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): from static import White, Bias - mu, S = variational_posterior.mean, variational_posterior.variance - for p1, is1 in zip(self._parameters_, self.input_slices): #compute the effective dL_dpsi1. Extra terms appear becaue of the cross terms in psi2! 
@@ -129,7 +130,6 @@ class Add(Kern): def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior): from static import White, Bias - target = np.zeros(Z.shape) for p1, is1 in zip(self._parameters_, self.input_slices): @@ -151,7 +151,6 @@ class Add(Kern): def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): from static import White, Bias - target_mu = np.zeros(variational_posterior.shape) target_S = np.zeros(variational_posterior.shape) for p1, is1 in zip(self._parameters_, self.input_slices): From 2d8246d33f779823ba4b5bf8060c855c888f5147 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Tue, 11 Mar 2014 10:24:15 +0000 Subject: [PATCH 011/116] Combination Kernel for add and prod --- GPy/kern/_src/add.py | 51 +++++++------------------- GPy/kern/_src/kern.py | 83 +++++++++++++++++++++++++++++-------------- GPy/kern/_src/prod.py | 51 +++++++++++++------------- 3 files changed, 94 insertions(+), 91 deletions(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 604ed103..433a8921 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -5,40 +5,19 @@ import numpy as np import itertools from ...core.parameterization import Parameterized from ...util.caching import Cache_this -from kern import Kern +from kern import CombinationKernel -class Add(Kern): - def __init__(self, subkerns, tensor): - assert all([isinstance(k, Kern) for k in subkerns]) - if tensor: - input_dim = sum([k.input_dim for k in subkerns]) - self.input_slices = [] - n = 0 - for k in subkerns: - self.input_slices.append(slice(n, n+k.input_dim)) - n += k.input_dim - else: - assert all([k.input_dim == subkerns[0].input_dim for k in subkerns]) - input_dim = subkerns[0].input_dim - self.input_slices = [slice(None) for k in subkerns] - super(Add, self).__init__(input_dim, 'add') - self.add_parameters(*subkerns) +class Add(CombinationKernel): + """ + Add given list of kernels together. + Propagates gradients through. + """ + def __init__(self, subkerns, name='add'): + super(Add, self).__init__(subkerns, name) - @property - def parts(self): - return self._parameters_ - - def K(self, X, X2=None): - """ - Compute the kernel function. - - :param X: the first set of inputs to the kernel - :param X2: (optional) the second set of arguments to the kernel. If X2 - is None, this is passed throgh to the 'part' object, which - handLes this as X2 == X. - """ + @Cache_this(limit=2, force_kwargs=['which_parts']) + def K(self, X, X2=None, which_parts=None): assert X.shape[1] == self.input_dim - which_parts=None if which_parts is None: which_parts = self.parts elif not isinstance(which_parts, (list, tuple)): - # if only one part is given which_parts = [which_parts] return sum([p.K(X, X2) for p in which_parts]) - def update_gradients_full(self, dL_dK, X, X2=None): - [p.update_gradients_full(dL_dK, X, X2) for p in self.parts] - - def update_gradients_diag(self, dL_dK, X): - [p.update_gradients_diag(dL_dK, X) for p in self.parts] - def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X.
@@ -67,8 +40,8 @@ class Add(Kern): def Kdiag(self, X): - which_parts=None + @Cache_this(limit=2, force_kwargs=['which_parts']) + def Kdiag(self, X, which_parts=None): assert X.shape[1] == self.input_dim if which_parts is None: which_parts = self.parts diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 96bab646..a1106241 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -2,6 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import sys +import numpy as np from ...core.parameterization.parameterized import ParametersChangedMeta, Parameterized from ...util.caching import Cache_this @@ -14,8 +15,11 @@ class KernCallsViaSlicerMeta(ParametersChangedMeta): instance.update_gradients_diag = instance._slice_wrapper(instance.update_gradients_diag, True, True) instance.gradients_X = instance._slice_wrapper(instance.gradients_X, False, True) instance.gradients_X_diag = instance._slice_wrapper(instance.gradients_X_diag, True, True) + instance.psi0 = instance._slice_wrapper(instance.psi0, False, False) + instance.psi1 = instance._slice_wrapper(instance.psi1, False, False) + instance.psi2 = instance._slice_wrapper(instance.psi2, False, False) return instance - + class Kern(Parameterized): __metaclass__ = KernCallsViaSlicerMeta def __init__(self, input_dim, name, *a, **kw): @@ -37,11 +41,11 @@ class Kern(Parameterized): self.input_dim = len(self.active_dims) self._sliced_X = False self._sliced_X2 = False - - @Cache_this(limit=10, ignore_args = (0,)) + + @Cache_this(limit=10)#, ignore_args = (0,)) def _slice_X(self, X): return X[:, self.active_dims] - + def _slice_wrapper(self, operation, diag=False, derivative=False): """ This method wraps the functions in kernel to make sure all kernels allways see their respective input dimension. @@ -56,7 +60,8 @@ class Kern(Parameterized): self._sliced_X = True try: ret = operation(dL_dK, X, *args, **kw) - except: raise + except: + raise finally: self._sliced_X = False return ret @@ -67,7 +72,8 @@ class Kern(Parameterized): self._sliced_X2 = True try: ret = operation(dL_dK, X, X2, *args, **kw) - except: raise + except: + raise finally: self._sliced_X = False self._sliced_X2 = False @@ -79,7 +85,8 @@ class Kern(Parameterized): self._sliced_X = True try: ret = operation(X, *args, **kw) - except: raise + except: + raise finally: self._sliced_X = False return ret @@ -100,10 +107,18 @@ class Kern(Parameterized): +(","+str(bool(diag)) if diag else'') +(','+str(bool(derivative)) if derivative else '') +')') - x_slice_wrapper.__doc__ = "**sliced**\n\n" + (operation.__doc__ or "") + x_slice_wrapper.__doc__ = "**sliced**\n" + (operation.__doc__ or "") return x_slice_wrapper def K(self, X, X2): + """ + Compute the kernel function. + + :param X: the first set of inputs to the kernel + :param X2: (optional) the second set of arguments to the kernel. If X2 + is None, this is passed through to the 'part' object, which + handles this as X2 == X. + """ raise NotImplementedError def Kdiag(self, X): raise NotImplementedError @@ -179,17 +194,10 @@ class Kern(Parameterized): """ Overloading of the '+' operator. for more control, see self.add """ return self.add(other) - def add(self, other, tensor=False): + def add(self, other, name='add'): """ Add another kernel to this one. - If Tensor is False, both kernels are defined on the same _space_. then - the created kernel will have the same number of inputs as self and - other (which must be the same).
- - If Tensor is True, then the dimensions are stacked 'horizontally', so - that the resulting kernel has self.input_dim + other.input_dim - :param other: the other kernel to be added :type other: GPy.kern @@ -197,23 +205,23 @@ class Kern(Parameterized): assert isinstance(other, Kern), "only kernels can be added to kernels..." from add import Add kernels = [] - if not tensor and isinstance(self, Add): kernels.extend(self._parameters_) + if isinstance(self, Add): kernels.extend(self._parameters_) else: kernels.append(self) - if not tensor and isinstance(other, Add): kernels.extend(other._parameters_) + if isinstance(other, Add): kernels.extend(other._parameters_) else: kernels.append(other) - return Add(kernels, tensor) + return Add(kernels, name=name) def __mul__(self, other): """ Here we overload the '*' operator. See self.prod for more information""" return self.prod(other) - def __pow__(self, other): - """ - Shortcut for tensor `prod`. - """ - return self.prod(other, tensor=True) + #def __pow__(self, other): + # """ + # Shortcut for tensor `prod`. + # """ + # return self.prod(other, tensor=True) - def prod(self, other, tensor=False, name=None): + def prod(self, other, name=None): """ Multiply two kernels (either on the same space, or on the tensor product of the input space). @@ -226,4 +234,27 @@ class Kern(Parameterized): """ assert isinstance(other, Kern), "only kernels can be added to kernels..." from prod import Prod - return Prod(self, other, tensor, name) + kernels = [] + if isinstance(self, Prod): kernels.extend(self._parameters_) + else: kernels.append(self) + if isinstance(other, Prod): kernels.extend(other._parameters_) + else: kernels.append(other) + return Prod(self, other, name) + + +class CombinationKernel(Kern): + def __init__(self, kernels, name): + assert all([isinstance(k, Kern) for k in kernels]) + input_dim = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels)) + super(CombinationKernel, self).__init__(input_dim, name) + self.add_parameters(*kernels) + + @property + def parts(self): + return self._parameters_ + + def update_gradients_full(self, dL_dK, X, X2=None): + [p.update_gradients_full(dL_dK, X, X2) for p in self.parts] + + def update_gradients_diag(self, dL_dK, X): + [p.update_gradients_diag(dL_dK, X) for p in self.parts] diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index 51490687..77b2ea51 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -1,10 +1,12 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern import numpy as np +from kern import CombinationKernel +from ...util.caching import Cache_this +import itertools -class Prod(Kern): +class Prod(CombinationKernel): """ Computes the product of 2 kernels @@ -15,34 +17,31 @@ class Prod(Kern): :rtype: kernel object """ - def __init__(self, k1, k2, tensor=False,name=None): - if tensor: - name = k1.name + '_xx_' + k2.name if name is None else name - super(Prod, self).__init__(k1.input_dim + k2.input_dim, name) - self.slice1 = slice(0,k1.input_dim) - self.slice2 = slice(k1.input_dim,k1.input_dim+k2.input_dim) - else: - assert k1.input_dim == k2.input_dim, "Error: The input spaces of the kernels to multiply don't have the same dimension." 
- name = k1.name + '_x_' + k2.name if name is None else name - super(Prod, self).__init__(k1.input_dim, name) - self.slice1 = slice(0, self.input_dim) - self.slice2 = slice(0, self.input_dim) - self.k1 = k1 - self.k2 = k2 - self.add_parameters(self.k1, self.k2) + def __init__(self, kernels, name='prod'): + super(Prod, self).__init__(kernels, name) - def K(self, X, X2=None): - if X2 is None: - return self.k1.K(X[:,self.slice1], None) * self.k2.K(X[:,self.slice2], None) - else: - return self.k1.K(X[:,self.slice1], X2[:,self.slice1]) * self.k2.K(X[:,self.slice2], X2[:,self.slice2]) + @Cache_this(limit=2, force_kwargs=['which_parts']) + def K(self, X, X2=None, which_parts=None): + assert X.shape[1] == self.input_dim + if which_parts is None: + which_parts = self.parts + elif not isinstance(which_parts, (list, tuple)): + # if only one part is given + which_parts = [which_parts] + return reduce(np.multiply, (p.K(X, X2) for p in which_parts)) - def Kdiag(self, X): - return self.k1.Kdiag(X[:,self.slice1]) * self.k2.Kdiag(X[:,self.slice2]) + @Cache_this(limit=2, force_kwargs=['which_parts']) + def Kdiag(self, X, which_parts=None): + assert X.shape[1] == self.input_dim + if which_parts is None: + which_parts = self.parts + return reduce(np.multiply, (p.Kdiag(X) for p in which_parts)) def update_gradients_full(self, dL_dK, X): - self.k1.update_gradients_full(dL_dK*self.k2.K(X[:,self.slice2]), X[:,self.slice1]) - self.k2.update_gradients_full(dL_dK*self.k1.K(X[:,self.slice1]), X[:,self.slice2]) + for k1, k2 in itertools.combinations(self.parts, 2): + k1._sliced_X = k1._sliced_X2 = k2._sliced_X = k2._sliced_X2 = True + k1.update_gradients_full(dL_dK*k2.K(X, X), X) + k2.update_gradients_full(dL_dK*k1.K(X, X), X) def gradients_X(self, dL_dK, X, X2=None): target = np.zeros(X.shape) From 81d35686d987d45df7bbc9ccd1f292d5e419689e Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Tue, 11 Mar 2014 10:24:30 +0000 Subject: [PATCH 012/116] slicing tests and ipdb delete --- GPy/testing/kernel_tests.py | 30 +++++++++++++++++++++++++----- GPy/testing/likelihood_tests.py | 6 +++--- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index d373a546..2789d1de 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -6,7 +6,9 @@ import numpy as np import GPy import sys -verbose = True +verbose = 0 + + class Kern_check_model(GPy.core.Model): """ @@ -91,7 +93,7 @@ class Kern_check_dKdiag_dX(Kern_check_dK_dX): -def kern_test(kern, X=None, X2=None, output_ind=None, verbose=False): +def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verbose=False): """ This function runs on kernels to check the correctness of their implementation.
It checks that the covariance function is positive definite @@ -210,7 +212,7 @@ def kern_test(kern, X=None, X2=None, output_ind=None, verbose=False): -class KernelTestsContinuous(unittest.TestCase): +class KernelGradientTestsContinuous(unittest.TestCase): def setUp(self): self.X = np.random.randn(100,2) self.X2 = np.random.randn(110,2) @@ -220,16 +222,34 @@ class KernelTestsContinuous(unittest.TestCase): def test_Matern32(self): k = GPy.kern.Matern32(2) - self.assertTrue(kern_test(k, X=self.X, X2=self.X2, verbose=verbose)) + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) def test_Matern52(self): k = GPy.kern.Matern52(2) - self.assertTrue(kern_test(k, X=self.X, X2=self.X2, verbose=verbose)) + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) #TODO: turn off grad checkingwrt X for indexed kernels liek coregionalize +class KernelTestsMiscellaneous(unittest.TestCase): + def setUp(self): + N, D = 100, 10 + self.X = np.linspace(-np.pi, +np.pi, N)[:,None] * np.ones(D) + self.rbf = GPy.kern.RBF(range(2)) + self.linear = GPy.kern.Linear((3,5,6)) + self.matern = GPy.kern.Matern32(np.array([2,4,7])) + self.sumkern = self.rbf + self.linear + self.sumkern += self.matern + self.sumkern.randomize() + + def test_active_dims(self): + self.assertListEqual(self.sumkern.active_dims.tolist(), range(8)) + + def test_which_parts(self): + self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=[self.linear, self.matern]), self.linear.K(self.X)+self.matern.K(self.X))) + self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=[self.linear, self.rbf]), self.linear.K(self.X)+self.rbf.K(self.X))) + self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=self.sumkern.parts[0]), self.rbf.K(self.X))) if __name__ == "__main__": print "Running unit tests, please be (very) patient..." 
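The KernelTestsMiscellaneous case above is the contract for the new slicing machinery: each summand only ever sees its own active dimensions, and which_parts evaluates a chosen subset of a combined kernel. A minimal usage sketch, illustrative only and relying on nothing beyond the constructors and calls exercised in the tests above:

    import numpy as np
    import GPy
    X = np.random.randn(50, 8)
    # index collections pick the columns each part works on
    k = GPy.kern.RBF(range(2)) + GPy.kern.Linear((3, 5, 6)) + GPy.kern.Matern32(np.array([2, 4, 7]))
    print k.active_dims                         # union of the parts' active dimensions
    K_lin = k.K(X, which_parts=k.parts[1])      # evaluate just the linear summand
    assert np.allclose(K_lin, k.parts[1].K(X))  # the part slices X itself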
diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py index 631f2ec2..c71842d8 100644 --- a/GPy/testing/likelihood_tests.py +++ b/GPy/testing/likelihood_tests.py @@ -651,7 +651,7 @@ class LaplaceTests(unittest.TestCase): m2['.*white'].constrain_fixed(1e-6) m2['.*rbf.variance'].constrain_bounded(1e-4, 10) m2.randomize() - + if debug: print m1 print m2 @@ -663,7 +663,7 @@ class LaplaceTests(unittest.TestCase): if debug: print m1 print m2 - + m2[:] = m1[:] #Predict for training points to get posterior mean and variance @@ -702,7 +702,7 @@ class LaplaceTests(unittest.TestCase): m1.randomize() import ipdb;ipdb.set_trace() m2[:] = m1[:] - + np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2) #Check they are checkgradding From 3e91ea497d1214bd1ee04612962e6809e5d4814c Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Tue, 11 Mar 2014 10:24:51 +0000 Subject: [PATCH 013/116] caching doc --- GPy/util/caching.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/util/caching.py b/GPy/util/caching.py index 0b6f7234..5de03059 100644 --- a/GPy/util/caching.py +++ b/GPy/util/caching.py @@ -109,5 +109,5 @@ class Cache_this(object): self.c = Cacher(f, self.limit, ignore_args=self.ignore_args, force_kwargs=self.force_args) return self.c(*args, **kw) f_wrap._cacher = self - f_wrap.__doc__ = "**cached**\n\n" + (f.__doc__ or "") + f_wrap.__doc__ = "**cached**" + (f.__doc__ or "") return f_wrap From 10608a45656ad61aa34ecd3197c716c11640cb67 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Tue, 11 Mar 2014 10:25:21 +0000 Subject: [PATCH 014/116] empty spaces --- GPy/models/sparse_gp_regression.py | 4 ++-- GPy/plotting/matplot_dep/models_plots.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/GPy/models/sparse_gp_regression.py b/GPy/models/sparse_gp_regression.py index 99176601..7edb93e4 100644 --- a/GPy/models/sparse_gp_regression.py +++ b/GPy/models/sparse_gp_regression.py @@ -45,10 +45,10 @@ class SparseGPRegression(SparseGP): assert Z.shape[1] == input_dim likelihood = likelihoods.Gaussian() - + if not (X_variance is None): X = NormalPosterior(X,X_variance) - + SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method=VarDTC()) def _getstate(self): diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index 4ca4441e..86777527 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -56,7 +56,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', if ax is None: fig = pb.figure(num=fignum) ax = fig.add_subplot(111) - + if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs(): X = model.X.mean X_variance = param_to_array(model.X.variance) From 85a471e0f6340dadbd4fe9002ee7d82b5dc07ef0 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Tue, 11 Mar 2014 16:22:45 +0000 Subject: [PATCH 015/116] oh huge bug in checkgrad global --- GPy/core/model.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index a858a62d..6a6fe1ba 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -253,7 +253,7 @@ class Model(Parameterized): sgd.run() self.optimization_runs.append(sgd) - def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3): + def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, _debug=False): """ Check the gradient of the ,odel by comparing to a numerical estimate. 
If the verbose flag is passed, invividual @@ -271,7 +271,7 @@ class Model(Parameterized): and numerical gradients is within of unity. """ x = self._get_params_transformed().copy() - + if not verbose: # make sure only to test the selected parameters if target_param is None: @@ -298,12 +298,12 @@ class Model(Parameterized): dx = dx[transformed_index] gradient = gradient[transformed_index] - + denominator = (2 * np.dot(dx, gradient)) global_ratio = (f1 - f2) / np.where(denominator==0., 1e-32, denominator) gloabl_diff = (f1 - f2) - denominator - - return (np.abs(1. - global_ratio) < tolerance) or (np.abs(gloabl_diff) < tolerance) + + return (np.abs(1. - global_ratio) < tolerance) or (np.abs(gloabl_diff) == 0) else: # check the gradient of each parameter individually, and do some pretty printing try: @@ -349,6 +349,8 @@ class Model(Parameterized): xx[xind] -= 2.*step f2 = self.objective_function(xx) numerical_gradient = (f1 - f2) / (2 * step) + if _debug: + self.gradient[xind] = numerical_gradient if np.all(gradient[xind]==0): ratio = (f1-f2) == gradient[xind] else: ratio = (f1 - f2) / (2 * step * gradient[xind]) difference = np.abs((f1 - f2) / 2 / step - gradient[xind]) @@ -366,7 +368,7 @@ class Model(Parameterized): ng = '%.6f' % float(numerical_gradient) grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}".format(formatted_name, r, d, g, ng, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4]) print grad_string - + self._set_params_transformed(x) return ret From 74999a89ad37bc55821fc12ce786400acc5f722f Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Tue, 11 Mar 2014 16:23:29 +0000 Subject: [PATCH 016/116] gradient check --- GPy/core/parameterization/param.py | 4 +-- GPy/core/parameterization/parameter_core.py | 39 +++++++++++---------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index a2dc9514..8eb10608 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -446,8 +446,8 @@ class ParamConcatenation(object): def untie(self, *ties): [param.untie(*ties) for param in self.params] - def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3): - return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance) + def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False): + return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance, _debug=_debug) #checkgrad.__doc__ = Gradcheckable.checkgrad.__doc__ __lt__ = lambda self, val: self._vals() < val diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 38fe0526..5727bc17 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -16,7 +16,7 @@ Observable Pattern for patameterization from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED import numpy as np -__updated__ = '2013-12-16' +__updated__ = '2014-03-11' class HierarchyError(Exception): """ @@ -34,7 +34,7 @@ def adjust_name_for_printing(name): class Observable(object): """ Observable pattern for parameterization. - + This Object allows for observers to register with self and a (bound!) function as an observer. Every time the observable changes, it sends a notification with self as only argument to all its observers. 
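As an aside, the observer wiring that this file keeps polishing works roughly as follows; a sketch against the API shown here (the callback signature is an assumption, based on notify_observers handing the changed object through):

    from GPy.core.parameterization.parameter_core import Observable

    class Logger(object):               # hypothetical observer
        def changed(self, which):
            print 'updated:', which

    obs = Observable()
    log = Logger()
    obs.add_observer(log, log.changed, priority=0)
    obs.notify_observers(which=obs)     # invokes log.changed(obs)
    obs.remove_observer(log)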
@@ -43,10 +43,10 @@ class Observable(object): def __init__(self, *args, **kwargs): super(Observable, self).__init__(*args, **kwargs) self._observer_callables_ = [] - + def add_observer(self, observer, callble, priority=0): self._insert_sorted(priority, observer, callble) - + def remove_observer(self, observer, callble=None): to_remove = [] for p, obs, clble in self._observer_callables_: @@ -58,15 +58,15 @@ class Observable(object): to_remove.append((p, obs, clble)) for r in to_remove: self._observer_callables_.remove(r) - + def notify_observers(self, which=None, min_priority=None): """ Notifies all observers. Which is the element, which kicked off this notification loop. - + NOTE: notifies only observers with priority p > min_priority! ^^^^^^^^^^^^^^^^ - + :param which: object, which started this notification loop :param min_priority: only notify observers with priority > min_priority if min_priority is None, notify all observers in order @@ -88,11 +88,11 @@ class Observable(object): break ins += 1 self._observer_callables_.insert(ins, (p, o, c)) - + class Pickleable(object): """ Make an object pickleable (See python doc 'pickling'). - + This class allows for pickling support by Memento pattern. _getstate returns a memento of the class, which gets pickled. _setstate() (re-)sets the state of the class to the memento @@ -153,7 +153,7 @@ class Pickleable(object): class Parentable(object): """ Enable an Object to have a parent. - + Additionally this adds the parent_index, which is the index for the parent to look for in its parameter list. """ @@ -161,7 +161,7 @@ class Parentable(object): _parent_index_ = None def __init__(self, *args, **kwargs): super(Parentable, self).__init__(*args, **kwargs) - + def has_parent(self): """ Return whether this parentable object currently has a parent. @@ -205,8 +205,8 @@ class Gradcheckable(Parentable): """ def __init__(self, *a, **kw): super(Gradcheckable, self).__init__(*a, **kw) - - def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3): + + def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False): """ Check the gradient of this parameter with respect to the highest parent's objective function. @@ -214,20 +214,21 @@ class Gradcheckable(Parentable): with a stepsize step. The check passes if either the ratio or the difference between numerical and analytical gradient is smaller then tolerance. - + :param bool verbose: whether each parameter shall be checked individually. :param float step: the stepsize for the numerical three point gradient estimate. :param flaot tolerance: the tolerance for the gradient ratio or difference. """ if self.has_parent(): - return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance) - return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance) - def _checkgrad(self, param): + return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance, _debug=_debug) + return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance, _debug=_debug) + + def _checkgrad(self, param, verbose=0, step=1e-6, tolerance=1e-3, _debug=False): """ Perform the checkgrad on the model. TODO: this can be done more efficiently, when doing it inside here """ - raise NotImplementedError, "Need log likelihood to check gradient against" + raise HierarchyError, "This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!" 
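In practice these hooks mean any parameter in the hierarchy can be gradient-checked against the model's objective. A usage sketch (the regression model is just a convenient example from GPy and not part of this patch):

    import numpy as np
    import GPy
    X = np.random.randn(20, 1)
    Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
    m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))
    print m.checkgrad()              # single global ratio/difference test
    print m.checkgrad(verbose=True)  # per-parameter table of ratios and differences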
class Nameable(Gradcheckable): @@ -258,7 +259,7 @@ class Nameable(Gradcheckable): def hierarchy_name(self, adjust_for_printing=True): """ return the name for this object with the parents names attached by dots. - + :param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()` on the names, recursively """ From e078bb47e10f97519805f363416eae0281ab6c20 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Tue, 11 Mar 2014 16:23:51 +0000 Subject: [PATCH 017/116] psi_stat_expectations now working with new parameterized --- GPy/testing/psi_stat_expectation_tests.py | 57 ++++++----------------- 1 file changed, 13 insertions(+), 44 deletions(-) diff --git a/GPy/testing/psi_stat_expectation_tests.py b/GPy/testing/psi_stat_expectation_tests.py index aec0d36d..075800f6 100644 --- a/GPy/testing/psi_stat_expectation_tests.py +++ b/GPy/testing/psi_stat_expectation_tests.py @@ -12,6 +12,7 @@ import numpy from GPy.kern import RBF from GPy.kern import Linear from copy import deepcopy +from GPy.core.parameterization.variational import NormalPosterior __test__ = lambda: 'deep' in sys.argv # np.random.seed(0) @@ -28,53 +29,20 @@ def ard(p): class Test(unittest.TestCase): input_dim = 9 num_inducing = 13 - N = 300 + N = 1000 Nsamples = 1e6 def setUp(self): - i_s_dim_list = [2,4,3] - indices = numpy.cumsum(i_s_dim_list).tolist() - input_slices = [slice(a,b) for a,b in zip([None]+indices, indices)] - #input_slices[2] = deepcopy(input_slices[1]) - input_slice_kern = GPy.kern.kern(9, - [ - RBF(i_s_dim_list[0], np.random.rand(), np.random.rand(i_s_dim_list[0]), ARD=True), - RBF(i_s_dim_list[1], np.random.rand(), np.random.rand(i_s_dim_list[1]), ARD=True), - Linear(i_s_dim_list[2], np.random.rand(i_s_dim_list[2]), ARD=True) - ], - input_slices = input_slices - ) self.kerns = ( -# input_slice_kern, -# (GPy.kern.rbf(self.input_dim, ARD=True) + -# GPy.kern.linear(self.input_dim, ARD=True) + -# GPy.kern.bias(self.input_dim) + -# GPy.kern.white(self.input_dim)), - (#GPy.kern.rbf(self.input_dim, np.random.rand(), np.random.rand(self.input_dim), ARD=True) - GPy.kern.Linear(self.input_dim, np.random.rand(self.input_dim), ARD=True) - +GPy.kern.RBF(self.input_dim, np.random.rand(), np.random.rand(self.input_dim), ARD=True) -# +GPy.kern.bias(self.input_dim) -# +GPy.kern.white(self.input_dim)), - ), -# (GPy.kern.rbf(self.input_dim, np.random.rand(), np.random.rand(self.input_dim), ARD=True) + -# GPy.kern.bias(self.input_dim, np.random.rand())), -# (GPy.kern.rbf(self.input_dim, np.random.rand(), np.random.rand(self.input_dim), ARD=True) -# +GPy.kern.rbf(self.input_dim, np.random.rand(), np.random.rand(self.input_dim), ARD=True) -# #+GPy.kern.bias(self.input_dim, np.random.rand()) -# #+GPy.kern.white(self.input_dim, np.random.rand())), -# ), -# GPy.kern.white(self.input_dim, np.random.rand())), -# GPy.kern.rbf(self.input_dim), GPy.kern.rbf(self.input_dim, ARD=True), -# GPy.kern.linear(self.input_dim, ARD=False), GPy.kern.linear(self.input_dim, ARD=True), -# GPy.kern.linear(self.input_dim) + GPy.kern.bias(self.input_dim), -# GPy.kern.rbf(self.input_dim) + GPy.kern.bias(self.input_dim), -# GPy.kern.linear(self.input_dim) + GPy.kern.bias(self.input_dim) + GPy.kern.white(self.input_dim), -# GPy.kern.rbf(self.input_dim) + GPy.kern.bias(self.input_dim) + GPy.kern.white(self.input_dim), -# GPy.kern.bias(self.input_dim), GPy.kern.white(self.input_dim), + GPy.kern.RBF(self.input_dim, ARD=True)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), +
GPy.kern.RBF(self.input_dim)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), + GPy.kern.Linear(self.input_dim) + GPy.kern.Bias(self.input_dim) + GPy.kern.White(self.input_dim), + GPy.kern.Linear(self.input_dim, ARD=True) + GPy.kern.Bias(self.input_dim) + GPy.kern.White(self.input_dim), ) - self.q_x_mean = np.random.randn(self.input_dim) - self.q_x_variance = np.exp(np.random.randn(self.input_dim)) + self.q_x_mean = np.random.randn(self.input_dim)[None] + self.q_x_variance = np.exp(.5*np.random.randn(self.input_dim))[None] self.q_x_samples = np.random.randn(self.Nsamples, self.input_dim) * np.sqrt(self.q_x_variance) + self.q_x_mean + self.q_x = NormalPosterior(self.q_x_mean, self.q_x_variance) self.Z = np.random.randn(self.num_inducing, self.input_dim) self.q_x_mean.shape = (1, self.input_dim) self.q_x_variance.shape = (1, self.input_dim) @@ -114,8 +82,9 @@ class Test(unittest.TestCase): def test_psi2(self): for kern in self.kerns: + kern.randomize() Nsamples = int(np.floor(self.Nsamples/self.N)) - psi2 = kern.psi2(self.Z, self.q_x_mean, self.q_x_variance) + psi2 = kern.psi2(self.Z, self.q_x) K_ = np.zeros((self.num_inducing, self.num_inducing)) diffs = [] for i, q_x_sample_stripe in enumerate(np.array_split(self.q_x_samples, self.Nsamples / Nsamples)): @@ -130,8 +99,8 @@ class Test(unittest.TestCase): pylab.figure(msg) pylab.plot(diffs, marker='x', mew=.2) # print msg, np.allclose(psi2.squeeze(), K_, rtol=1e-1, atol=.1) - self.assertTrue(np.allclose(psi2.squeeze(), K_), - #rtol=1e-1, atol=.1), + self.assertTrue(np.allclose(psi2.squeeze(), K_, + atol=.1, rtol=1), msg=msg + ": not matching") # sys.stdout.write(".") except: From 01f5d789c5999de7df8818ce659c2c0ea0a633fb Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Tue, 11 Mar 2014 16:24:09 +0000 Subject: [PATCH 018/116] automatic slicing --- GPy/kern/_src/add.py | 78 ++++++++++++------------------ GPy/kern/_src/kern.py | 107 +++++++++++------------------------------- GPy/kern/_src/rbf.py | 31 ++++++------ 3 files changed, 72 insertions(+), 144 deletions(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 433a8921..8a3cefaf 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -23,7 +23,7 @@ class Add(CombinationKernel): elif not isinstance(which_parts, (list, tuple)): # if only one part is given which_parts = [which_parts] - return sum([p.K(X, X2) for p in which_parts]) + return reduce(np.add, (p.K(X, X2) for p in which_parts)) def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X. 
@@ -49,14 +49,14 @@ class Add(CombinationKernel): def psi0(self, Z, variational_posterior): - return np.sum([p.psi0(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)],0) + return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts)) def psi1(self, Z, variational_posterior): - return np.sum([p.psi1(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0) + return reduce(np.add, (p.psi1(Z, variational_posterior) for p in self.parts)) def psi2(self, Z, variational_posterior): - psi2 = np.sum([p.psi2(Z[:, i_s], variational_posterior[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)], 0) - + psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts)) + return psi2 # compute the "cross" terms from static import White, Bias from rbf import RBF @@ -64,18 +64,23 @@ class Add(CombinationKernel): from linear import Linear #ffrom fixed import Fixed - for (p1, i1), (p2, i2) in itertools.combinations(itertools.izip(self._parameters_, self.input_slices), 2): + for p1, p2 in itertools.combinations(self.parts, 2): + i1, i2 = p1.active_dims, p2.active_dims # white doesn;t combine with anything if isinstance(p1, White) or isinstance(p2, White): pass # rbf X bias #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)): elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)): - tmp = p2.psi1(Z[:,i2], variational_posterior[:, i_s]) + # manual override for slicing: + p2._sliced_X = p1._sliced_X = True + tmp = p2.psi1(Z[:,i2], variational_posterior[:, i1]) psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :]) #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)): elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)): - tmp = p1.psi1(Z[:,i1], variational_posterior[:, i_s]) + # manual override for slicing: + p2._sliced_X = p1._sliced_X = True + tmp = p1.psi1(Z[:,i1], variational_posterior[:, i2]) psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :]) else: raise NotImplementedError, "psi2 cannot be computed for this kernel" @@ -83,11 +88,10 @@ class Add(CombinationKernel): def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): from static import White, Bias - for p1, is1 in zip(self._parameters_, self.input_slices): - + for p1 in self.parts: #compute the effective dL_dpsi1. Extra terms appear becaue of the cross terms in psi2! eff_dL_dpsi1 = dL_dpsi1.copy() - for p2, is2 in zip(self._parameters_, self.input_slices): + for p2 in self.parts: if p2 is p1: continue if isinstance(p2, White): @@ -95,42 +99,35 @@ class Add(CombinationKernel): elif isinstance(p2, Bias): eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2. else: - eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is1]) * 2. - - - p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1]) - + eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2. + p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) def gradients_Z_expectations(self, dL_dpsi1, dL_dpsi2, Z, variational_posterior): from static import White, Bias target = np.zeros(Z.shape) - for p1, is1 in zip(self._parameters_, self.input_slices): - + for p1 in self.parts: #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2! 
eff_dL_dpsi1 = dL_dpsi1.copy() - for p2, is2 in zip(self._parameters_, self.input_slices): + for p2 in self.parts: if p2 is p1: continue if isinstance(p2, White): continue elif isinstance(p2, Bias): - eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2. + eff_dL_dpsi1 += 0#dL_dpsi2.sum(1) * p2.variance * 2. else: - eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is2]) * 2. - - - target += p1.gradients_Z_expectations(eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1]) + eff_dL_dpsi1 += 0#dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2. + target[:, p1.active_dims] += p1.gradients_Z_expectations(eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) return target def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): from static import White, Bias target_mu = np.zeros(variational_posterior.shape) target_S = np.zeros(variational_posterior.shape) - for p1, is1 in zip(self._parameters_, self.input_slices): - + for p1 in self._parameters_: #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2! eff_dL_dpsi1 = dL_dpsi1.copy() - for p2, is2 in zip(self._parameters_, self.input_slices): + for p2 in self._parameters_: if p2 is p1: continue if isinstance(p2, White): @@ -138,35 +135,20 @@ class Add(CombinationKernel): elif isinstance(p2, Bias): eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2. else: - eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], variational_posterior[:, is2]) * 2. - - - a, b = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z[:,is1], variational_posterior[:, is1]) - target_mu += a - target_S += b + eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2. + a, b = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) + target_mu[:, p1.active_dims] += a + target_S[:, p1.active_dims] += b return target_mu, target_S - def input_sensitivity(self): - in_sen = np.zeros((self.num_params, self.input_dim)) - for i, [p, i_s] in enumerate(zip(self._parameters_, self.input_slices)): - in_sen[i, i_s] = p.input_sensitivity() - return in_sen - def _getstate(self): """ Get the current state of the class, here just all the indices, rest can get recomputed """ - return Parameterized._getstate(self) + [#self._parameters_, - self.input_dim, - self.input_slices, - self._param_slices_ - ] + return super(Add, self)._getstate() def _setstate(self, state): - self._param_slices_ = state.pop() - self.input_slices = state.pop() - self.input_dim = state.pop() - Parameterized._setstate(self, state) + super(Add, self)._setstate(state) diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index a1106241..8feb9a04 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -3,25 +3,18 @@ import sys import numpy as np -from ...core.parameterization.parameterized import ParametersChangedMeta, Parameterized +from ...core.parameterization.parameterized import Parameterized +from kernel_slice_operations import KernCallsViaSlicerMeta from ...util.caching import Cache_this -class KernCallsViaSlicerMeta(ParametersChangedMeta): - def __call__(self, *args, **kw): - instance = super(KernCallsViaSlicerMeta, self).__call__(*args, **kw) - instance.K = instance._slice_wrapper(instance.K) - instance.Kdiag = instance._slice_wrapper(instance.Kdiag, True) - instance.update_gradients_full = instance._slice_wrapper(instance.update_gradients_full, False, True) - instance.update_gradients_diag = 
instance._slice_wrapper(instance.update_gradients_diag, True, True) - instance.gradients_X = instance._slice_wrapper(instance.gradients_X, False, True) - instance.gradients_X_diag = instance._slice_wrapper(instance.gradients_X_diag, True, True) - instance.psi0 = instance._slice_wrapper(instance.psi0, False, False) - instance.psi1 = instance._slice_wrapper(instance.psi1, False, False) - instance.psi2 = instance._slice_wrapper(instance.psi2, False, False) - return instance + class Kern(Parameterized): + #=========================================================================== + # This adds input slice support. The rather ugly code for slicing can be + # found in kernel_slice_operations __metaclass__ = KernCallsViaSlicerMeta + #=========================================================================== def __init__(self, input_dim, name, *a, **kw): """ The base class for a kernel: a positive definite function @@ -40,76 +33,11 @@ class Kern(Parameterized): self.active_dims = input_dim self.input_dim = len(self.active_dims) self._sliced_X = False - self._sliced_X2 = False @Cache_this(limit=10)#, ignore_args = (0,)) def _slice_X(self, X): return X[:, self.active_dims] - def _slice_wrapper(self, operation, diag=False, derivative=False): - """ - This method wraps the functions in kernel to make sure all kernels allways see their respective input dimension. - The different switches are: - diag: if X2 exists - derivative: if firest arg is dL_dK - """ - if derivative: - if diag: - def x_slice_wrapper(dL_dK, X, *args, **kw): - X = self._slice_X(X) if not self._sliced_X else X - self._sliced_X = True - try: - ret = operation(dL_dK, X, *args, **kw) - except: - raise - finally: - self._sliced_X = False - return ret - else: - def x_slice_wrapper(dL_dK, X, X2=None, *args, **kw): - X, X2 = self._slice_X(X) if not self._sliced_X else X, self._slice_X(X2) if X2 is not None and not self._sliced_X2 else X2 - self._sliced_X = True - self._sliced_X2 = True - try: - ret = operation(dL_dK, X, X2, *args, **kw) - except: - raise - finally: - self._sliced_X = False - self._sliced_X2 = False - return ret - else: - if diag: - def x_slice_wrapper(X, *args, **kw): - X = self._slice_X(X) if not self._sliced_X else X - self._sliced_X = True - try: - ret = operation(X, *args, **kw) - except: - raise - finally: - self._sliced_X = False - return ret - else: - def x_slice_wrapper(X, X2=None, *args, **kw): - X, X2 = self._slice_X(X) if not self._sliced_X else X, self._slice_X(X2) if X2 is not None and not self._sliced_X2 else X2 - self._sliced_X = True - self._sliced_X2 = True - try: - ret = operation(X, X2, *args, **kw) - except: raise - finally: - self._sliced_X = False - self._sliced_X2 = False - return ret - x_slice_wrapper._operation = operation - x_slice_wrapper.__name__ = ("slicer("+operation.__name__ - +(","+str(bool(diag)) if diag else'') - +(','+str(bool(derivative)) if derivative else '') - +')') - x_slice_wrapper.__doc__ = "**sliced**\n" + (operation.__doc__ or "") - return x_slice_wrapper - def K(self, X, X2): """ Compute the kernel function. 
@@ -241,6 +169,21 @@ class Kern(Parameterized): else: kernels.append(other) return Prod(self, other, name) + def _getstate(self): + """ + Get the current state of the class, + here just all the indices, rest can get recomputed + """ + return super(Kern, self)._getstate() + [ + self.active_dims, + self.input_dim, + self._sliced_X] + + def _setstate(self, state): + self._sliced_X = state.pop() + self.input_dim = state.pop() + self.active_dims = state.pop() + super(Kern, self)._setstate(state) class CombinationKernel(Kern): def __init__(self, kernels, name): @@ -258,3 +201,9 @@ class CombinationKernel(Kern): def update_gradients_diag(self, dL_dK, X): [p.update_gradients_diag(dL_dK, X) for p in self.parts] + + def input_sensitivity(self): + in_sen = np.zeros((self.num_params, self.input_dim)) + for i, p in enumerate(self.parts): + in_sen[i, p.active_dims] = p.input_sensitivity() + return in_sen diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py index cd6c41e9..7ba1f35d 100644 --- a/GPy/kern/_src/rbf.py +++ b/GPy/kern/_src/rbf.py @@ -56,28 +56,28 @@ class RBF(Stationary): if isinstance(variational_posterior, variational.SpikeAndSlabPosterior): _, _dpsi1_dvariance, _, _, _, _, _dpsi1_dlengthscale = ssrbf_psi_comp._psi1computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob) _, _dpsi2_dvariance, _, _, _, _, _dpsi2_dlengthscale = ssrbf_psi_comp._psi2computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob) - + #contributions from psi0: self.variance.gradient = np.sum(dL_dpsi0) - + #from psi1 self.variance.gradient += np.sum(dL_dpsi1 * _dpsi1_dvariance) if self.ARD: self.lengthscale.gradient = (dL_dpsi1[:,:,None]*_dpsi1_dlengthscale).reshape(-1,self.input_dim).sum(axis=0) else: self.lengthscale.gradient = (dL_dpsi1[:,:,None]*_dpsi1_dlengthscale).sum() - - + #from psi2 self.variance.gradient += (dL_dpsi2 * _dpsi2_dvariance).sum() if self.ARD: self.lengthscale.gradient += (dL_dpsi2[:,:,:,None] * _dpsi2_dlengthscale).reshape(-1,self.input_dim).sum(axis=0) else: self.lengthscale.gradient += (dL_dpsi2[:,:,:,None] * _dpsi2_dlengthscale).sum() - + elif isinstance(variational_posterior, variational.NormalPosterior): - - l2 = self.lengthscale **2 + l2 = self.lengthscale**2 + if l2.size != self.input_dim: + l2 = l2*np.ones(self.input_dim) #contributions from psi0: self.variance.gradient = np.sum(dL_dpsi0) @@ -92,11 +92,9 @@ class RBF(Stationary): else: self.lengthscale.gradient += dpsi1_dlength.sum() self.variance.gradient += np.sum(dL_dpsi1 * psi1) / self.variance - #from psi2 S = variational_posterior.variance _, Zdist_sq, _, mudist_sq, psi2 = self._psi2computations(Z, variational_posterior) - if not self.ARD: self.lengthscale.gradient += self._weave_psi2_lengthscale_grads(dL_dpsi2, psi2, Zdist_sq, S, mudist_sq, l2).sum() else: @@ -112,17 +110,16 @@ class RBF(Stationary): if isinstance(variational_posterior, variational.SpikeAndSlabPosterior): _, _, _, _, _, _dpsi1_dZ, _ = ssrbf_psi_comp._psi1computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob) _, _, _, _, _, _dpsi2_dZ, _ = ssrbf_psi_comp._psi2computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob) - + #psi1 grad = (dL_dpsi1[:, :, None] * _dpsi1_dZ).sum(axis=0) - + #psi2 grad += (dL_dpsi2[:, :, :, None] * 
_dpsi2_dZ).sum(axis=0).sum(axis=1) - + return grad elif isinstance(variational_posterior, variational.NormalPosterior): - l2 = self.lengthscale **2 #psi1 @@ -145,10 +142,10 @@ class RBF(Stationary): # Spike-and-Slab GPLVM if isinstance(variational_posterior, variational.SpikeAndSlabPosterior): ndata = variational_posterior.mean.shape[0] - + _, _, _dpsi1_dgamma, _dpsi1_dmu, _dpsi1_dS, _, _ = ssrbf_psi_comp._psi1computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob) _, _, _dpsi2_dgamma, _dpsi2_dmu, _dpsi2_dS, _, _ = ssrbf_psi_comp._psi2computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob) - + #psi1 grad_mu = (dL_dpsi1[:, :, None] * _dpsi1_dmu).sum(axis=1) grad_S = (dL_dpsi1[:, :, None] * _dpsi1_dS).sum(axis=1) @@ -157,11 +154,11 @@ class RBF(Stationary): grad_mu += (dL_dpsi2[:, :, :, None] * _dpsi2_dmu).reshape(ndata,-1,self.input_dim).sum(axis=1) grad_S += (dL_dpsi2[:, :, :, None] * _dpsi2_dS).reshape(ndata,-1,self.input_dim).sum(axis=1) grad_gamma += (dL_dpsi2[:,:,:, None] * _dpsi2_dgamma).reshape(ndata,-1,self.input_dim).sum(axis=1) - + return grad_mu, grad_S, grad_gamma elif isinstance(variational_posterior, variational.NormalPosterior): - + l2 = self.lengthscale **2 #psi1 denom, dist, dist_sq, psi1 = self._psi1computations(Z, variational_posterior) From 5f2b383510f31d592acf1601970caec173f38530 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 09:51:26 +0000 Subject: [PATCH 019/116] plotting returns --- GPy/plotting/matplot_dep/models_plots.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index 86777527..8b5a40e7 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -68,7 +68,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #work out what the inputs are for plotting (1D or 2D) fixed_dims = np.array([i for i,v in fixed_inputs]) free_dims = np.setdiff1d(np.arange(model.input_dim),fixed_dims) - + plots = {} #one dimensional plotting if len(free_dims) == 1: @@ -89,20 +89,20 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', m, v, lower, upper = model.predict(Xgrid) Y = Y for d in which_data_ycols: - gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], ax=ax, edgecol=linecol, fillcol=fillcol) - ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], 'kx', mew=1.5) + plots['gpplot'] = gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], ax=ax, edgecol=linecol, fillcol=fillcol) + plots['dataplot'] = ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], 'kx', mew=1.5) #optionally plot some samples if samples: #NOTE not tested with fixed_inputs Ysim = model.posterior_samples(Xgrid, samples) for yi in Ysim.T: - ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) + plots['posterior_samples'] = ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs. 
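The thread running through this patch: plot_fit now collects every artist it draws into the plots dict and hands it back (see the return plots below), so callers can restyle the figure after the fact. A hypothetical use, with m a fitted model:

    plots = plot_fit(m)
    plots['dataplot'][0].set_markersize(3)   # ax.plot returns a list of line artists
    print plots.keys()                       # 'gpplot', 'dataplot', ... as assigned above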
#add error bars for uncertain (if input uncertainty is being modelled) if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs(): - ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(), + plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(), xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()), ecolor='k', fmt=None, elinewidth=.5, alpha=.5) @@ -118,7 +118,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims] Zu = Z[:,free_dims] z_height = ax.get_ylim()[0] - ax.plot(Zu, np.zeros_like(Zu) + z_height, 'r|', mew=1.5, markersize=12) + plots['inducing_inputs'] = ax.plot(Zu, np.zeros_like(Zu) + z_height, 'r|', mew=1.5, markersize=12) @@ -143,8 +143,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', Y = Y for d in which_data_ycols: m_d = m[:,d].reshape(resolution, resolution).T - ax.contour(x, y, m_d, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) - ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) + plots['contour'] = ax.contour(x, y, m_d, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) + plots['dataplot'] = ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) #set the limits of the plot to some sensible values ax.set_xlim(xmin[0], xmax[0]) @@ -157,11 +157,11 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', if hasattr(model,"Z"): #Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims] Zu = Z[:,free_dims] - ax.plot(Zu[:,free_dims[0]], Zu[:,free_dims[1]], 'wo') + plots['inducing_inputs'] = ax.plot(Zu[:,free_dims[0]], Zu[:,free_dims[1]], 'wo') else: raise NotImplementedError, "Cannot define a frame with more than two input dimensions" - + return plots def plot_fit_f(model, *args, **kwargs): """ From 02bce95c41606da02ea8f9548282425fe125fbdf Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 09:51:46 +0000 Subject: [PATCH 020/116] psi stat testing improvements, gradients not working yet --- GPy/testing/psi_stat_expectation_tests.py | 2 +- GPy/testing/psi_stat_gradient_tests.py | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/GPy/testing/psi_stat_expectation_tests.py b/GPy/testing/psi_stat_expectation_tests.py index 075800f6..04167bef 100644 --- a/GPy/testing/psi_stat_expectation_tests.py +++ b/GPy/testing/psi_stat_expectation_tests.py @@ -34,7 +34,7 @@ class Test(unittest.TestCase): def setUp(self): self.kerns = ( - GPy.kern.RBF(self.input_dim, ARD=True)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), + GPy.kern.RBF([0,1,2], ARD=True)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), GPy.kern.RBF(self.input_dim)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), GPy.kern.Linear(self.input_dim) + GPy.kern.Bias(self.input_dim) + GPy.kern.White(self.input_dim), GPy.kern.Linear(self.input_dim, ARD=True) + GPy.kern.Bias(self.input_dim) + GPy.kern.White(self.input_dim), diff --git a/GPy/testing/psi_stat_gradient_tests.py b/GPy/testing/psi_stat_gradient_tests.py index fc189f93..d51cd913 100644 --- a/GPy/testing/psi_stat_gradient_tests.py +++ b/GPy/testing/psi_stat_gradient_tests.py @@ -11,6 +11,7 
@@ import itertools from GPy.core import Model from GPy.core.parameterization.param import Param from GPy.core.parameterization.transformations import Logexp +from GPy.core.parameterization.variational import NormalPosterior class PsiStatModel(Model): def __init__(self, which, X, X_variance, Z, num_inducing, kernel): @@ -18,23 +19,24 @@ class PsiStatModel(Model): self.which = which self.X = Param("X", X) self.X_variance = Param('X_variance', X_variance, Logexp()) + self.q = NormalPosterior(self.X, self.X_variance) self.Z = Param("Z", Z) self.N, self.input_dim = X.shape self.num_inducing, input_dim = Z.shape assert self.input_dim == input_dim, "shape missmatch: Z:{!s} X:{!s}".format(Z.shape, X.shape) self.kern = kernel - self.psi_ = self.kern.__getattribute__(self.which)(self.Z, self.X, self.X_variance) - self.add_parameters(self.X, self.X_variance, self.Z, self.kern) + self.psi_ = self.kern.__getattribute__(self.which)(self.Z, self.q) + self.add_parameters(self.q, self.Z, self.kern) def log_likelihood(self): return self.kern.__getattribute__(self.which)(self.Z, self.X, self.X_variance).sum() def parameters_changed(self): - psimu, psiS = self.kern.__getattribute__("d" + self.which + "_dmuS")(numpy.ones_like(self.psi_), self.Z, self.X, self.X_variance) + psimu, psiS = self.kern.__getattribute__("d" + self.which + "_dmuS")(numpy.ones_like(self.psi_), self.Z, self.q) self.X.gradient = psimu self.X_variance.gradient = psiS #psimu, psiS = numpy.ones(self.N * self.input_dim), numpy.ones(self.N * self.input_dim) - try: psiZ = self.kern.__getattribute__("d" + self.which + "_dZ")(numpy.ones_like(self.psi_), self.Z, self.X, self.X_variance) + try: psiZ = self.kern.__getattribute__("d" + self.which + "_dZ")(numpy.ones_like(self.psi_), self.Z, self.q) except AttributeError: psiZ = numpy.zeros_like(self.Z) self.Z.gradient = psiZ #psiZ = numpy.ones(self.num_inducing * self.input_dim) @@ -176,6 +178,6 @@ if __name__ == "__main__": +GPy.kern.White(input_dim) ) ) - m2.ensure_default_constraints() + #m2.ensure_default_constraints() else: unittest.main() From dfb63860ca9f6b8b10fc4879a21a25733e0277c1 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 12:03:25 +0000 Subject: [PATCH 021/116] psi stat expectations with slices --- GPy/testing/psi_stat_expectation_tests.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/GPy/testing/psi_stat_expectation_tests.py b/GPy/testing/psi_stat_expectation_tests.py index 04167bef..ffbde37c 100644 --- a/GPy/testing/psi_stat_expectation_tests.py +++ b/GPy/testing/psi_stat_expectation_tests.py @@ -34,10 +34,11 @@ class Test(unittest.TestCase): def setUp(self): self.kerns = ( - GPy.kern.RBF([0,1,2], ARD=True)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), - GPy.kern.RBF(self.input_dim)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), - GPy.kern.Linear(self.input_dim) + GPy.kern.Bias(self.input_dim) + GPy.kern.White(self.input_dim), - GPy.kern.Linear(self.input_dim, ARD=True) + GPy.kern.Bias(self.input_dim) + GPy.kern.White(self.input_dim), + #GPy.kern.RBF([0,1,2], ARD=True)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), + #GPy.kern.RBF(self.input_dim)+GPy.kern.Bias(self.input_dim)+GPy.kern.White(self.input_dim), + #GPy.kern.Linear(self.input_dim) + GPy.kern.Bias(self.input_dim) + GPy.kern.White(self.input_dim), + #GPy.kern.Linear(self.input_dim, ARD=True) + GPy.kern.Bias(self.input_dim) + GPy.kern.White(self.input_dim), + GPy.kern.Linear([1,3,6,7], ARD=True) + GPy.kern.RBF([0,5,8], ARD=True) + 
GPy.kern.White(self.input_dim), ) self.q_x_mean = np.random.randn(self.input_dim)[None] self.q_x_variance = np.exp(.5*np.random.randn(self.input_dim))[None] From 54239555a1a099d423f28458ba8ebc5bb25a33ad Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 12:03:37 +0000 Subject: [PATCH 022/116] psi_stat slices for kernels --- GPy/kern/_src/add.py | 55 +++++++++++++++++++++++++---------------- GPy/kern/_src/kern.py | 18 +++++--------- GPy/kern/_src/linear.py | 3 +-- GPy/kern/_src/rbf.py | 6 ++++- GPy/kern/_src/static.py | 28 +++++++++++++++++++++ 5 files changed, 74 insertions(+), 36 deletions(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 8a3cefaf..fdebdfac 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -17,6 +17,11 @@ class Add(CombinationKernel): @Cache_this(limit=2, force_kwargs=['which_parts']) def K(self, X, X2=None, which_parts=None): + """ + Add all kernels together. + If a list of parts (of this kernel!) `which_parts` is given, only + the parts of the list are taken to compute the covariance. + """ assert X.shape[1] == self.input_dim if which_parts is None: which_parts = self.parts @@ -25,6 +30,22 @@ class Add(CombinationKernel): which_parts = [which_parts] return reduce(np.add, (p.K(X, X2) for p in which_parts)) + @Cache_this(limit=2, force_kwargs=['which_parts']) + def Kdiag(self, X, which_parts=None): + assert X.shape[1] == self.input_dim + if which_parts is None: + which_parts = self.parts + elif not isinstance(which_parts, (list, tuple)): + # if only one part is given + which_parts = [which_parts] + return reduce(np.add, (p.Kdiag(X) for p in which_parts)) + + def update_gradients_full(self, dL_dK, X, X2=None): + [p.update_gradients_full(dL_dK, X, X2) for p in self.parts] + + def update_gradients_diag(self, dL_dK, X): + [p.update_gradients_diag(dL_dK, X) for p in self.parts] + def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X. 
@@ -36,18 +57,9 @@ class Add(CombinationKernel): :type X2: np.ndarray (num_inducing x input_dim)""" target = np.zeros(X.shape) - for p in self.parts: - target[:, p.active_dims] += p.gradients_X(dL_dK, X, X2) + [target.__setitem__([Ellipsis, p.active_dims], target[:, p.active_dims]+p.gradients_X(dL_dK, X, X2)) for p in self.parts] return target - @Cache_this(limit=2, force_kwargs=['which_parts']) - def Kdiag(self, X, which_parts=None): - assert X.shape[1] == self.input_dim - if which_parts is None: - which_parts = self.parts - return sum([p.Kdiag(X) for p in which_parts]) - - def psi0(self, Z, variational_posterior): return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts)) @@ -56,7 +68,7 @@ class Add(CombinationKernel): def psi2(self, Z, variational_posterior): psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts)) - return psi2 + #return psi2 # compute the "cross" terms from static import White, Bias from rbf import RBF @@ -65,23 +77,24 @@ class Add(CombinationKernel): #ffrom fixed import Fixed for p1, p2 in itertools.combinations(self.parts, 2): - i1, i2 = p1.active_dims, p2.active_dims + # i1, i2 = p1.active_dims, p2.active_dims # white doesn;t combine with anything if isinstance(p1, White) or isinstance(p2, White): pass # rbf X bias #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)): elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)): - # manual override for slicing: - p2._sliced_X = p1._sliced_X = True - tmp = p2.psi1(Z[:,i2], variational_posterior[:, i1]) + tmp = p2.psi1(Z, variational_posterior) psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :]) #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)): elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)): - # manual override for slicing: - p2._sliced_X = p1._sliced_X = True - tmp = p1.psi1(Z[:,i1], variational_posterior[:, i2]) + tmp = p1.psi1(Z, variational_posterior) psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :]) + elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)): + assert np.intersect1d(p1.active_dims, p2.active_dims).size == 0, "only non overlapping kernel dimensions allowed so far" + tmp1 = p1.psi1(Z, variational_posterior) + tmp2 = p2.psi1(Z, variational_posterior) + psi2 += (tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :]) else: raise NotImplementedError, "psi2 cannot be computed for this kernel" return psi2 @@ -98,7 +111,7 @@ class Add(CombinationKernel): continue elif isinstance(p2, Bias): eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2. - else: + else:# np.setdiff1d(p1.active_dims, ar2, assume_unique): # TODO: Careful, not correct for overlapping active_dims eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2. p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) @@ -114,9 +127,9 @@ class Add(CombinationKernel): if isinstance(p2, White): continue elif isinstance(p2, Bias): - eff_dL_dpsi1 += 0#dL_dpsi2.sum(1) * p2.variance * 2. + eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2. else: - eff_dL_dpsi1 += 0#dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2. + eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2. 
target[:, p1.active_dims] += p1.gradients_Z_expectations(eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) return target diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 8feb9a04..8a24e24a 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -15,6 +15,7 @@ class Kern(Parameterized): # found in kernel_slice_operations __metaclass__ = KernCallsViaSlicerMeta #=========================================================================== + _debug=False def __init__(self, input_dim, name, *a, **kw): """ The base class for a kernel: a positive definite function @@ -27,12 +28,12 @@ class Kern(Parameterized): """ super(Kern, self).__init__(name=name, *a, **kw) if isinstance(input_dim, int): - self.active_dims = slice(0, input_dim) + self.active_dims = np.r_[0:input_dim] self.input_dim = input_dim else: - self.active_dims = input_dim + self.active_dims = np.r_[input_dim] self.input_dim = len(self.active_dims) - self._sliced_X = False + self._sliced_X = 0 @Cache_this(limit=10)#, ignore_args = (0,)) def _slice_X(self, X): @@ -60,14 +61,13 @@ class Kern(Parameterized): raise NotImplementedError def gradients_X_diag(self, dL_dKdiag, X): raise NotImplementedError - + def update_gradients_full(self, dL_dK, X, X2): """Set the gradients of all parameters when doing full (N) inference.""" raise NotImplementedError def update_gradients_diag(self, dL_dKdiag, X): """Set the gradients for all parameters for the derivative of the diagonal of the covariance w.r.t the kernel parameters.""" raise NotImplementedError - def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): """ Set the gradients of all parameters when doing inference with @@ -188,7 +188,7 @@ class Kern(Parameterized): class CombinationKernel(Kern): def __init__(self, kernels, name): assert all([isinstance(k, Kern) for k in kernels]) - input_dim = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels)) + input_dim = reduce(np.union1d, (x.active_dims for x in kernels)) super(CombinationKernel, self).__init__(input_dim, name) self.add_parameters(*kernels) @@ -196,12 +196,6 @@ class CombinationKernel(Kern): def parts(self): return self._parameters_ - def update_gradients_full(self, dL_dK, X, X2=None): - [p.update_gradients_full(dL_dK, X, X2) for p in self.parts] - - def update_gradients_diag(self, dL_dK, X): - [p.update_gradients_diag(dL_dK, X) for p in self.parts] - def input_sensitivity(self): in_sen = np.zeros((self.num_params, self.input_dim)) for i, p in enumerate(self.parts): diff --git a/GPy/kern/_src/linear.py b/GPy/kern/_src/linear.py index 60645d11..f2ac0124 100644 --- a/GPy/kern/_src/linear.py +++ b/GPy/kern/_src/linear.py @@ -147,7 +147,6 @@ class Linear(Kern): mu = variational_posterior.mean S = variational_posterior.variance mu2S = np.square(mu)+S - _dpsi2_dvariance, _, _, _, _ = linear_psi_comp._psi2computations(self.variances, Z, mu, S, gamma) grad = np.einsum('n,nq,nq->q',dL_dpsi0,gamma,mu2S) + np.einsum('nm,nq,mq,nq->q',dL_dpsi1,gamma,Z,mu) +\ np.einsum('nmo,nmoq->q',dL_dpsi2,_dpsi2_dvariance) @@ -175,7 +174,7 @@ class Linear(Kern): mu = variational_posterior.mean S = variational_posterior.variance _, _, _, _, _dpsi2_dZ = linear_psi_comp._psi2computations(self.variances, Z, mu, S, gamma) - + grad = np.einsum('nm,nq,q,nq->mq',dL_dpsi1,gamma, self.variances,mu) +\ np.einsum('nmo,noq->mq',dL_dpsi2,_dpsi2_dZ) diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py index 7ba1f35d..341d46a7 100644 --- a/GPy/kern/_src/rbf.py +++ b/GPy/kern/_src/rbf.py @@ -19,7 +19,6 @@ 
class RBF(Stationary): k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) """ - def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='rbf'): super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, name) self.weave_options = {} @@ -81,6 +80,8 @@ class RBF(Stationary): #contributions from psi0: self.variance.gradient = np.sum(dL_dpsi0) + if self._debug: + num_grad = self.lengthscale.gradient.copy() self.lengthscale.gradient = 0. #from psi1 @@ -100,6 +101,8 @@ class RBF(Stationary): else: self.lengthscale.gradient += self._weave_psi2_lengthscale_grads(dL_dpsi2, psi2, Zdist_sq, S, mudist_sq, l2) + if self._debug: + import ipdb;ipdb.set_trace() self.variance.gradient += 2.*np.sum(dL_dpsi2 * psi2)/self.variance else: @@ -150,6 +153,7 @@ class RBF(Stationary): grad_mu = (dL_dpsi1[:, :, None] * _dpsi1_dmu).sum(axis=1) grad_S = (dL_dpsi1[:, :, None] * _dpsi1_dS).sum(axis=1) grad_gamma = (dL_dpsi1[:,:,None] * _dpsi1_dgamma).sum(axis=1) + #psi2 grad_mu += (dL_dpsi2[:, :, :, None] * _dpsi2_dmu).reshape(ndata,-1,self.input_dim).sum(axis=1) grad_S += (dL_dpsi2[:, :, :, None] * _dpsi2_dS).reshape(ndata,-1,self.input_dim).sum(axis=1) diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py index f344357c..387c92c6 100644 --- a/GPy/kern/_src/static.py +++ b/GPy/kern/_src/static.py @@ -89,3 +89,31 @@ class Bias(Static): def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum() +class Fixed(Static): + def __init__(self, input_dim, covariance_matrix, variance=1., name='fixed'): + """ + :param input_dim: the number of input dimensions + :type input_dim: int + :param variance: the variance of the kernel + :type variance: float + """ + super(Bias, self).__init__(input_dim, variance, name) + self.fixed_K = covariance_matrix + def K(self, X, X2): + return self.variance * self.fixed_K + + def Kdiag(self, X): + return self.variance * self.fixed_K.diag() + + def update_gradients_full(self, dL_dK, X, X2=None): + self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K) + + def update_gradients_diag(self, dL_dKdiag, X): + self.variance.gradient = np.einsum('i,i', dL_dKdiag, self.fixed_K) + + def psi2(self, Z, variational_posterior): + return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64) + + def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + self.variance.gradient = dL_dpsi0.sum() + From 5027e8e312caa99946f4817a446c12779b5b0934 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 12:03:47 +0000 Subject: [PATCH 023/116] diagonal add kmm --- GPy/inference/latent_function_inference/var_dtc.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index 52f44cdf..6239e5a4 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -3,6 +3,7 @@ from posterior import Posterior from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify +from ...util import diag from ...core.parameterization.variational import VariationalPosterior import numpy as np from ...util.misc import param_to_array @@ -28,7 +29,7 @@ class VarDTC(object): def set_limit(self, limit): self.get_trYYT.limit = limit self.get_YYTfactor.limit = limit 
- + def _get_trYYT(self, Y): return param_to_array(np.sum(np.square(Y))) @@ -77,10 +78,10 @@ class VarDTC(object): num_inducing = Z.shape[0] num_data = Y.shape[0] # kernel computations, using BGPLVM notation - - Kmm = kern.K(Z) +np.eye(Z.shape[0]) * self.const_jitter - Lm = jitchol(Kmm+np.eye(Z.shape[0])*self.const_jitter) + Kmm = kern.K(Z).copy() + diag.add(Kmm, self.const_jitter) + Lm = jitchol(Kmm) # The rather complex computations of A if uncertain_inputs: @@ -169,7 +170,6 @@ class VarDTC(object): Bi, _ = dpotri(LB, lower=1) symmetrify(Bi) Bi = -dpotri(LB, lower=1)[0] - from ...util import diag diag.add(Bi, 1) woodbury_inv = backsub_both_sides(Lm, Bi) @@ -238,7 +238,8 @@ class VarDTCMissingData(object): dL_dKmm = 0 log_marginal = 0 - Kmm = kern.K(Z) + Kmm = kern.K(Z).copy() + diag.add(Kmm, self.const_jitter) #factor Kmm Lm = jitchol(Kmm) if uncertain_inputs: LmInv = dtrtri(Lm) @@ -324,7 +325,6 @@ class VarDTCMissingData(object): Bi, _ = dpotri(LB, lower=1) symmetrify(Bi) Bi = -dpotri(LB, lower=1)[0] - from ...util import diag diag.add(Bi, 1) woodbury_inv_all[:, :, ind] = backsub_both_sides(Lm, Bi)[:,:,None] From 2200c5c30b8c98e653b7a0433ac02dce20835075 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 12:05:54 +0000 Subject: [PATCH 024/116] uncertain_inputs_example plot changed --- GPy/examples/regression.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index cc23410a..7cd1e964 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -468,7 +468,7 @@ def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, opt def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True): """Run a 1D example of a sparse GP regression with uncertain inputs.""" - fig, axes = pb.subplots(1, 2, figsize=(12, 5)) + fig, axes = pb.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True) # sample inputs and outputs S = np.ones((20, 1)) From 53e071b892a5b2bf0ab8efee05b358efed9f4ec8 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 12:06:21 +0000 Subject: [PATCH 025/116] gradient check and debug options --- GPy/core/gp.py | 6 +++--- GPy/core/model.py | 7 ++++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 6441561b..2cff4341 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -70,7 +70,7 @@ class GP(Model): self.posterior, self._log_marginal_likelihood, grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, Y_metadata=self.Y_metadata) self.likelihood.update_gradients(np.diag(grad_dict['dL_dK'])) self.kern.update_gradients_full(grad_dict['dL_dK'], self.X) - + def log_likelihood(self): return self._log_marginal_likelihood @@ -186,7 +186,7 @@ class GP(Model): """ assert "matplotlib" in sys.modules, "matplotlib package has not been imported." from ..plotting.matplot_dep import models_plots - models_plots.plot_fit_f(self,*args,**kwargs) + return models_plots.plot_fit_f(self,*args,**kwargs) def plot(self, *args, **kwargs): """ @@ -207,7 +207,7 @@ class GP(Model): """ assert "matplotlib" in sys.modules, "matplotlib package has not been imported." 
from ..plotting.matplot_dep import models_plots - models_plots.plot_fit(self,*args,**kwargs) + return models_plots.plot_fit(self,*args,**kwargs) def _getstate(self): """ diff --git a/GPy/core/model.py b/GPy/core/model.py index 6a6fe1ba..710c1b22 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -339,7 +339,7 @@ class Model(Parameterized): print "No free parameters to check" return - gradient = self.objective_function_gradients(x) + gradient = self.objective_function_gradients(x).copy() np.where(gradient == 0, 1e-312, gradient) ret = True for nind, xind in itertools.izip(param_index, transformed_index): @@ -350,7 +350,12 @@ class Model(Parameterized): f2 = self.objective_function(xx) numerical_gradient = (f1 - f2) / (2 * step) if _debug: + for p in self.kern.flattened_parameters: + p._parent_._debug=True self.gradient[xind] = numerical_gradient + self._set_params_transformed(x) + for p in self.kern.flattened_parameters: + p._parent_._debug=False if np.all(gradient[xind]==0): ratio = (f1-f2) == gradient[xind] else: ratio = (f1 - f2) / (2 * step * gradient[xind]) difference = np.abs((f1 - f2) / 2 / step - gradient[xind]) From a43021d74a3d0cebf2fe28680d176f246c7f96b4 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Wed, 12 Mar 2014 12:44:42 +0000 Subject: [PATCH 026/116] new functionality added --- GPy/util/multioutput.py | 100 +++++++++++++++++++++++++++++----------- 1 file changed, 72 insertions(+), 28 deletions(-) diff --git a/GPy/util/multioutput.py b/GPy/util/multioutput.py index eb4d8d08..09a64518 100644 --- a/GPy/util/multioutput.py +++ b/GPy/util/multioutput.py @@ -1,12 +1,17 @@ import numpy as np import warnings -from .. import kern +import GPy -def build_XY(input_list,output_list=None,index=None): + +def get_slices(input_list): num_outputs = len(input_list) _s = [0] + [ _x.shape[0] for _x in input_list ] _s = np.cumsum(_s) slices = [slice(a,b) for a,b in zip(_s[:-1],_s[1:])] + return slices + +def build_XY(input_list,output_list=None,index=None): + num_outputs = len(input_list) if output_list is not None: assert num_outputs == len(output_list) Y = np.vstack(output_list) @@ -15,42 +20,81 @@ def build_XY(input_list,output_list=None,index=None): if index is not None: assert len(index) == num_outputs - I = np.vstack( [j*np.ones((_x.shape[0],1)) for _x,j in zip(input_list,index)] ) + I = np.hstack( [np.repeat(j,_x.shape[0]) for _x,j in zip(input_list,index)] ) else: - I = np.vstack( [j*np.ones((_x.shape[0],1)) for _x,j in zip(input_list,range(num_outputs))] ) + I = np.hstack( [np.repeat(j,_x.shape[0]) for _x,j in zip(input_list,range(num_outputs))] ) X = np.vstack(input_list) - X = np.hstack([X,I]) - return X,Y,slices + X = np.hstack([X,I[:,None]]) -def build_lcm(input_dim, num_outputs, CK = [], NC = [], W_columns=1,W=None,kappa=None): - #TODO build_icm or build_lcm + return X,Y,I[:,None]#slices + +def build_likelihood(Y_list,noise_index,likelihoods_list=None): + Ny = len(Y_list) + if likelihoods_list is None: + likelihoods_list = [GPy.likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for y,j in zip(Y_list,range(Ny))] + else: + assert len(likelihoods_list) == Ny + likelihood = GPy.likelihoods.mixed_noise.MixedNoise(likelihoods_list=likelihoods_list, noise_index=noise_index) + return likelihood + + +def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='X'): """ - Builds a kernel for a linear coregionalization model + Builds a kernel for an Intrinsic Coregionalization Model :input_dim: Input dimensionality :num_outputs: Number of outputs - :param CK: List of 
coregionalized kernels (i.e., this will be multiplied by a coregionalize kernel).
- :param K: List of kernels that will be added up together with CK, but won't be multiplied by a coregionalize kernel
- :param W_columns: number tuples of the corregionalization parameters 'coregion_W'
- :type W_columns: integer
+ :param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
+ :type kernel: a GPy kernel
+ :param W_rank: rank (number of columns) of the coregionalization matrix W
+ :type W_rank: integer
"""
+ if kernel.input_dim <> input_dim:
+ kernel.input_dim = input_dim
+ warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")

- for k in CK:
- if k.input_dim <> input_dim:
- k.input_dim = input_dim
- warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
+ K = kernel.prod(GPy.kern.Coregionalize(num_outputs,W_rank,W,kappa,name='B'),tensor=True,name=name)
+ K['.*variance'] = 1.
+ K['.*variance'].fix()
+ return K

- for k in NC:
- if k.input_dim <> input_dim + 1:
- k.input_dim = input_dim + 1
- warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")

- kernel = CK[0].prod(kern.Coregionalize(num_outputs,W_columns,W,kappa),tensor=True)
- for k in CK[1:]:
- k_coreg = kern.Coregionalize(num_outputs,W_columns,W,kappa)
- kernel += k.prod(k_coreg,tensor=True)
- for k in NC:
- kernel += k

- return kernel
+
+def LCM(input_dim, num_outputs, kernels_list, W_rank=1,name='X'):
+ """
+ Builds a kernel for a Linear Coregionalization Model
+
+ :param input_dim: Input dimensionality
+ :param num_outputs: Number of outputs
+ :param kernels_list: list of kernels, each of which will be multiplied by its own coregionalize kernel (matrix B).
+ :type kernels_list: list of GPy kernels
+ :param W_rank: rank (number of columns) of the coregionalization matrix W
+ :type W_rank: integer
+ """
+ Nk = len(kernels_list)
+ K = ICM(input_dim,num_outputs,kernels_list[0],W_rank,name='%s%s' %(name,0))
+ # name each additional ICM term by its index in the list
+ for j, kernel in enumerate(kernels_list[1:], 1):
+ K += ICM(input_dim,num_outputs,kernel,W_rank,name='%s%s' %(name,j))
+ return K
+
+
+def Private(input_dim, num_outputs, kernel, output, kappa=None,name='X'):
+ """
+ Builds an Intrinsic Coregionalization Model kernel with a private component for one output
+
+ :param input_dim: Input dimensionality
+ :param num_outputs: Number of outputs
+ :param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
+ :type kernel: a GPy kernel
+ :param output: index of the output that keeps a private (non-zero) component
+ :type output: integer
+ """
+ K = ICM(input_dim,num_outputs,kernel,W_rank=1,kappa=kappa,name=name)
+ K.B.W.fix(0)
+ _range = range(num_outputs)
+ _range.pop(output)
+ for j in _range:
+ K.B.kappa[j] = 0
+ K.B.kappa[j].fix()
+ return K

From 272afabda79d7f5ba9c542a56071282460250384 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 12 Mar 2014 12:45:11 +0000
Subject: [PATCH 027/116] new functionality added

---
 GPy/util/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/GPy/util/__init__.py b/GPy/util/__init__.py
index 1666fa35..8aea990c 100644
--- a/GPy/util/__init__.py
+++ b/GPy/util/__init__.py
@@ -14,6 +14,7 @@ import subarray_and_sorting
import caching
import diag
import initialization
+import multioutput

try:
import sympy

From e858a0bdc386e0263e0e32027361dce56ea900a0 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 12 Mar 2014 12:45:53 +0000
Subject: [PATCH 028/116] changes for coregionalized models

---
 GPy/plotting/matplot_dep/models_plots.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py
index 4ca4441e..9e86bf3d 100644
--- a/GPy/plotting/matplot_dep/models_plots.py
+++ b/GPy/plotting/matplot_dep/models_plots.py
@@ -56,8 +56,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
if ax is None:
fig = pb.figure(num=fignum)
ax = fig.add_subplot(111)
-
- if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
+
+ if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean
X_variance = param_to_array(model.X.variance)
else:
@@ -86,7 +86,14 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
upper = m + 2*np.sqrt(v)
Y = Y
else:
- m, v, lower, upper = model.predict(Xgrid)
+ if 'noise_index' in model.Y_metadata.keys():
+ if np.unique(model.Y_metadata['noise_index'][which_data_rows]).size > 1:
+ print "Data slices chosen have different noise models. Just one will be used."
+ noise_index = np.repeat(model.Y_metadata['noise_index'][which_data_rows][0], Xgrid.shape[0])[:,None]
+ m, v, lower, upper = model.predict(Xgrid,full_cov=False,noise_index=noise_index)
+ else:
+ noise_index = None
+ m, v, lower, upper = model.predict(Xgrid,full_cov=False)
Y = Y
for d in which_data_ycols:
gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], ax=ax, edgecol=linecol, fillcol=fillcol)

From 32d5b449eb5d82ebaa3cd41b087726f30f506e75 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 12 Mar 2014 12:46:54 +0000
Subject: [PATCH 029/116] Y_metadata added as parameter

---
 .../exact_gaussian_inference.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
index 922b52f4..6902c3f1 100644
--- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py
+++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
@@ -33,7 +33,7 @@ class ExactGaussianInference(object):
#if Y in self.cache, return self.Cache[Y], else store Y in cache and return L.
raise NotImplementedError, 'TODO' #TODO

- def inference(self, kern, X, likelihood, Y, Y_metadata=None):
+ def inference(self, kern, X, likelihood, Y, **Y_metadata):
"""
Returns a Posterior class containing essential quantities of the posterior
"""
@@ -41,7 +41,7 @@

K = kern.K(X)

- Wi, LW, LWi, W_logdet = pdinv(K + likelihood.covariance_matrix(Y, Y_metadata))
+ Wi, LW, LWi, W_logdet = pdinv(K + likelihood.covariance_matrix(Y, **Y_metadata))

alpha, _ = dpotrs(LW, YYT_factor, lower=1)

@@ -49,9 +49,4 @@

dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)

- #TODO: does this really live here?
- likelihood.update_gradients(np.diag(dL_dK))
-
return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK}
-
-

From 0422a565245727b1e3be4f211f532ec210839348 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 12 Mar 2014 12:48:35 +0000
Subject: [PATCH 030/116] Y_metadata is now **kwargs

---
 GPy/likelihoods/gaussian.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index b82750ac..8e34f6b9 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -2,7 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
#TODO
"""
-A lot of this code assumes that the link function is the identity.
+A lot of this code assumes that the link function is the identity.

I think laplace code is okay, but I'm quite sure that the EP moments will
only work if the link is identity.
@@ -49,7 +49,7 @@ class Gaussian(Likelihood):
if isinstance(gp_link, link_functions.Identity):
self.log_concave = True

- def covariance_matrix(self, Y, Y_metadata=None):
+ def covariance_matrix(self, Y, **Y_metadata):
return np.eye(Y.shape[0]) * self.variance

def update_gradients(self, partial):

From 45973dce10082f7659e73d14aa3ee3f5fc8dd106 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 12 Mar 2014 12:50:57 +0000
Subject: [PATCH 031/116] mixed_noise added

---
 GPy/likelihoods/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/GPy/likelihoods/__init__.py b/GPy/likelihoods/__init__.py
index 59e8fb74..28e44541 100644
--- a/GPy/likelihoods/__init__.py
+++ b/GPy/likelihoods/__init__.py
@@ -5,3 +5,4 @@ from gamma import Gamma
from poisson import Poisson
from student_t import StudentT
from likelihood import Likelihood
+from mixed_noise import MixedNoise

From 6ced5b124287c167c469d4d67776efd4eced2f11 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 12 Mar 2014 12:52:52 +0000
Subject: [PATCH 032/116] GPCoregionalizedRegression added

---
 GPy/core/gp.py | 9 ++--
 GPy/likelihoods/mixed_noise.py | 58 ++++++++++++++++++++++
 GPy/models/__init__.py | 6 +--
 GPy/models/gp_coregionalized_regression.py | 44 ++++++++++++++++
 4 files changed, 110 insertions(+), 7 deletions(-)
 create mode 100644 GPy/likelihoods/mixed_noise.py
 create mode 100644 GPy/models/gp_coregionalized_regression.py

diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 1add8268..b19f9ab2 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -27,7 +27,7 @@ class GP(Model):
"""

- def __init__(self, X, Y, kernel, likelihood, inference_method=None, Y_metadata=None, name='gp'):
+ def __init__(self, X, Y, kernel, likelihood, inference_method=None, name='gp', **Y_metadata):
super(GP, self).__init__(name)

assert X.ndim == 2
@@ -43,7 +43,7 @@ class GP(Model):
_, self.output_dim = self.Y.shape

if Y_metadata is not None:
- self.Y_metadata = ObservableArray(Y_metadata)
+ self.Y_metadata = Y_metadata
else: self.Y_metadata = None @@ -56,7 +56,7 @@ class GP(Model): #find a sensible inference method if inference_method is None: - if isinstance(likelihood, likelihoods.Gaussian): + if isinstance(likelihood, likelihoods.Gaussian) or isinstance(likelihood, likelihoods.MixedNoise): inference_method = exact_gaussian_inference.ExactGaussianInference() else: inference_method = expectation_propagation @@ -67,7 +67,8 @@ class GP(Model): self.add_parameter(self.likelihood) def parameters_changed(self): - self.posterior, self._log_marginal_likelihood, grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, Y_metadata=self.Y_metadata) + self.posterior, self._log_marginal_likelihood, grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, **self.Y_metadata) + self.likelihood.update_gradients(np.diag(grad_dict['dL_dK']), **self.Y_metadata) self.kern.update_gradients_full(grad_dict['dL_dK'], self.X) def log_likelihood(self): diff --git a/GPy/likelihoods/mixed_noise.py b/GPy/likelihoods/mixed_noise.py new file mode 100644 index 00000000..b60f3adf --- /dev/null +++ b/GPy/likelihoods/mixed_noise.py @@ -0,0 +1,58 @@ +import numpy as np +from scipy import stats, special +from GPy.util.univariate_Gaussian import std_norm_pdf, std_norm_cdf +import link_functions +from likelihood import Likelihood +from ..core.parameterization import Param +from ..core.parameterization.transformations import Logexp +from ..core.parameterization import Parameterized +import itertools + +class MixedNoise(Likelihood): + def __init__(self, likelihoods_list, noise_index, variance = None, name='mixed_noise'): + + Nlike = len(likelihoods_list) + self.order = np.unique(noise_index) + + assert self.order.size == Nlike + + if variance is None: + variance = np.ones(Nlike) + else: + assert variance.size == Nlike + + super(Likelihood, self).__init__(name=name) + + self.add_parameters(*likelihoods_list) + self.likelihoods_list = likelihoods_list + self.noise_index = noise_index + self.log_concave = False + self.likelihoods_indices = [noise_index.flatten()==j for j in self.order] + + def covariance_matrix(self, Y, noise_index, **Y_metadata): + variance = np.zeros(Y.shape[0]) + for lik, ind in itertools.izip(self.likelihoods_list, self.likelihoods_indices): + variance[ind] = lik.variance + return np.diag(variance) + + def update_gradients(self, partial, noise_index, **Y_metadata): + [lik.update_gradients(partial[ind]) for lik,ind in itertools.izip(self.likelihoods_list, self.likelihoods_indices)] + + def predictive_values(self, mu, var, full_cov=False, noise_index=None, **Y_metadata): + _variance = np.array([ self.likelihoods_list[j].variance for j in noise_index ]) + if full_cov: + var += np.eye(var.shape[0])*_variance + d = 2*np.sqrt(np.diag(var)) + low, up = mu - d, mu + d + else: + var += _variance + d = 2*np.sqrt(var) + low, up = mu - d, mu + d + return mu, var, low, up + + def predictive_variance(self, mu, sigma, noise_index, predictive_mean=None, **Y_metadata): + if isinstance(noise_index,int): + _variance = self.variance[noise_index] + else: + _variance = np.array([ self.variance[j] for j in noise_index ])[:,None] + return _variance + sigma**2 diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py index 83db4b8c..a253c63d 100644 --- a/GPy/models/__init__.py +++ b/GPy/models/__init__.py @@ -13,6 +13,6 @@ from warped_gp import WarpedGP from bayesian_gplvm import BayesianGPLVM from mrd import MRD from gradient_checker import GradientChecker -from 
gp_multioutput_regression import GPMultioutputRegression
-from sparse_gp_multioutput_regression import SparseGPMultioutputRegression
-from ss_gplvm import SSGPLVM
\ No newline at end of file
+from ss_gplvm import SSGPLVM
+from gp_coregionalized_regression import GPCoregionalizedRegression
+from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression

diff --git a/GPy/models/gp_coregionalized_regression.py b/GPy/models/gp_coregionalized_regression.py
new file mode 100644
index 00000000..313e09d4
--- /dev/null
+++ b/GPy/models/gp_coregionalized_regression.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2012 - 2014 the GPy Authors (see AUTHORS.txt)
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+import numpy as np
+from ..core import GP
+from .. import likelihoods
+from .. import kern
+from .. import util
+
+class GPCoregionalizedRegression(GP):
+ """
+ Gaussian Process model for heteroscedastic multioutput regression
+
+ This is a thin wrapper around the models.GP class, with a set of sensible defaults
+
+ :param X_list: list of input observations corresponding to each output
+ :type X_list: list of numpy arrays
+ :param Y_list: list of observed values related to the different noise models
+ :type Y_list: list of numpy arrays
+ :param kernel: a GPy kernel, defaults to RBF ** Coregionalized
+ :type kernel: None | GPy.kernel defaults
+ :param likelihoods_list: a list of likelihoods, defaults to a list of Gaussian likelihoods
+ :type likelihoods_list: None | a list of GPy.likelihoods
+ :param name: model name
+ :type name: string
+ :param W_rank: rank (number of columns) of the coregionalization matrix W (see coregionalize kernel documentation)
+ :type W_rank: integer
+ :param kernel_name: name of the kernel
+ :type kernel_name: string
+ """
+ def __init__(self, X_list, Y_list, kernel=None, likelihoods_list=None, name='GPCR',W_rank=1,kernel_name='X'):
+
+ #Input and Output
+ X,Y,self.noise_index = util.multioutput.build_XY(X_list,Y_list)
+ Ny = len(Y_list)
+
+ #Kernel
+ if kernel is None:
+ kernel = util.multioutput.ICM(input_dim=X.shape[1]-1, num_outputs=Ny, kernel=kern.RBF(X.shape[1]-1), W_rank=1,name=kernel_name)
+
+ #Likelihood
+ likelihood = util.multioutput.build_likelihood(Y_list,self.noise_index,likelihoods_list)
+
+ super(GPCoregionalizedRegression, self).__init__(X,Y,kernel,likelihood, noise_index=self.noise_index)

From abc7545e0993dd927b20ee4a5854fd72e8cb0694 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Wed, 12 Mar 2014 13:00:02 +0000
Subject: [PATCH 033/116] kernel slicer

---
 GPy/kern/_src/kernel_slice_operations.py | 108 +++++++++++++++++++++++
 1 file changed, 108 insertions(+)
 create mode 100644 GPy/kern/_src/kernel_slice_operations.py

diff --git a/GPy/kern/_src/kernel_slice_operations.py b/GPy/kern/_src/kernel_slice_operations.py
new file mode 100644
index 00000000..c1774a35
--- /dev/null
+++ b/GPy/kern/_src/kernel_slice_operations.py
@@ -0,0 +1,108 @@
+'''
+Created on 11 Mar 2014
+
+@author: maxz
+'''
+from ...core.parameterization.parameterized import ParametersChangedMeta
+
+class KernCallsViaSlicerMeta(ParametersChangedMeta):
+ def __call__(self, *args, **kw):
+ instance = super(ParametersChangedMeta, self).__call__(*args, **kw)
+ instance.K = _slice_wrapper(instance, instance.K)
+ instance.Kdiag = _slice_wrapper(instance, instance.Kdiag, True)
+ instance.update_gradients_full = _slice_wrapper(instance, instance.update_gradients_full, False, True)
+ instance.update_gradients_diag = _slice_wrapper(instance, instance.update_gradients_diag, True, True)
+
instance.gradients_X = _slice_wrapper(instance, instance.gradients_X, False, True) + instance.gradients_X_diag = _slice_wrapper(instance, instance.gradients_X_diag, True, True) + instance.psi0 = _slice_wrapper(instance, instance.psi0, False, False) + instance.psi1 = _slice_wrapper(instance, instance.psi1, False, False) + instance.psi2 = _slice_wrapper(instance, instance.psi2, False, False) + instance.update_gradients_expectations = _slice_wrapper(instance, instance.update_gradients_expectations, psi_stat=True) + instance.gradients_Z_expectations = _slice_wrapper(instance, instance.gradients_Z_expectations, psi_stat_Z=True) + instance.gradients_qX_expectations = _slice_wrapper(instance, instance.gradients_qX_expectations, psi_stat=True) + instance.parameters_changed() + return instance + +def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False, psi_stat_Z=False): + """ + This method wraps the functions in kernel to make sure all kernels allways see their respective input dimension. + The different switches are: + diag: if X2 exists + derivative: if first arg is dL_dK + psi_stat: if first 3 args are dL_dpsi0..2 + psi_stat_Z: if first 2 args are dL_dpsi1..2 + """ + if derivative: + if diag: + def x_slice_wrapper(dL_dK, X): + X = kern._slice_X(X) if not kern._sliced_X else X + kern._sliced_X += 1 + try: + ret = operation(dL_dK, X) + except: + raise + finally: + kern._sliced_X -= 1 + return ret + else: + def x_slice_wrapper(dL_dK, X, X2=None): + X, X2 = kern._slice_X(X) if not kern._sliced_X else X, kern._slice_X(X2) if X2 is not None and not kern._sliced_X else X2 + kern._sliced_X += 1 + try: + ret = operation(dL_dK, X, X2) + except: + raise + finally: + kern._sliced_X -= 1 + return ret + elif psi_stat: + def x_slice_wrapper(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + Z, variational_posterior = kern._slice_X(Z) if not kern._sliced_X else Z, kern._slice_X(variational_posterior) if not kern._sliced_X else variational_posterior + kern._sliced_X += 1 + try: + ret = operation(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior) + except: + raise + finally: + kern._sliced_X -= 1 + return ret + elif psi_stat_Z: + def x_slice_wrapper(dL_dpsi1, dL_dpsi2, Z, variational_posterior): + Z, variational_posterior = kern._slice_X(Z) if not kern._sliced_X else Z, kern._slice_X(variational_posterior) if not kern._sliced_X else variational_posterior + kern._sliced_X += 1 + try: + ret = operation(dL_dpsi1, dL_dpsi2, Z, variational_posterior) + except: + raise + finally: + kern._sliced_X -= 1 + return ret + else: + if diag: + def x_slice_wrapper(X, *args, **kw): + X = kern._slice_X(X) if not kern._sliced_X else X + kern._sliced_X += 1 + try: + ret = operation(X, *args, **kw) + except: + raise + finally: + kern._sliced_X -= 1 + return ret + else: + def x_slice_wrapper(X, X2=None, *args, **kw): + X, X2 = kern._slice_X(X) if not kern._sliced_X else X, kern._slice_X(X2) if X2 is not None and not kern._sliced_X else X2 + kern._sliced_X += 1 + try: + ret = operation(X, X2, *args, **kw) + except: raise + finally: + kern._sliced_X -= 1 + return ret + x_slice_wrapper._operation = operation + x_slice_wrapper.__name__ = ("slicer("+operation.__name__ + +(","+str(bool(diag)) if diag else'') + +(','+str(bool(derivative)) if derivative else '') + +')') + x_slice_wrapper.__doc__ = "**sliced**\n" + (operation.__doc__ or "") + return x_slice_wrapper \ No newline at end of file From b975a45cd23dfdbe4689e8fdc6983816080462fe Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 
13:02:52 +0000 Subject: [PATCH 034/116] copy --- GPy/core/parameterization/parameter_core.py | 28 ++++++++++----------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 5727bc17..d1122f79 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -16,7 +16,7 @@ Observable Pattern for patameterization from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED import numpy as np -__updated__ = '2014-03-11' +__updated__ = '2014-03-12' class HierarchyError(Exception): """ @@ -796,27 +796,27 @@ class Parameterizable(OptimizationHandlable): """ if not param in self._parameters_: raise RuntimeError, "Parameter {} does not belong to this object, remove parameters directly from their respective parents".format(param._short()) - + start = sum([p.size for p in self._parameters_[:param._parent_index_]]) self._remove_parameter_name(param) self.size -= param.size del self._parameters_[param._parent_index_] - + param._disconnect_parent() param.remove_observer(self, self._pass_through_notify_observers) self.constraints.shift_left(start, param.size) - + self._connect_fixes() self._connect_parameters() self._notify_parent_change() - + parent = self._parent_ while parent is not None: parent._connect_fixes() parent._connect_parameters() parent._notify_parent_change() parent = parent._parent_ - + def _connect_parameters(self, ignore_added_names=False): # connect parameterlist to this parameterized object # This just sets up the right connection for the params objects @@ -829,29 +829,26 @@ class Parameterizable(OptimizationHandlable): old_size = 0 self._param_array_ = np.empty(self.size, dtype=np.float64) self._gradient_array_ = np.empty(self.size, dtype=np.float64) - + self._param_slices_ = [] - for i, p in enumerate(self._parameters_): p._parent_ = self p._parent_index_ = i - + pslice = slice(old_size, old_size+p.size) - # first connect all children p._propagate_param_grad(self._param_array_[pslice], self._gradient_array_[pslice]) - # then connect children to self self._param_array_[pslice] = p._param_array_.ravel()#, requirements=['C', 'W']).ravel(order='C') self._gradient_array_[pslice] = p._gradient_array_.ravel()#, requirements=['C', 'W']).ravel(order='C') - + if not p._param_array_.flags['C_CONTIGUOUS']: import ipdb;ipdb.set_trace() p._param_array_.data = self._param_array_[pslice].data p._gradient_array_.data = self._gradient_array_[pslice].data - + self._param_slices_.append(pslice) - + self._add_parameter_name(p, ignore_added_names=ignore_added_names) old_size += p.size @@ -862,12 +859,13 @@ class Parameterizable(OptimizationHandlable): self.parameters_changed() def _pass_through_notify_observers(self, which): self.notify_observers(which) - + #=========================================================================== # TODO: not working yet #=========================================================================== def copy(self): """Returns a (deep) copy of the current model""" + raise NotImplementedError, "Copy is not yet implemented, TODO: Observable hierarchy" import copy from .index_operations import ParameterIndexOperations, ParameterIndexOperationsView from .lists_and_dicts import ArrayList From 5c939bc0d0b92d7f563be35762cf7ee035a7c8f2 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Wed, 12 Mar 2014 13:08:24 +0000 Subject: [PATCH 035/116] fixing fitc --- 
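
Note: with this change the DTC and FITC inference routines no longer update the
kernel and likelihood gradients as a side effect; FITC now returns the likelihood
partial in grad_dict['partial_for_likelihood'] instead. A minimal sketch of the
intended call site, modelled on GPy.core.gp.GP.parameters_changed (the
surrounding attribute names are assumptions for illustration, not part of this
patch):

    def parameters_changed(self):
        # run inference once and keep the returned gradient dictionary
        self.posterior, self._log_marginal_likelihood, self.grad_dict = \
            self.inference_method.inference(self.kern, self.X, self.Z,
                                            self.likelihood, self.Y)
        # the likelihood partial is read from the returned dict rather than
        # being set inside inference()
        self.likelihood.update_gradients(self.grad_dict['partial_for_likelihood'])
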
GPy/inference/latent_function_inference/dtc.py | 6 +----- GPy/inference/latent_function_inference/fitc.py | 9 ++------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py index 1a811de6..df2d5a03 100644 --- a/GPy/inference/latent_function_inference/dtc.py +++ b/GPy/inference/latent_function_inference/dtc.py @@ -19,7 +19,7 @@ class DTC(object): def __init__(self): self.const_jitter = 1e-6 - def inference(self, kern, X, X_variance, Z, likelihood, Y): + def inference(self, kern, X, Z, likelihood, Y): assert X_variance is None, "cannot use X_variance with DTC. Try varDTC." #TODO: MAX! fix this! @@ -80,10 +80,6 @@ class DTC(object): grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T} - #update gradients - kern.update_gradients_sparse(X=X, Z=Z, **grad_dict) - likelihood.update_gradients(dL_dR) - #construct a posterior object post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L) diff --git a/GPy/inference/latent_function_inference/fitc.py b/GPy/inference/latent_function_inference/fitc.py index 3ad51155..9e9c14e2 100644 --- a/GPy/inference/latent_function_inference/fitc.py +++ b/GPy/inference/latent_function_inference/fitc.py @@ -17,8 +17,7 @@ class FITC(object): def __init__(self): self.const_jitter = 1e-6 - def inference(self, kern, X, X_variance, Z, likelihood, Y): - assert X_variance is None, "cannot use X_variance with FITC. Try varDTC." + def inference(self, kern, X, Z, likelihood, Y): #TODO: MAX! fix this! from ...util.misc import param_to_array @@ -81,11 +80,7 @@ class FITC(object): dL_dU *= beta_star dL_dU -= 2.*KiU*dL_dR - grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR, 'dL_dKnm':dL_dU.T} - - #update gradients - kern.update_gradients_sparse(X=X, Z=Z, **grad_dict) - likelihood.update_gradients(dL_dR) + grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR, 'dL_dKnm':dL_dU.T, 'partial_for_likelihood':dL_dR} #construct a posterior object post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L) From 58523eaa3a1512a8f25d92b7b8d9018bfff4cf89 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Wed, 12 Mar 2014 13:23:01 +0000 Subject: [PATCH 036/116] old way of tensor product --- GPy/kern/_src/kern.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index f8f2d588..7d1f8d16 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -147,11 +147,14 @@ class Kern(Parameterized): """ Here we overload the '*' operator. See self.prod for more information""" return self.prod(other) - #def __pow__(self, other): - # """ - # Shortcut for tensor `prod`. - # """ - # return self.prod(other, tensor=True) + def __pow__(self, other): + """ + Shortcut for tensor `prod`. 
+ """ + assert self.active_dims == range(self.input_dim), "Can only use kernels, which have their input_dims defined from 0" + assert other.active_dims == range(other.input_dim), "Can only use kernels, which have their input_dims defined from 0" + other.active_dims += self.input_dim + return self.prod(other) def prod(self, other, name=None): """ From 7b42fa535d7ce8c6ffeecc1e263734e8f13f98ba Mon Sep 17 00:00:00 2001 From: Ricardo Date: Wed, 12 Mar 2014 19:22:17 +0000 Subject: [PATCH 037/116] fixing coreg kernel --- GPy/util/multioutput.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/GPy/util/multioutput.py b/GPy/util/multioutput.py index 09a64518..bec0e490 100644 --- a/GPy/util/multioutput.py +++ b/GPy/util/multioutput.py @@ -54,7 +54,8 @@ def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='X'): kernel.input_dim = input_dim warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.") - K = kernel.prod(GPy.kern.Coregionalize(num_outputs,W_rank,W,kappa,name='B'),tensor=True,name=name) + #K = kernel.prod(GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa,name='B'),tensor=True,name=name) + K = kernel.prod(GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa,name='B'),name=name) K['.*variance'] = 1. K['.*variance'].fix() return K From 5d6b26f3f5a48368501f8bccdf6935ccc8e9fe8c Mon Sep 17 00:00:00 2001 From: Ricardo Date: Wed, 12 Mar 2014 19:22:50 +0000 Subject: [PATCH 038/116] fixing coreg kernel --- GPy/kern/_src/coregionalize.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py index 1381b611..3503bbd6 100644 --- a/GPy/kern/_src/coregionalize.py +++ b/GPy/kern/_src/coregionalize.py @@ -34,8 +34,8 @@ class Coregionalize(Kern): .. note: see coregionalization examples in GPy.examples.regression for some usage. 
""" - def __init__(self, output_dim, rank=1, W=None, kappa=None, name='coregion'): - super(Coregionalize, self).__init__(input_dim=1, name=name) + def __init__(self, input_dim, output_dim, rank=1, W=None, kappa=None, name='coregion'): + super(Coregionalize, self).__init__(input_dim, name=name) self.output_dim = output_dim self.rank = rank if self.rank>output_dim: From c7ec34e4d98ab938111617cb0e157e9410251e0d Mon Sep 17 00:00:00 2001 From: Ricardo Date: Wed, 12 Mar 2014 20:16:17 +0000 Subject: [PATCH 039/116] missing bracket --- GPy/kern/_src/prod.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index 77b2ea51..bd664681 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -40,7 +40,7 @@ class Prod(CombinationKernel): def update_gradients_full(self, dL_dK, X): for k1,k2 in itertools.combinations(self.parts, 2): k1._sliced_X = k1._sliced_X2 = k2._sliced_X = k2._sliced_X2 = True - k1.update_gradients_full(dL_dK*k2.K(X, X) + k1.update_gradients_full(dL_dK*k2.K(X, X)) self.k2.update_gradients_full(dL_dK*self.k1.K(X[:,self.slice1]), X[:,self.slice2]) def gradients_X(self, dL_dK, X, X2=None): From d59a8576e14354cc260a52bd752943ba72be663a Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 13 Mar 2014 08:02:25 +0000 Subject: [PATCH 040/116] bugfixing --- GPy/kern/_src/add.py | 3 --- GPy/models/__init__.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 3f00b22d..fdebdfac 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -46,9 +46,6 @@ class Add(CombinationKernel): def update_gradients_diag(self, dL_dK, X): [p.update_gradients_diag(dL_dK, X) for p in self.parts] - def update_gradients_diag(self, dL_dKdiag, X): - [p.update_gradients_diag(dL_dKdiag, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] - def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X. diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py index a253c63d..34e5a17e 100644 --- a/GPy/models/__init__.py +++ b/GPy/models/__init__.py @@ -15,4 +15,4 @@ from mrd import MRD from gradient_checker import GradientChecker from ss_gplvm import SSGPLVM from gp_coregionalized_regression import GPCoregionalizedRegression -from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression +#.py file not included!!! 
#from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression From 8f7d8537b768e2877f664e751df5e2d035aac260 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 09:00:31 +0000 Subject: [PATCH 041/116] whitespaces --- GPy/core/parameterization/parameter_core.py | 28 ++++++++++----------- GPy/core/parameterization/parameterized.py | 1 - 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index d1122f79..2a61c970 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -16,7 +16,7 @@ Observable Pattern for patameterization from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED import numpy as np -__updated__ = '2014-03-12' +__updated__ = '2014-03-13' class HierarchyError(Exception): """ @@ -644,10 +644,10 @@ class OptimizationHandlable(Constrainable, Observable): self._param_array_[pislice] = pi._param_array_.ravel()#, requirements=['C', 'W']).flat self._gradient_array_[pislice] = pi._gradient_array_.ravel()#, requirements=['C', 'W']).flat - + pi._param_array_.data = parray[pislice].data pi._gradient_array_.data = garray[pislice].data - + pi._propagate_param_grad(parray[pislice], garray[pislice]) pi_old_size += pi.size @@ -660,11 +660,11 @@ class Parameterizable(OptimizationHandlable): self._param_array_ = np.empty(self.size, dtype=np.float64) self._gradient_array_ = np.empty(self.size, dtype=np.float64) self._added_names_ = set() - + def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True): """ Get the names of all parameters of this model. - + :param bool add_self: whether to add the own name in front of names :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names :param bool recursive: whether to traverse through hierarchy and append leaf node names @@ -675,11 +675,11 @@ class Parameterizable(OptimizationHandlable): else: names = [adjust(x.name) for x in self._parameters_] if add_self: names = map(lambda x: adjust(self.name) + "." 
+ x, names) return names - + @property def num_params(self): return len(self._parameters_) - + def _add_parameter_name(self, param, ignore_added_names=False): pname = adjust_name_for_printing(param.name) if ignore_added_names: @@ -694,7 +694,7 @@ class Parameterizable(OptimizationHandlable): elif pname not in dir(self): self.__dict__[pname] = param self._added_names_.add(pname) - + def _remove_parameter_name(self, param=None, pname=None): assert param is None or pname is None, "can only delete either param by name, or the name of a param" pname = adjust_name_for_printing(pname) or adjust_name_for_printing(param.name) @@ -706,14 +706,14 @@ class Parameterizable(OptimizationHandlable): def _name_changed(self, param, old_name): self._remove_parameter_name(None, old_name) self._add_parameter_name(param) - + #========================================================================= # Gradient handling #========================================================================= @property def gradient(self): return self._gradient_array_ - + @gradient.setter def gradient(self, val): self._gradient_array_[:] = val @@ -734,13 +734,13 @@ class Parameterizable(OptimizationHandlable): # def _set_gradient(self, g): # [p._set_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)] #=========================================================================== - + def add_parameter(self, param, index=None, _ignore_added_names=False): """ :param parameters: the parameters to add :type parameters: list of or one :py:class:`GPy.core.param.Param` :param [index]: index of where to put parameters - + :param bool _ignore_added_names: whether the name of the parameter overrides a possibly existing field Add all parameters to this param class, you can insert parameters @@ -771,9 +771,9 @@ class Parameterizable(OptimizationHandlable): self.constraints.update(param.constraints, start) self.priors.update(param.priors, start) self._parameters_.insert(index, param) - + param.add_observer(self, self._pass_through_notify_observers, -np.inf) - + self.size += param.size self._connect_parameters(ignore_added_names=_ignore_added_names) diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index a98f0098..8551c831 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -101,7 +101,6 @@ class Parameterized(Parameterizable, Pickleable): return G return node - def _getstate(self): """ Get the current state of the class, From eb8b2c8b47666355b6636464caaded37463751a6 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 09:07:27 +0000 Subject: [PATCH 042/116] combination slices full now, independent output kernel --- GPy/kern/_src/independent_outputs.py | 32 ++++++++++++++-------------- GPy/kern/_src/kern.py | 3 ++- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 252a7bc3..c6860d3c 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -40,24 +40,25 @@ class IndependentOutputs(Kern): the rest of the columns of X are passed to the underlying kernel for computation (in blocks). 
""" - def __init__(self, kern, name='independ'): - super(IndependentOutputs, self).__init__(kern.input_dim+1, name) + def __init__(self, active_dim, kern, name='independ'): + super(IndependentOutputs, self).__init__(np.hstack((kern.active_dims,np.r_[active_dim])), name) + self.index_dim = active_dim self.kern = kern self.add_parameters(self.kern) def K(self,X ,X2=None): - X, slices = X[:,:-1], index_to_slices(X[:,-1]) + slices = index_to_slices(X[:,self.index_dim]) if X2 is None: target = np.zeros((X.shape[0], X.shape[0])) - [[np.copyto(target[s,s], self.kern.K(X[s], None)) for s in slices_i] for slices_i in slices] + [[np.copyto(target[s,s], self.kern.K(X[s,:], None)) for s in slices_i] for slices_i in slices] else: - X2, slices2 = X2[:,:-1],index_to_slices(X2[:,-1]) + slices2 = index_to_slices(X2[:,self.index_dim]) target = np.zeros((X.shape[0], X2.shape[0])) - [[[np.copyto(target[s, s2], self.kern.K(X[s],X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] + [[[np.copyto(target[s, s2], self.kern.K(X[s,:],X2[s2,:])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] return target def Kdiag(self,X): - X, slices = X[:,:-1], index_to_slices(X[:,-1]) + slices = index_to_slices(X[:,self.index_dim]) target = np.zeros(X.shape[0]) [[np.copyto(target[s], self.kern.Kdiag(X[s])) for s in slices_i] for slices_i in slices] return target @@ -66,20 +67,19 @@ class IndependentOutputs(Kern): target = np.zeros(self.kern.size) def collate_grads(dL, X, X2): self.kern.update_gradients_full(dL,X,X2) - self.kern._collect_gradient(target) + target += self.kern.gradient - X,slices = X[:,:-1],index_to_slices(X[:,-1]) + slices = index_to_slices(X[:,self.index_dim]) if X2 is None: [[collate_grads(dL_dK[s,s], X[s], None) for s in slices_i] for slices_i in slices] else: - X2, slices2 = X2[:,:-1], index_to_slices(X2[:,-1]) + slices2 = index_to_slices(X2[:,self.index_dim]) [[[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] - - self.kern._set_gradient(target) + self.kern.gradient = target def gradients_X(self,dL_dK, X, X2=None): target = np.zeros_like(X) - X, slices = X[:,:-1],index_to_slices(X[:,-1]) + slices = index_to_slices(X[:,self.index_dim]) if X2 is None: [[np.copyto(target[s,:-1], self.kern.gradients_X(dL_dK[s,s],X[s],None)) for s in slices_i] for slices_i in slices] else: @@ -88,7 +88,7 @@ class IndependentOutputs(Kern): return target def gradients_X_diag(self, dL_dKdiag, X): - X, slices = X[:,:-1], index_to_slices(X[:,-1]) + slices = index_to_slices(X[:,self.index_dim]) target = np.zeros(X.shape) [[np.copyto(target[s,:-1], self.kern.gradients_X_diag(dL_dKdiag[s],X[s])) for s in slices_i] for slices_i in slices] return target @@ -97,10 +97,10 @@ class IndependentOutputs(Kern): target = np.zeros(self.kern.size) def collate_grads(dL, X): self.kern.update_gradients_diag(dL,X) - self.kern._collect_gradient(target) + self.target += self.kern.gradient X,slices = X[:,:-1],index_to_slices(X[:,-1]) [[collate_grads(dL_dKdiag[s], X[s,:]) for s in slices_i] for slices_i in slices] - self.kern._set_gradient(target) + self.kern.gradient = target class Hierarchical(Kern): """ diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 7d1f8d16..0aa414ca 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -195,7 +195,8 @@ class Kern(Parameterized): class CombinationKernel(Kern): def __init__(self, kernels, name): assert all([isinstance(k, Kern) for k in 
kernels]) - input_dim = reduce(np.union1d, (x.active_dims for x in kernels)) + ma = reduce(lambda a,b: max(a, max(b)), (x.active_dims for x in kernels), 0) + input_dim = np.r_[0:ma+1] super(CombinationKernel, self).__init__(input_dim, name) self.add_parameters(*kernels) From 7adf5217f20f4de275003c57cd02fa7710d3d468 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 09:07:56 +0000 Subject: [PATCH 043/116] grad dict is property of self --- GPy/core/gp.py | 6 +++--- GPy/models/gplvm.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 2cff4341..23623214 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -67,9 +67,9 @@ class GP(Model): self.add_parameter(self.likelihood) def parameters_changed(self): - self.posterior, self._log_marginal_likelihood, grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, Y_metadata=self.Y_metadata) - self.likelihood.update_gradients(np.diag(grad_dict['dL_dK'])) - self.kern.update_gradients_full(grad_dict['dL_dK'], self.X) + self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, Y_metadata=self.Y_metadata) + self.likelihood.update_gradients(np.diag(self.grad_dict['dL_dK'])) + self.kern.update_gradients_full(self.grad_dict['dL_dK'], self.X) def log_likelihood(self): return self._log_marginal_likelihood diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py index ba270dad..5f7e3265 100644 --- a/GPy/models/gplvm.py +++ b/GPy/models/gplvm.py @@ -41,7 +41,7 @@ class GPLVM(GP): def parameters_changed(self): super(GPLVM, self).parameters_changed() - self.X.gradient = self.kern.gradients_X(self.dL_dK, self.X, None) + self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None) def _getstate(self): return GP._getstate(self) From ecccf0cbbf68dab71bda1c8b816d359531f1f24e Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 09:09:13 +0000 Subject: [PATCH 044/116] add kernel has its own gradients update --- GPy/kern/_src/add.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 3f00b22d..fdebdfac 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -46,9 +46,6 @@ class Add(CombinationKernel): def update_gradients_diag(self, dL_dK, X): [p.update_gradients_diag(dL_dK, X) for p in self.parts] - def update_gradients_diag(self, dL_dKdiag, X): - [p.update_gradients_diag(dL_dKdiag, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)] - def gradients_X(self, dL_dK, X, X2=None): """Compute the gradient of the objective function with respect to X. 
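A minimal usage sketch of the Add kernel whose gradient code is touched above, assuming the GPy API as it stands at this point in the series (RBF, Bias and the gradient property all appear in the surrounding patches):

    import numpy as np
    import GPy

    # an Add combination kernel: both parts act on the same 2d input
    k = GPy.kern.RBF(2) + GPy.kern.Bias(2)
    X = np.random.randn(10, 2)
    dL_dKdiag = np.ones(10)                # dummy gradient w.r.t. the diagonal of K
    k.update_gradients_diag(dL_dKdiag, X)  # delegates to every part on the full X
    print k.gradient                       # stacked gradients of all parts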
From 5fb0acbdb44d6bdf43716d99b338b2c50006284d Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Thu, 13 Mar 2014 09:38:12 +0000
Subject: [PATCH 045/116] Independent outputs kernel

---
 GPy/kern/_src/independent_outputs.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py
index c6860d3c..5588fdb2 100644
--- a/GPy/kern/_src/independent_outputs.py
+++ b/GPy/kern/_src/independent_outputs.py
@@ -41,7 +41,8 @@ class IndependentOutputs(Kern):
     """
     def __init__(self, active_dim, kern, name='independ'):
-        super(IndependentOutputs, self).__init__(np.hstack((kern.active_dims,np.r_[active_dim])), name)
+        assert isinstance(active_dim, int), "IndependentOutputs kernel is only defined with one input dimension being the indices"
+        super(IndependentOutputs, self).__init__(np.r_[0:max(max(kern.active_dims)+1, active_dim+1)], name)
         self.index_dim = active_dim
         self.kern = kern
         self.add_parameters(self.kern)

From b87e25d031804237bc71d4bc5cfc07093138a6cd Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Thu, 13 Mar 2014 09:57:59 +0000
Subject: [PATCH 046/116] ind ops

---
 GPy/kern/_src/independent_outputs.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py
index 5588fdb2..73a8c585 100644
--- a/GPy/kern/_src/independent_outputs.py
+++ b/GPy/kern/_src/independent_outputs.py
@@ -4,6 +4,7 @@
 from kern import Kern
 import numpy as np
+import itertools
 
 def index_to_slices(index):
     """
@@ -33,17 +34,17 @@
 class IndependentOutputs(Kern):
     """
-    A kernel which can reopresent several independent functions.
+    A kernel which can represent several independent functions.
     this kernel 'switches off' parts of the matrix where the output indexes are different.
     The index of the functions is given by the last column in the input X
     the rest of the columns of X are passed to the underlying kernel for computation (in blocks).
""" - def __init__(self, active_dim, kern, name='independ'): - assert isinstance(active_dim, int), "IndependentOutputs kernel is only defined with one input dimension being the indeces" - super(IndependentOutputs, self).__init__(np.r_[0:max(max(kern.active_dims)+1, active_dim+1)], name) - self.index_dim = active_dim + def __init__(self, index_dim, kern, name='independ'): + assert isinstance(index_dim, int), "IndependentOutputs kernel is only defined with one input dimension being the indeces" + super(IndependentOutputs, self).__init__(np.r_[0:max(max(kern.active_dims)+1, index_dim+1)], name) + self.index_dim = index_dim self.kern = kern self.add_parameters(self.kern) @@ -51,7 +52,7 @@ class IndependentOutputs(Kern): slices = index_to_slices(X[:,self.index_dim]) if X2 is None: target = np.zeros((X.shape[0], X.shape[0])) - [[np.copyto(target[s,s], self.kern.K(X[s,:], None)) for s in slices_i] for slices_i in slices] + [[np.copyto(target[s,ss], self.kern.K(X[s,:], X[ss,:])) for s,ss in itertools.product(slices_i, slices_i)] for slices_i in slices] else: slices2 = index_to_slices(X2[:,self.index_dim]) target = np.zeros((X.shape[0], X2.shape[0])) @@ -68,7 +69,7 @@ class IndependentOutputs(Kern): target = np.zeros(self.kern.size) def collate_grads(dL, X, X2): self.kern.update_gradients_full(dL,X,X2) - target += self.kern.gradient + target[:] += self.kern.gradient slices = index_to_slices(X[:,self.index_dim]) if X2 is None: From 4d00b9db039e97fd61c5951b305ea3b8e8ce3b87 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 13 Mar 2014 10:23:07 +0000 Subject: [PATCH 047/116] import not relative in tests --- GPy/kern/_src/independent_outputs.py | 14 +++++++------- GPy/testing/likelihood_tests.py | 6 +++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 73a8c585..0cbd5be4 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -73,7 +73,7 @@ class IndependentOutputs(Kern): slices = index_to_slices(X[:,self.index_dim]) if X2 is None: - [[collate_grads(dL_dK[s,s], X[s], None) for s in slices_i] for slices_i in slices] + [[collate_grads(dL_dK[s,ss], X[s], X[ss]) for s,ss in itertools.product(slices_i, slices_i)] for slices_i in slices] else: slices2 = index_to_slices(X2[:,self.index_dim]) [[[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] @@ -83,10 +83,10 @@ class IndependentOutputs(Kern): target = np.zeros_like(X) slices = index_to_slices(X[:,self.index_dim]) if X2 is None: - [[np.copyto(target[s,:-1], self.kern.gradients_X(dL_dK[s,s],X[s],None)) for s in slices_i] for slices_i in slices] + [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,s],X[s],X[ss])) for s, ss in product(slices_i, slices_i)] for slices_i in slices] else: - X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1]) - [[[np.copyto(target[s,:-1], self.kern.gradients_X(dL_dK[s,s2], X[s], X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] + X2,slices2 = X2[:,:self.index_dim],index_to_slices(X2[:,-1]) + [[[np.copyto(target[s,:self.index_dim], self.kern.gradients_X(dL_dK[s,s2], X[s], X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] return target def gradients_X_diag(self, dL_dKdiag, X): @@ -95,12 +95,12 @@ class IndependentOutputs(Kern): [[np.copyto(target[s,:-1], self.kern.gradients_X_diag(dL_dKdiag[s],X[s])) for s in 
slices_i] for slices_i in slices]
         return target
 
-    def update_gradients_diag(self,dL_dKdiag,X,target):
+    def update_gradients_diag(self, dL_dKdiag, X):
         target = np.zeros(self.kern.size)
         def collate_grads(dL, X):
             self.kern.update_gradients_diag(dL,X)
-            self.target += self.kern.gradient
-        X,slices = X[:,:-1],index_to_slices(X[:,-1])
+            target[:] += self.kern.gradient
+        slices = index_to_slices(X[:,self.index_dim])
         [[collate_grads(dL_dKdiag[s], X[s,:]) for s in slices_i] for slices_i in slices]
         self.kern.gradient = target
 
diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py
index c71842d8..d55b0190 100644
--- a/GPy/testing/likelihood_tests.py
+++ b/GPy/testing/likelihood_tests.py
@@ -1,11 +1,11 @@
 import numpy as np
 import unittest
 import GPy
-from ..models import GradientChecker
+from GPy.models import GradientChecker
 import functools
 import inspect
-from ..likelihoods import link_functions
-from ..core.parameterization import Param
+from GPy.likelihoods import link_functions
+from GPy.core.parameterization import Param
 from functools import partial
 #np.random.seed(300)
 #np.random.seed(7)

From 050bc94a309dce45c197ac453698887f89cedd76 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Thu, 13 Mar 2014 10:26:50 +0000
Subject: [PATCH 048/116] temporary fix

---
 GPy/util/multioutput.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/GPy/util/multioutput.py b/GPy/util/multioutput.py
index bec0e490..7bcbaddc 100644
--- a/GPy/util/multioutput.py
+++ b/GPy/util/multioutput.py
@@ -55,7 +55,8 @@ def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='X'):
         warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
 
     #K = kernel.prod(GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa,name='B'),tensor=True,name=name)
-    K = kernel.prod(GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa,name='B'),name=name)
+    #K = kernel.prod(GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa,name='B') )#,name=name)
+    K = kernel * GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa)
     K['.*variance'] = 1.
     K['.*variance'].fix()
     return K

From e9c96632ba9857db0e428c34b6188f5573c205b6 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Thu, 13 Mar 2014 11:01:48 +0000
Subject: [PATCH 049/116] product kernel and combination kernel updates

---
 GPy/kern/_src/add.py                     |  1 -
 GPy/kern/_src/kern.py                    | 16 +++---
 GPy/kern/_src/kernel_slice_operations.py | 68 ++++++++++++------------
 GPy/kern/_src/prod.py                    | 27 +++++-----
 4 files changed, 58 insertions(+), 54 deletions(-)

diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py
index fdebdfac..1e386c01 100644
--- a/GPy/kern/_src/add.py
+++ b/GPy/kern/_src/add.py
@@ -3,7 +3,6 @@
 
 import numpy as np
 import itertools
-from ...core.parameterization import Parameterized
 from ...util.caching import Cache_this
 from kern import CombinationKernel
 
diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py
index 0aa414ca..014c4659 100644
--- a/GPy/kern/_src/kern.py
+++ b/GPy/kern/_src/kern.py
@@ -156,7 +156,7 @@ class Kern(Parameterized):
             other.active_dims += self.input_dim
         return self.prod(other)
 
-    def prod(self, other, name=None):
+    def prod(self, other, name='prod'):
         """
         Multiply two kernels (either on the same space, or on the tensor
         product of the input space).
 
         :param other: the other kernel to be added
         :type other: GPy.kern
 
         """
         assert isinstance(other, Kern), "only kernels can be added to kernels..."
from prod import Prod - kernels = [] - if isinstance(self, Prod): kernels.extend(self._parameters_) - else: kernels.append(self) - if isinstance(other, Prod): kernels.extend(other._parameters_) - else: kernels.append(other) - return Prod(self, other, name) + #kernels = [] + #if isinstance(self, Prod): kernels.extend(self._parameters_) + #else: kernels.append(self) + #if isinstance(other, Prod): kernels.extend(other._parameters_) + #else: kernels.append(other) + return Prod([self, other], name) def _getstate(self): """ @@ -195,8 +195,10 @@ class Kern(Parameterized): class CombinationKernel(Kern): def __init__(self, kernels, name): assert all([isinstance(k, Kern) for k in kernels]) + # make sure the active dimensions of all underlying kernels are covered: ma = reduce(lambda a,b: max(a, max(b)), (x.active_dims for x in kernels), 0) input_dim = np.r_[0:ma+1] + # initialize the kernel with the full input_dim super(CombinationKernel, self).__init__(input_dim, name) self.add_parameters(*kernels) diff --git a/GPy/kern/_src/kernel_slice_operations.py b/GPy/kern/_src/kernel_slice_operations.py index c1774a35..ff33cc24 100644 --- a/GPy/kern/_src/kernel_slice_operations.py +++ b/GPy/kern/_src/kernel_slice_operations.py @@ -9,17 +9,17 @@ class KernCallsViaSlicerMeta(ParametersChangedMeta): def __call__(self, *args, **kw): instance = super(ParametersChangedMeta, self).__call__(*args, **kw) instance.K = _slice_wrapper(instance, instance.K) - instance.Kdiag = _slice_wrapper(instance, instance.Kdiag, True) - instance.update_gradients_full = _slice_wrapper(instance, instance.update_gradients_full, False, True) - instance.update_gradients_diag = _slice_wrapper(instance, instance.update_gradients_diag, True, True) - instance.gradients_X = _slice_wrapper(instance, instance.gradients_X, False, True) - instance.gradients_X_diag = _slice_wrapper(instance, instance.gradients_X_diag, True, True) - instance.psi0 = _slice_wrapper(instance, instance.psi0, False, False) - instance.psi1 = _slice_wrapper(instance, instance.psi1, False, False) - instance.psi2 = _slice_wrapper(instance, instance.psi2, False, False) - instance.update_gradients_expectations = _slice_wrapper(instance, instance.update_gradients_expectations, psi_stat=True) - instance.gradients_Z_expectations = _slice_wrapper(instance, instance.gradients_Z_expectations, psi_stat_Z=True) - instance.gradients_qX_expectations = _slice_wrapper(instance, instance.gradients_qX_expectations, psi_stat=True) + instance.Kdiag = _slice_wrapper(instance, instance.Kdiag, diag=True) + instance.update_gradients_full = _slice_wrapper(instance, instance.update_gradients_full, diag=False, derivative=True) + instance.update_gradients_diag = _slice_wrapper(instance, instance.update_gradients_diag, diag=True, derivative=True) + instance.gradients_X = _slice_wrapper(instance, instance.gradients_X, diag=False, derivative=True) + instance.gradients_X_diag = _slice_wrapper(instance, instance.gradients_X_diag, diag=True, derivative=True) + instance.psi0 = _slice_wrapper(instance, instance.psi0, diag=False, derivative=False) + instance.psi1 = _slice_wrapper(instance, instance.psi1, diag=False, derivative=False) + instance.psi2 = _slice_wrapper(instance, instance.psi2, diag=False, derivative=False) + instance.update_gradients_expectations = _slice_wrapper(instance, instance.update_gradients_expectations, derivative=True, psi_stat=True) + instance.gradients_Z_expectations = _slice_wrapper(instance, instance.gradients_Z_expectations, derivative=True, psi_stat_Z=True) + 
instance.gradients_qX_expectations = _slice_wrapper(instance, instance.gradients_qX_expectations, derivative=True, psi_stat=True) instance.parameters_changed() return instance @@ -44,7 +44,29 @@ def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False finally: kern._sliced_X -= 1 return ret - else: + elif psi_stat: + def x_slice_wrapper(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + Z, variational_posterior = kern._slice_X(Z) if not kern._sliced_X else Z, kern._slice_X(variational_posterior) if not kern._sliced_X else variational_posterior + kern._sliced_X += 1 + try: + ret = operation(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior) + except: + raise + finally: + kern._sliced_X -= 1 + return ret + elif psi_stat_Z: + def x_slice_wrapper(dL_dpsi1, dL_dpsi2, Z, variational_posterior): + Z, variational_posterior = kern._slice_X(Z) if not kern._sliced_X else Z, kern._slice_X(variational_posterior) if not kern._sliced_X else variational_posterior + kern._sliced_X += 1 + try: + ret = operation(dL_dpsi1, dL_dpsi2, Z, variational_posterior) + except: + raise + finally: + kern._sliced_X -= 1 + return ret + else: def x_slice_wrapper(dL_dK, X, X2=None): X, X2 = kern._slice_X(X) if not kern._sliced_X else X, kern._slice_X(X2) if X2 is not None and not kern._sliced_X else X2 kern._sliced_X += 1 @@ -55,28 +77,6 @@ def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False finally: kern._sliced_X -= 1 return ret - elif psi_stat: - def x_slice_wrapper(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): - Z, variational_posterior = kern._slice_X(Z) if not kern._sliced_X else Z, kern._slice_X(variational_posterior) if not kern._sliced_X else variational_posterior - kern._sliced_X += 1 - try: - ret = operation(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior) - except: - raise - finally: - kern._sliced_X -= 1 - return ret - elif psi_stat_Z: - def x_slice_wrapper(dL_dpsi1, dL_dpsi2, Z, variational_posterior): - Z, variational_posterior = kern._slice_X(Z) if not kern._sliced_X else Z, kern._slice_X(variational_posterior) if not kern._sliced_X else variational_posterior - kern._sliced_X += 1 - try: - ret = operation(dL_dpsi1, dL_dpsi2, Z, variational_posterior) - except: - raise - finally: - kern._sliced_X -= 1 - return ret else: if diag: def x_slice_wrapper(X, *args, **kw): diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index bd664681..f9c36023 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -18,6 +18,7 @@ class Prod(CombinationKernel): """ def __init__(self, kernels, name='prod'): + assert len(kernels) == 2, 'only implemented for two kernels as of yet' super(Prod, self).__init__(kernels, name) @Cache_this(limit=2, force_kwargs=['which_parts']) @@ -37,26 +38,28 @@ class Prod(CombinationKernel): which_parts = self.parts return reduce(np.multiply, (p.Kdiag(X) for p in which_parts)) - def update_gradients_full(self, dL_dK, X): + def update_gradients_full(self, dL_dK, X, X2=None): for k1,k2 in itertools.combinations(self.parts, 2): - k1._sliced_X = k1._sliced_X2 = k2._sliced_X = k2._sliced_X2 = True - k1.update_gradients_full(dL_dK*k2.K(X, X)) - self.k2.update_gradients_full(dL_dK*self.k1.K(X[:,self.slice1]), X[:,self.slice2]) + k1.update_gradients_full(dL_dK*k2.K(X, X2), X, X2) + k2.update_gradients_full(dL_dK*k1.K(X, X2), X, X2) + + def update_gradients_diag(self, dL_dKdiag, X): + for k1,k2 in itertools.combinations(self.parts, 2): + k1.update_gradients_diag(dL_dKdiag*k2.Kdiag(X), X) + 
k2.update_gradients_diag(dL_dKdiag*k1.Kdiag(X), X) def gradients_X(self, dL_dK, X, X2=None): target = np.zeros(X.shape) - if X2 is None: - target[:,self.slice1] += self.k1.gradients_X(dL_dK*self.k2.K(X[:,self.slice2]), X[:,self.slice1], None) - target[:,self.slice2] += self.k2.gradients_X(dL_dK*self.k1.K(X[:,self.slice1]), X[:,self.slice2], None) - else: - target[:,self.slice1] += self.k1.gradients_X(dL_dK*self.k2.K(X[:,self.slice2], X2[:,self.slice2]), X[:,self.slice1], X2[:,self.slice1]) - target[:,self.slice2] += self.k2.gradients_X(dL_dK*self.k1.K(X[:,self.slice1], X2[:,self.slice1]), X[:,self.slice2], X2[:,self.slice2]) + for k1,k2 in itertools.combinations(self.parts, 2): + target[:,k1.active_dims] += k1.gradients_X(dL_dK*k2.K(X, X2), X, X2) + target[:,k2.active_dims] += k2.gradients_X(dL_dK*k1.K(X, X2), X, X2) return target def gradients_X_diag(self, dL_dKdiag, X): target = np.zeros(X.shape) - target[:,self.slice1] = self.k1.gradients_X(dL_dKdiag*self.k2.Kdiag(X[:,self.slice2]), X[:,self.slice1]) - target[:,self.slice2] += self.k2.gradients_X(dL_dKdiag*self.k1.Kdiag(X[:,self.slice1]), X[:,self.slice2]) + for k1,k2 in itertools.combinations(self.parts, 2): + target[:,k1.active_dims] += k1.gradients_X(dL_dKdiag*k2.Kdiag(X), X) + target[:,k2.active_dims] += k2.gradients_X(dL_dKdiag*k1.Kdiag(X), X) return target From b95cc90ffb3f0e20c345580bfed4b7bb04ec2c38 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 11:51:54 +0000 Subject: [PATCH 050/116] object without args --- GPy/core/parameterization/param.py | 28 ++--- GPy/core/parameterization/parameter_core.py | 112 ++++++++++---------- 2 files changed, 70 insertions(+), 70 deletions(-) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index 8eb10608..8cad2d29 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -94,15 +94,15 @@ class Param(OptimizationHandlable, ObservableArray): @property def _param_array_(self): return self - + @property def gradient(self): return self._gradient_array_[self._current_slice_] - + @gradient.setter def gradient(self, val): self.gradient[:] = val - + #=========================================================================== # Pickling operations #=========================================================================== @@ -135,7 +135,7 @@ class Param(OptimizationHandlable, ObservableArray): self._parent_index_ = state.pop() self._parent_ = state.pop() self.name = state.pop() - + def copy(self, *args): constr = self.constraints.copy() priors = self.priors.copy() @@ -151,13 +151,13 @@ class Param(OptimizationHandlable, ObservableArray): # if trigger_parent: min_priority = None # else: min_priority = -numpy.inf # self.notify_observers(None, min_priority) -# +# # def _get_params(self): # return self.flat -# +# # def _collect_gradient(self, target): # target += self.gradient.flat -# +# # def _set_gradient(self, g): # self.gradient = g.reshape(self._realshape_) @@ -173,10 +173,10 @@ class Param(OptimizationHandlable, ObservableArray): try: new_arr._current_slice_ = s; new_arr._original_ = self.base is new_arr.base except AttributeError: pass # returning 0d array or float, double etc return new_arr - + def __setitem__(self, s, val): super(Param, self).__setitem__(s, val) - + #=========================================================================== # Index Operations: #=========================================================================== @@ -195,7 +195,7 @@ class Param(OptimizationHandlable, ObservableArray): a = 
self._realshape_[i] + a internal_offset += a * extended_realshape[i] return internal_offset - + def _raveled_index(self, slice_index=None): # return an index array on the raveled array, which is formed by the current_slice # of this object @@ -203,7 +203,7 @@ class Param(OptimizationHandlable, ObservableArray): ind = self._indices(slice_index) if ind.ndim < 2: ind = ind[:, None] return numpy.asarray(numpy.apply_along_axis(lambda x: numpy.sum(extended_realshape * x), 1, ind), dtype=int) - + def _expand_index(self, slice_index=None): # this calculates the full indexing arrays from the slicing objects given by get_item for _real..._ attributes # it basically translates slices to their respective index arrays and turns negative indices around @@ -245,7 +245,7 @@ class Param(OptimizationHandlable, ObservableArray): #=========================================================================== @property def _description_str(self): - if self.size <= 1: + if self.size <= 1: return [str(self.view(numpy.ndarray)[0])] else: return [str(self.shape)] def parameter_names(self, add_self=False, adjust_for_printing=False): @@ -356,7 +356,7 @@ class ParamConcatenation(object): self._param_sizes = [p.size for p in self.params] startstops = numpy.cumsum([0] + self._param_sizes) self._param_slices_ = [slice(start, stop) for start,stop in zip(startstops, startstops[1:])] - + parents = dict() for p in self.params: if p.has_parent(): @@ -396,7 +396,7 @@ class ParamConcatenation(object): def update_all_params(self): for par in self.parents: par.notify_observers(-numpy.inf) - + def constrain(self, constraint, warning=True): [param.constrain(constraint, trigger_parent=False) for param in self.params] self.update_all_params() diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 2a61c970..04257b9f 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -1,7 +1,7 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) """ -Core module for parameterization. +Core module for parameterization. This module implements all parameterization techniques, split up in modular bits. HierarchyError: @@ -41,7 +41,7 @@ class Observable(object): """ _updated = True def __init__(self, *args, **kwargs): - super(Observable, self).__init__(*args, **kwargs) + super(Observable, self).__init__() self._observer_callables_ = [] def add_observer(self, observer, callble, priority=0): @@ -61,7 +61,7 @@ class Observable(object): def notify_observers(self, which=None, min_priority=None): """ - Notifies all observers. Which is the element, which kicked off this + Notifies all observers. Which is the element, which kicked off this notification loop. NOTE: notifies only observers with priority p > min_priority! @@ -91,11 +91,11 @@ class Observable(object): class Pickleable(object): """ - Make an object pickleable (See python doc 'pickling'). + Make an object pickleable (See python doc 'pickling'). This class allows for pickling support by Memento pattern. _getstate returns a memento of the class, which gets pickled. 
- _setstate() (re-)sets the state of the class to the memento + _setstate() (re-)sets the state of the class to the memento """ #=========================================================================== # Pickling operations @@ -112,14 +112,14 @@ class Pickleable(object): with open(f, 'w') as f: cPickle.dump(self, f, protocol) else: - cPickle.dump(self, f, protocol) + cPickle.dump(self, f, protocol) def __getstate__(self): if self._has_get_set_state(): return self._getstate() return self.__dict__ def __setstate__(self, state): if self._has_get_set_state(): - self._setstate(state) + self._setstate(state) # TODO: maybe parameters_changed() here? return self.__dict__ = state @@ -160,7 +160,7 @@ class Parentable(object): _parent_ = None _parent_index_ = None def __init__(self, *args, **kwargs): - super(Parentable, self).__init__(*args, **kwargs) + super(Parentable, self).__init__() def has_parent(self): """ @@ -201,18 +201,18 @@ class Gradcheckable(Parentable): Adds the functionality for an object to be gradcheckable. It is just a thin wrapper of a call to the highest parent for now. TODO: Can be done better, by only changing parameters of the current parameter handle, - such that object hierarchy only has to change for those. + such that object hierarchy only has to change for those. """ def __init__(self, *a, **kw): super(Gradcheckable, self).__init__(*a, **kw) def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False): """ - Check the gradient of this parameter with respect to the highest parent's + Check the gradient of this parameter with respect to the highest parent's objective function. This is a three point estimate of the gradient, wiggling at the parameters with a stepsize step. - The check passes if either the ratio or the difference between numerical and + The check passes if either the ratio or the difference between numerical and analytical gradient is smaller then tolerance. :param bool verbose: whether each parameter shall be checked individually. @@ -275,22 +275,22 @@ class Indexable(object): The raveled index of an object is the index for its parameters in a flattened int array. """ def __init__(self, *a, **kw): - super(Indexable, self).__init__(*a, **kw) - + super(Indexable, self).__init__() + def _raveled_index(self): """ Flattened array of ints, specifying the index of this object. This has to account for shaped parameters! """ raise NotImplementedError, "Need to be able to get the raveled Index" - + def _internal_offset(self): """ - The offset for this parameter inside its parent. + The offset for this parameter inside its parent. This has to account for shaped parameters! """ return 0 - + def _offset_for(self, param): """ Return the offset of the param inside this parameterized object. @@ -298,15 +298,15 @@ class Indexable(object): basically just sums up the parameter sizes which come before param. """ raise NotImplementedError, "shouldnt happen, offset required from non parameterization object?" - + def _raveled_index_for(self, param): """ get the raveled index for a param that is an int array, containing the indexes for the flattened param inside this parameterized logic. """ - raise NotImplementedError, "shouldnt happen, raveld index transformation required from non parameterization object?" - + raise NotImplementedError, "shouldnt happen, raveld index transformation required from non parameterization object?" 
+ class Constrainable(Nameable, Indexable): """ @@ -315,7 +315,7 @@ class Constrainable(Nameable, Indexable): Adding a constraint to a Parameter means to tell the highest parent that the constraint was added and making sure that all parameters covered by this object are indeed conforming to the constraint. - + :func:`constrain()` and :func:`unconstrain()` are main methods here """ def __init__(self, name, default_constraint=None, *a, **kw): @@ -326,7 +326,7 @@ class Constrainable(Nameable, Indexable): self.priors = ParameterIndexOperations() if self._default_constraint_ is not None: self.constrain(self._default_constraint_) - + def _disconnect_parent(self, constr=None, *args, **kw): """ From Parentable: @@ -340,7 +340,7 @@ class Constrainable(Nameable, Indexable): self._parent_index_ = None self._connect_fixes() self._notify_parent_change() - + #=========================================================================== # Fixing Parameters: #=========================================================================== @@ -356,20 +356,20 @@ class Constrainable(Nameable, Indexable): rav_i = self._highest_parent_._raveled_index_for(self) self._highest_parent_._set_fixed(rav_i) fix = constrain_fixed - + def unconstrain_fixed(self): """ This parameter will no longer be fixed. """ unconstrained = self.unconstrain(__fixed__) - self._highest_parent_._set_unfixed(unconstrained) + self._highest_parent_._set_unfixed(unconstrained) unfix = unconstrain_fixed - + def _set_fixed(self, index): if not self._has_fixes(): self._fixes_ = np.ones(self.size, dtype=bool) self._fixes_[index] = FIXED if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED - + def _set_unfixed(self, index): if not self._has_fixes(): self._fixes_ = np.ones(self.size, dtype=bool) # rav_i = self._raveled_index_for(param)[index] @@ -383,7 +383,7 @@ class Constrainable(Nameable, Indexable): self._fixes_[fixed_indices] = FIXED else: self._fixes_ = None - + def _has_fixes(self): return hasattr(self, "_fixes_") and self._fixes_ is not None @@ -398,21 +398,21 @@ class Constrainable(Nameable, Indexable): """ repriorized = self.unset_priors() self._add_to_index_operations(self.priors, repriorized, prior, warning) - + def unset_priors(self, *priors): """ Un-set all priors given from this parameter handle. - + """ return self._remove_from_index_operations(self.priors, priors) - + def log_prior(self): """evaluate the prior""" if self.priors.size > 0: x = self._get_params() return reduce(lambda a, b: a + b, [p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()], 0) return 0. - + def _log_prior_gradients(self): """evaluate the gradients of the priors""" if self.priors.size > 0: @@ -421,7 +421,7 @@ class Constrainable(Nameable, Indexable): [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()] return ret return 0. - + #=========================================================================== # Constrain operations -> done #=========================================================================== @@ -448,7 +448,7 @@ class Constrainable(Nameable, Indexable): transformats of this parameter object. """ return self._remove_from_index_operations(self.constraints, transforms) - + def constrain_positive(self, warning=True, trigger_parent=True): """ :param warning: print a warning if re-constraining parameters. 
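A rough sketch of how these constraint handles are used from outside, assuming a standard GPRegression model and the parameter names it exposes (constrain_bounded is assumed to mirror the unconstrain_bounded handle seen here):

    import numpy as np
    import GPy

    m = GPy.models.GPRegression(np.random.randn(10, 1), np.random.randn(10, 1))
    m.kern.variance.constrain_positive()           # Logexp transform, warns if re-constraining
    m.kern.lengthscale.constrain_bounded(.1, 10.)  # Logistic(.1, 10.) transform
    m.kern.variance.unconstrain()                  # removes all transforms again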
@@ -493,7 +493,7 @@ class Constrainable(Nameable, Indexable): Remove (lower, upper) bounded constrain from this parameter/ """ self.unconstrain(Logistic(lower, upper)) - + def _parent_changed(self, parent): """ From Parentable: @@ -522,7 +522,7 @@ class Constrainable(Nameable, Indexable): def _remove_from_index_operations(self, which, what): """ Helper preventing copy code. - Remove given what (transform prior etc) from which param index ops. + Remove given what (transform prior etc) from which param index ops. """ if len(what) == 0: transforms = which.properties() @@ -532,7 +532,7 @@ class Constrainable(Nameable, Indexable): removed = np.union1d(removed, unconstrained) if t is __fixed__: self._highest_parent_._set_unfixed(unconstrained) - + return removed class OptimizationHandlable(Constrainable, Observable): @@ -543,13 +543,13 @@ class OptimizationHandlable(Constrainable, Observable): """ def __init__(self, name, default_constraint=None, *a, **kw): super(OptimizationHandlable, self).__init__(name, default_constraint=default_constraint, *a, **kw) - + def transform(self): [np.put(self._param_array_, ind, c.finv(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] - + def untransform(self): [np.put(self._param_array_, ind, c.f(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] - + def _get_params_transformed(self): # transformed parameters (apply transformation rules) p = self._param_array_.copy() @@ -565,23 +565,23 @@ class OptimizationHandlable(Constrainable, Observable): else: self._param_array_[:] = p self.untransform() self._trigger_params_changed() - + def _trigger_params_changed(self, trigger_parent=True): [p._trigger_params_changed(trigger_parent=False) for p in self._parameters_] if trigger_parent: min_priority = None else: min_priority = -np.inf self.notify_observers(None, min_priority) - + def _size_transformed(self): return self.size - self.constraints[__fixed__].size -# +# # def _untransform_params(self, p): # # inverse apply transformations for parameters # #p = p.copy() # if self._has_fixes(): tmp = self._get_params(); tmp[self._fixes_] = p; p = tmp; del tmp # [np.put(p, ind, c.f(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] # return p -# +# # def _get_params(self): # """ # get all parameters @@ -592,7 +592,7 @@ class OptimizationHandlable(Constrainable, Observable): # return p # [np.put(p, ind, par._get_params()) for ind, par in itertools.izip(self._param)] # return p - + # def _set_params(self, params, trigger_parent=True): # self._param_array_.flat = params # if trigger_parent: min_priority = None @@ -600,14 +600,14 @@ class OptimizationHandlable(Constrainable, Observable): # self.notify_observers(None, min_priority) # don't overwrite this anymore! #raise NotImplementedError, "Abstract superclass: This needs to be implemented in Param and Parameterizable" - + #=========================================================================== # Optimization handles: #=========================================================================== def _get_param_names(self): n = np.array([p.hierarchy_name() + '[' + str(i) + ']' for p in self.flattened_parameters for i in p._indices()]) return n - + def _get_param_names_transformed(self): n = self._get_param_names() if self._has_fixes(): @@ -621,7 +621,7 @@ class OptimizationHandlable(Constrainable, Observable): """ Randomize the model. 
Make this draw from the prior if one exists, else draw from given random generator - + :param rand_gen: numpy random number generator which takes args and kwargs :param flaot loc: loc parameter for random number generator :param float scale: scale parameter for random number generator @@ -663,7 +663,7 @@ class Parameterizable(OptimizationHandlable): def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True): """ - Get the names of all parameters of this model. + Get the names of all parameters of this model. :param bool add_self: whether to add the own name in front of names :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names @@ -712,7 +712,7 @@ class Parameterizable(OptimizationHandlable): #========================================================================= @property def gradient(self): - return self._gradient_array_ + return self._gradient_array_ @gradient.setter def gradient(self, val): @@ -821,8 +821,8 @@ class Parameterizable(OptimizationHandlable): # connect parameterlist to this parameterized object # This just sets up the right connection for the params objects # to be used as parameters - # it also sets the constraints for each parameter to the constraints - # of their respective parents + # it also sets the constraints for each parameter to the constraints + # of their respective parents if not hasattr(self, "_parameters_") or len(self._parameters_) < 1: # no parameters for this class return @@ -837,7 +837,7 @@ class Parameterizable(OptimizationHandlable): pslice = slice(old_size, old_size+p.size) # first connect all children - p._propagate_param_grad(self._param_array_[pslice], self._gradient_array_[pslice]) + p._propagate_param_grad(self._param_array_[pslice], self._gradient_array_[pslice]) # then connect children to self self._param_array_[pslice] = p._param_array_.ravel()#, requirements=['C', 'W']).ravel(order='C') self._gradient_array_[pslice] = p._gradient_array_.ravel()#, requirements=['C', 'W']).ravel(order='C') @@ -879,7 +879,7 @@ class Parameterizable(OptimizationHandlable): dc[k] = copy.deepcopy(v) if k == '_parameters_': params = [p.copy() for p in v] - + dc['_parent_'] = None dc['_parent_index_'] = None dc['_observer_callables_'] = [] @@ -890,12 +890,12 @@ class Parameterizable(OptimizationHandlable): s = self.__new__(self.__class__) s.__dict__ = dc - + for p in params: s.add_parameter(p, _ignore_added_names=True) - + return s - + #=========================================================================== # From being parentable, we have to define the parent_change notification #=========================================================================== From 90f8944361e98083dabfc2051e6dde061f32d95a Mon Sep 17 00:00:00 2001 From: Ricardo Date: Thu, 13 Mar 2014 11:51:59 +0000 Subject: [PATCH 051/116] Fix needed --- GPy/util/multioutput.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/GPy/util/multioutput.py b/GPy/util/multioutput.py index 7bcbaddc..79022a5f 100644 --- a/GPy/util/multioutput.py +++ b/GPy/util/multioutput.py @@ -54,9 +54,8 @@ def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='X'): kernel.input_dim = input_dim warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.") - #K = kernel.prod(GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa,name='B'),tensor=True,name=name) - #K = kernel.prod(GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa,name='B') )#,name=name) - K = kernel * 
GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa) + K = kernel.prod(GPy.kern.Coregionalize([input_dim], num_outputs,W_rank,W,kappa,name='B'),name=name) + #K = kernel ** GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa, name= 'B') K['.*variance'] = 1. K['.*variance'].fix() return K From a18b54ed7381436067b29738a22454b62d50b53f Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 12:02:40 +0000 Subject: [PATCH 052/116] constrain notifies observers --- GPy/core/parameterization/parameter_core.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 04257b9f..001b98ed 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -308,7 +308,7 @@ class Indexable(object): raise NotImplementedError, "shouldnt happen, raveld index transformation required from non parameterization object?" -class Constrainable(Nameable, Indexable): +class Constrainable(Nameable, Indexable, Observable): """ Make an object constrainable with Priors and Transformations. TODO: Mappings!! @@ -352,9 +352,11 @@ class Constrainable(Nameable, Indexable): """ if value is not None: self[:] = value - self.constrain(__fixed__, warning=warning, trigger_parent=trigger_parent) + reconstrained = self.unconstrain() + self._add_to_index_operations(self.constraints, reconstrained, __fixed__, warning) rav_i = self._highest_parent_._raveled_index_for(self) self._highest_parent_._set_fixed(rav_i) + self.notify_observers(self, None if trigger_parent else -np.inf) fix = constrain_fixed def unconstrain_fixed(self): @@ -435,10 +437,10 @@ class Constrainable(Nameable, Indexable): Constrain the parameter to the given :py:class:`GPy.core.transformations.Transformation`. """ - if isinstance(transform, Transformation): - self._param_array_[:] = transform.initialize(self._param_array_) + self._param_array_[:] = transform.initialize(self._param_array_) reconstrained = self.unconstrain() self._add_to_index_operations(self.constraints, reconstrained, transform, warning) + self.notify_observers(self, None if trigger_parent else -np.inf) def unconstrain(self, *transforms): """ @@ -535,7 +537,7 @@ class Constrainable(Nameable, Indexable): return removed -class OptimizationHandlable(Constrainable, Observable): +class OptimizationHandlable(Constrainable): """ This enables optimization handles on an Object as done in GPy 0.4. 
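The optimizer only ever sees this transformed, fix-filtered parameter vector; a rough sketch of the round trip, assuming a built model m as in the sketch above:

    p = m._get_params_transformed()  # constrained values mapped through c.finv, fixed ones dropped
    m._set_params_transformed(p)     # copies back, untransforms and notifies the hierarchy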
@@ -568,9 +570,7 @@ class OptimizationHandlable(Constrainable):
 
     def _trigger_params_changed(self, trigger_parent=True):
         [p._trigger_params_changed(trigger_parent=False) for p in self._parameters_]
-        if trigger_parent: min_priority = None
-        else: min_priority = -np.inf
-        self.notify_observers(None, min_priority)
+        self.notify_observers(None, None if trigger_parent else -np.inf)
 
     def _size_transformed(self):
         return self.size - self.constraints[__fixed__].size

From ccf5167b75832517a685a4187a04fb331f419440 Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Thu, 13 Mar 2014 12:13:00 +0000
Subject: [PATCH 053/116] changes to where gradients are computed in laplace

---
 GPy/core/gp.py               | 27 ++++++++-------------------
 GPy/likelihoods/student_t.py |  6 +++---
 2 files changed, 11 insertions(+), 22 deletions(-)

diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 185cc149..3552c37e 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -27,7 +27,7 @@ class GP(Model):
 
     """
 
-    def __init__(self, X, Y, kernel, likelihood, inference_method=None, name='gp', **Y_metadata):
+    def __init__(self, X, Y, kernel, likelihood, inference_method=None, name='gp', Y_metadata=None):
         super(GP, self).__init__(name)
 
         assert X.ndim == 2
@@ -42,10 +42,7 @@ class GP(Model):
         assert Y.shape[0] == self.num_data
         _, self.output_dim = self.Y.shape
 
-        if Y_metadata is not None:
-            self.Y_metadata = Y_metadata
-        else:
-            self.Y_metadata = None
+        self.Y_metadata = Y_metadata or {}
 
         assert isinstance(kernel, kern.Kern)
         #assert self.input_dim == kernel.input_dim
@@ -67,8 +64,8 @@ class GP(Model):
         self.add_parameter(self.likelihood)
 
     def parameters_changed(self):
-        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, **self.Y_metadata)
-        self.likelihood.update_gradients(np.diag(self.grad_dict['dL_dK']), **self.Y_metadata)
+        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y, self.Y_metadata)
+        self.likelihood.update_gradients(self.grad_dict['dL_dthetaL'])
         self.kern.update_gradients_full(self.grad_dict['dL_dK'], self.X)
 
     def log_likelihood(self):
@@ -99,7 +96,7 @@ class GP(Model):
             var = var.reshape(-1, 1)
         return mu, var
 
-    def predict(self, Xnew, full_cov=False, **likelihood_args):
+    def predict(self, Xnew, full_cov=False, Y_metadata=None):
         """
         Predict the function(s) at the new point(s) Xnew.
 
@@ -123,7 +120,7 @@ class GP(Model):
         mu, var = self._raw_predict(Xnew, full_cov=full_cov)
 
         # now push through likelihood
-        mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov, **likelihood_args)
+        mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov, Y_metadata)
         return mean, var, _025pm, _975pm
 
     def posterior_samples_f(self,X,size=10, full_cov=True):
@@ -147,7 +144,7 @@ class GP(Model):
 
         return Ysim
 
-    def posterior_samples(self,X,size=10, full_cov=True,noise_model=None):
+    def posterior_samples(self,X,size=10, full_cov=True, Y_metadata=None):
         """
         Samples the posterior GP at the points X.
 
@@ -162,15 +159,7 @@ class GP(Model):
         :returns: Ysim: set of simulations, a Numpy array (N x samples).
         """
         Ysim = self.posterior_samples_f(X, size, full_cov=full_cov)
-        if isinstance(self.likelihood, Gaussian):
-            noise_std = np.sqrt(self.likelihood._get_params())
-            Ysim += np.random.normal(0,noise_std,Ysim.shape)
-        elif isinstance(self.likelihood, Gaussian_Mixed_Noise):
-            assert noise_model is not None, "A noise model must be specified."
-            noise_std = np.sqrt(self.likelihood._get_params()[noise_model])
-            Ysim += np.random.normal(0,noise_std,Ysim.shape)
-        else:
-            Ysim = self.likelihood.noise_model.samples(Ysim)
+        Ysim = self.likelihood.noise_model.samples(Ysim, Y_metadata)
 
         return Ysim
 
diff --git a/GPy/likelihoods/student_t.py b/GPy/likelihoods/student_t.py
index ac93f204..50d91953 100644
--- a/GPy/likelihoods/student_t.py
+++ b/GPy/likelihoods/student_t.py
@@ -37,13 +37,13 @@ class StudentT(Likelihood):
     def parameters_changed(self):
         self.variance = (self.v / float(self.v - 2)) * self.sigma2
 
-    def update_gradients(self, partial):
+    def update_gradients(self, grads):
         """
         Pull out the gradients, be careful as the order must match the order
         in which the parameters are added
         """
-        self.sigma2.gradient = partial[0]
-        self.v.gradient = partial[1]
+        self.sigma2.gradient = grads[0]
+        self.v.gradient = grads[1]
 
     def pdf_link(self, link_f, y, extra_data=None):
         """

From 365b8ae1e1ff9720b374babb1788023948e05778 Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Thu, 13 Mar 2014 12:14:02 +0000
Subject: [PATCH 054/116] more changes to laplace

---
 GPy/inference/latent_function_inference/laplace.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py
index cf10c730..e6ca720a 100644
--- a/GPy/inference/latent_function_inference/laplace.py
+++ b/GPy/inference/latent_function_inference/laplace.py
@@ -56,11 +56,8 @@ class Laplace(object):
         #Compute hessian and other variables at mode
         log_marginal, woodbury_vector, woodbury_inv, dL_dK, dL_dthetaL = self.mode_computations(f_hat, Ki_fhat, K, Y, likelihood, kern, Y_metadata)
 
-        kern.update_gradients_full(dL_dK, X)
-        likelihood.update_gradients(dL_dthetaL)
-
         self._previous_Ki_fhat = Ki_fhat.copy()
-        return Posterior(woodbury_vector=woodbury_vector, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK}
+        return Posterior(woodbury_vector=woodbury_vector, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}
 
     def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None):
         """

From 8b2cff954e3dc59e50522cb3ec4af7413962695f Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Thu, 13 Mar 2014 12:22:20 +0000
Subject: [PATCH 055/116] coregionalization example

---
 GPy/examples/coreg_example.py | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 GPy/examples/coreg_example.py

diff --git a/GPy/examples/coreg_example.py b/GPy/examples/coreg_example.py
new file mode 100644
index 00000000..967758c6
--- /dev/null
+++ b/GPy/examples/coreg_example.py
@@ -0,0 +1,30 @@
+import numpy as np
+import pylab as pb
+import GPy
+pb.ion()
+
+X1 = 100 * np.random.rand(100)[:,None]
+X2 = 100 * np.random.rand(100)[:,None]
+#X1.sort()
+#X2.sort()
+
+Y1 = np.sin(X1/10.) + np.random.rand(100)[:,None]
+Y2 = np.cos(X2/10.)
+ np.random.rand(100)[:,None] + + + + +Mlist = [GPy.kern.Matern32(1,lengthscale=20.,name="Mat")] +kern = GPy.util.multioutput.LCM(input_dim=1,num_outputs=12,kernels_list=Mlist,name='H') + + +m = GPy.models.GPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2], kernel=kern) +m.optimize() + +fig = pb.figure() +ax0 = fig.add_subplot(211) +ax1 = fig.add_subplot(212) +slices = GPy.util.multioutput.get_slices([Y1,Y2]) +m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],ax=ax0) +m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],ax=ax1) + From 1102387a7671ef00cc0c5ceb8a316c23779d9f4a Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 12:27:59 +0000 Subject: [PATCH 056/116] periodic kernel gradients and parameterized updates --- GPy/kern/_src/periodic.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/GPy/kern/_src/periodic.py b/GPy/kern/_src/periodic.py index 36ff3527..6b423a57 100644 --- a/GPy/kern/_src/periodic.py +++ b/GPy/kern/_src/periodic.py @@ -85,8 +85,9 @@ class PeriodicExponential(Periodic): self.b = [1] self.basis_alpha = np.ones((self.n_basis,)) - self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))[:,0] - self.basis_phi = np.array(sum([[-np.pi/2, 0.] for i in range(1,self.n_freq+1)],[])) + self.basis_omega = (2*np.pi*np.arange(1,self.n_freq+1)/self.period).repeat(2) + self.basis_phi = np.zeros(self.n_freq * 2) + self.basis_phi[::2] = -np.pi/2 self.G = self.Gram_matrix() self.Gi = np.linalg.inv(self.G) @@ -100,7 +101,6 @@ class PeriodicExponential(Periodic): Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None] return(self.lengthscale/(2*self.variance) * Gint + 1./self.variance*np.dot(Flower,Flower.T)) - #@silence_errors def update_gradients_full(self, dL_dK, X, X2=None): """derivative of the covariance matrix with respect to the parameters (shape is N x num_inducing x num_params)""" if X2 is None: X2 = X @@ -194,8 +194,9 @@ class PeriodicMatern32(Periodic): self.b = [1,self.lengthscale**2/3] self.basis_alpha = np.ones((self.n_basis,)) - self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[])) - self.basis_phi = np.array(sum([[-np.pi/2, 0.] for i in range(1,self.n_freq+1)],[])) + self.basis_omega = (2*np.pi*np.arange(1,self.n_freq+1)/self.period).repeat(2) + self.basis_phi = np.zeros(self.n_freq * 2) + self.basis_phi[::2] = -np.pi/2 self.G = self.Gram_matrix() self.Gi = np.linalg.inv(self.G) @@ -212,8 +213,8 @@ class PeriodicMatern32(Periodic): return(self.lengthscale**3/(12*np.sqrt(3)*self.variance) * Gint + 1./self.variance*np.dot(Flower,Flower.T) + self.lengthscale**2/(3.*self.variance)*np.dot(F1lower,F1lower.T)) - @silence_errors - def update_gradients_full(self,dL_dK,X,X2,target): + #@silence_errors + def update_gradients_full(self,dL_dK,X,X2): """derivative of the covariance matrix with respect to the parameters (shape is num_data x num_inducing x num_params)""" if X2 is None: X2 = X FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X) @@ -307,8 +308,9 @@ class PeriodicMatern52(Periodic): self.b = [9./8, 9*self.lengthscale**4/200., 3*self.lengthscale**2/5., 3*self.lengthscale**2/(5*8.), 3*self.lengthscale**2/(5*8.)] self.basis_alpha = np.ones((2*self.n_freq,)) - self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[])) - self.basis_phi = np.array(sum([[-np.pi/2, 0.] 
for i in range(1,self.n_freq+1)],[]))
+        self.basis_omega = (2*np.pi*np.arange(1,self.n_freq+1)/self.period).repeat(2)
+        self.basis_phi = np.zeros(self.n_freq * 2)
+        self.basis_phi[::2] = -np.pi/2
 
         self.G = self.Gram_matrix()
         self.Gi = np.linalg.inv(self.G)

From 3d6a69e5f090c17ffe325ad9f072fe0382397a2b Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Thu, 13 Mar 2014 12:28:56 +0000
Subject: [PATCH 057/116] we need to update all the tests: here discontinuous
 kernel tests seem missing, mrd and bgplvm model tests not needed anymore

---
 GPy/testing/bgplvm_tests.py        | 85 ------------------------
 GPy/testing/kernel_tests.py        | 55 ++++++++++++++---
 GPy/testing/likelihood_tests.py    |  4 +-
 GPy/testing/mrd_tests.py           | 32 ----------
 GPy/testing/parameterized_tests.py | 22 ++++----
 GPy/testing/unit_tests.py          | 28 +++++-----
 6 files changed, 74 insertions(+), 152 deletions(-)
 delete mode 100644 GPy/testing/bgplvm_tests.py
 delete mode 100644 GPy/testing/mrd_tests.py

diff --git a/GPy/testing/bgplvm_tests.py b/GPy/testing/bgplvm_tests.py
deleted file mode 100644
index fd55d314..00000000
--- a/GPy/testing/bgplvm_tests.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) 2012, Nicolo Fusi
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-import unittest
-import numpy as np
-import GPy
-from ..models import BayesianGPLVM
-
-class BGPLVMTests(unittest.TestCase):
-    def test_bias_kern(self):
-        N, num_inducing, input_dim, D = 10, 3, 2, 4
-        X = np.random.rand(N, input_dim)
-        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        K = k.K(X)
-        Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
-        Y -= Y.mean(axis=0)
-        k = GPy.kern.bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.randomize()
-        self.assertTrue(m.checkgrad())
-
-    def test_linear_kern(self):
-        N, num_inducing, input_dim, D = 10, 3, 2, 4
-        X = np.random.rand(N, input_dim)
-        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        K = k.K(X)
-        Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
-        Y -= Y.mean(axis=0)
-        k = GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.randomize()
-        self.assertTrue(m.checkgrad())
-
-    def test_rbf_kern(self):
-        N, num_inducing, input_dim, D = 10, 3, 2, 4
-        X = np.random.rand(N, input_dim)
-        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        K = k.K(X)
-        Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
-        Y -= Y.mean(axis=0)
-        k = GPy.kern.RBF(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.randomize()
-        self.assertTrue(m.checkgrad())
-
-    def test_rbf_bias_kern(self):
-        N, num_inducing, input_dim, D = 10, 3, 2, 4
-        X = np.random.rand(N, input_dim)
-        k = GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        K = k.K(X)
-        Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
-        Y -= Y.mean(axis=0)
-        k = GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.randomize()
-        self.assertTrue(m.checkgrad())
-
-    def test_rbf_line_kern(self):
-        N, num_inducing, input_dim, D = 10, 3, 2, 4
-        X = np.random.rand(N, input_dim)
-        k = GPy.kern.RBF(input_dim) + GPy.kern.Linear(input_dim) + GPy.kern.White(input_dim, 0.00001)
-        K = k.K(X)
-        Y =
np.random.multivariate_normal(np.zeros(N),K,input_dim).T - Y -= Y.mean(axis=0) - k = GPy.kern.RBF(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001) - m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.randomize() - self.assertTrue(m.checkgrad()) - - def test_linear_bias_kern(self): - N, num_inducing, input_dim, D = 30, 5, 4, 30 - X = np.random.rand(N, input_dim) - k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001) - K = k.K(X) - Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T - Y -= Y.mean(axis=0) - k = GPy.kern.Linear(input_dim) + GPy.kern.Bias(input_dim) + GPy.kern.White(input_dim, 0.00001) - m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing) - m.randomize() - self.assertTrue(m.checkgrad()) - - -if __name__ == "__main__": - print "Running unit tests, please be (very) patient..." - unittest.main() diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 2789d1de..657f5ac4 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -33,9 +33,10 @@ class Kern_check_model(GPy.core.Model): self.X2 = X2 self.dL_dK = dL_dK - def is_positive_definite(self): + def is_positive_semi_definite(self): v = np.linalg.eig(self.kernel.K(self.X))[0] - if any(v<-10*sys.float_info.epsilon): + if any(v.real<=-1e-10): + print v.real.min() return False else: return True @@ -89,7 +90,7 @@ class Kern_check_dKdiag_dX(Kern_check_dK_dX): return (np.diag(self.dL_dK)*self.kernel.Kdiag(self.X)).sum() def parameters_changed(self): - self.X.gradient = self.kernel.gradients_X_diag(self.dL_dK, self.X) + self.X.gradient = self.kernel.gradients_X_diag(self.dL_dK.diagonal(), self.X) @@ -119,7 +120,7 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb if verbose: print("Checking covariance function is positive definite.") - result = Kern_check_model(kern, X=X).is_positive_definite() + result = Kern_check_model(kern, X=X).is_positive_semi_definite() if result and verbose: print("Check passed.") if not result: @@ -214,18 +215,55 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb class KernelGradientTestsContinuous(unittest.TestCase): def setUp(self): - self.X = np.random.randn(100,2) - self.X2 = np.random.randn(110,2) + self.N, self.D = 100, 5 + self.X = np.random.randn(self.N,self.D) + self.X2 = np.random.randn(self.N+10,self.D) continuous_kerns = ['RBF', 'Linear'] self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns] def test_Matern32(self): - k = GPy.kern.Matern32(2) + k = GPy.kern.Matern32(self.D) + k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) def test_Matern52(self): - k = GPy.kern.Matern52(2) + k = GPy.kern.Matern52(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + + def test_RBF(self): + k = GPy.kern.RBF(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + + def test_Linear(self): + k = GPy.kern.Linear(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + +class KernelGradientTestsContinuous1D(unittest.TestCase): + def setUp(self): + self.N, self.D = 100, 1 + self.X = np.random.randn(self.N,self.D) + self.X2 = np.random.randn(self.N+10,self.D) + + continuous_kerns = ['RBF', 'Linear'] + self.kernclasses = [getattr(GPy.kern, s) for s 
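[Reviewer note] The renamed `is_positive_semi_definite` check above now tolerates tiny negative eigenvalues from floating-point round-off rather than demanding strict positivity. The same idea in isolation, as a sketch (using `eigvalsh` here, which suits the symmetric kernel matrix, rather than the `np.linalg.eig` call in the patch):

    import numpy as np

    def is_positive_semi_definite(K, tol=1e-10):
        # symmetric-matrix eigenvalues; allow tiny negatives due to round-off
        return np.linalg.eigvalsh(K).min() > -tol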
in continuous_kerns] + + def test_PeriodicExponential(self): + k = GPy.kern.PeriodicExponential(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + + def test_PeriodicMatern32(self): + k = GPy.kern.PeriodicMatern32(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + + def test_PeriodicMatern52(self): + k = GPy.kern.PeriodicMatern52(self.D) + k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) #TODO: turn off grad checkingwrt X for indexed kernels liek coregionalize @@ -251,6 +289,7 @@ class KernelTestsMiscellaneous(unittest.TestCase): self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=[self.linear, self.rbf]), self.linear.K(self.X)+self.rbf.K(self.X))) self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=self.sumkern.parts[0]), self.rbf.K(self.X))) + if __name__ == "__main__": print "Running unit tests, please be (very) patient..." unittest.main() diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py index c71842d8..ab26910e 100644 --- a/GPy/testing/likelihood_tests.py +++ b/GPy/testing/likelihood_tests.py @@ -541,7 +541,8 @@ class TestNoiseModels(object): #import ipdb; ipdb.set_trace() #NOTE this test appears to be stochastic for some likelihoods (student t?) # appears to all be working in test mode right now... - + if isinstance(model, GPy.likelihoods.StudentT): + import ipdb;ipdb.set_trace() assert m.checkgrad(step=step) ########### @@ -700,7 +701,6 @@ class LaplaceTests(unittest.TestCase): np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2) #Check marginals are the same with random m1.randomize() - import ipdb;ipdb.set_trace() m2[:] = m1[:] np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2) diff --git a/GPy/testing/mrd_tests.py b/GPy/testing/mrd_tests.py deleted file mode 100644 index 40fcb86a..00000000 --- a/GPy/testing/mrd_tests.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2013, Max Zwiessele -# Licensed under the BSD 3-clause license (see LICENSE.txt) -''' -Created on 10 Apr 2013 - -@author: maxz -''' - -import unittest -import numpy as np -import GPy - -class MRDTests(unittest.TestCase): - - def test_gradients(self): - num_m = 3 - N, num_inducing, input_dim, D = 20, 8, 6, 20 - X = np.random.rand(N, input_dim) - - k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim) - K = k.K(X) - - Ylist = [np.random.multivariate_normal(np.zeros(N), K, input_dim).T for _ in range(num_m)] - likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist] - - m = GPy.models.MRD(likelihood_list, input_dim=input_dim, kernels=k, num_inducing=num_inducing) - - self.assertTrue(m.checkgrad()) - -if __name__ == "__main__": - print "Running unit tests, please be (very) patient..." 
- unittest.main() diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index b2f57144..6555b8f4 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -16,21 +16,21 @@ class Test(unittest.TestCase): from GPy.core.parameterization import Param from GPy.core.parameterization.transformations import Logistic self.param = Param('param', np.random.rand(25,2), Logistic(0, 1)) - + self.test1 = GPy.core.Parameterized("test model") self.test1.add_parameter(self.white) self.test1.add_parameter(self.rbf, 0) self.test1.add_parameter(self.param) - + x = np.linspace(-2,6,4)[:,None] y = np.sin(x) self.testmodel = GPy.models.GPRegression(x,y) - + def test_add_parameter(self): self.assertEquals(self.rbf._parent_index_, 0) self.assertEquals(self.white._parent_index_, 1) pass - + def test_fixes(self): self.white.fix(warning=False) self.test1.remove_parameter(self.test1.param) @@ -41,18 +41,18 @@ class Test(unittest.TestCase): self.test1.add_parameter(self.white, 0) self.assertListEqual(self.test1._fixes_.tolist(),[FIXED,UNFIXED,UNFIXED]) - + def test_remove_parameter(self): from GPy.core.parameterization.transformations import FIXED, UNFIXED, __fixed__, Logexp self.white.fix() self.test1.remove_parameter(self.white) self.assertIs(self.test1._fixes_,None) - + self.assertListEqual(self.white._fixes_.tolist(), [FIXED]) self.assertEquals(self.white.constraints._offset, 0) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) - + self.test1.add_parameter(self.white, 0) self.assertIs(self.test1.constraints, self.white.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) @@ -60,17 +60,17 @@ class Test(unittest.TestCase): self.assertListEqual(self.test1.constraints[__fixed__].tolist(), [0]) self.assertIs(self.white._fixes_,None) self.assertListEqual(self.test1._fixes_.tolist(),[FIXED] + [UNFIXED] * 52) - + self.test1.remove_parameter(self.white) self.assertIs(self.test1._fixes_,None) self.assertListEqual(self.white._fixes_.tolist(), [FIXED]) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) self.assertListEqual(self.test1.constraints[Logexp()].tolist(), [0,1]) - + def test_add_parameter_already_in_hirarchy(self): self.assertRaises(HierarchyError, self.test1.add_parameter, self.white._parameters_[0]) - + def test_default_constraints(self): self.assertIs(self.rbf.variance.constraints._param_index_ops, self.rbf.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) @@ -83,7 +83,7 @@ class Test(unittest.TestCase): self.rbf.constrain(GPy.transformations.Square(), False) self.assertListEqual(self.test1.constraints[GPy.transformations.Square()].tolist(), range(2)) self.assertListEqual(self.test1.constraints[GPy.transformations.Logexp()].tolist(), [2]) - + self.test1.remove_parameter(self.rbf) self.assertListEqual(self.test1.constraints[GPy.transformations.Square()].tolist(), []) diff --git a/GPy/testing/unit_tests.py b/GPy/testing/unit_tests.py index 0cb4cd66..1aec7d7a 100644 --- a/GPy/testing/unit_tests.py +++ b/GPy/testing/unit_tests.py @@ -34,7 +34,7 @@ class GradientTests(unittest.TestCase): model_fit = getattr(GPy.models, model_type) # noise = GPy.kern.White(dimension) - kern = kern # + noise + kern = kern # + noise if uncertain_inputs: m = 
model_fit(X, Y, kernel=kern, X_variance=np.random.rand(X.shape[0], X.shape[1])) else: @@ -60,7 +60,7 @@ class GradientTests(unittest.TestCase): def test_GPRegression_mlp_1d(self): ''' Testing the GP regression with mlp kernel with white kernel on 1d data ''' - mlp = GPy.kern.mlp(1) + mlp = GPy.kern.MLP(1) self.check_model(mlp, model_type='GPRegression', dimension=1) def test_GPRegression_poly_1d(self): @@ -163,14 +163,14 @@ class GradientTests(unittest.TestCase): rbflin = GPy.kern.RBF(2) + GPy.kern.Linear(2) self.check_model(rbflin, model_type='SparseGPRegression', dimension=2) - #@unittest.expectedFailure + # @unittest.expectedFailure def test_SparseGPRegression_rbf_linear_white_kern_2D_uncertain_inputs(self): ''' Testing the sparse GP regression with rbf, linear kernel on 2d data with uncertain inputs''' rbflin = GPy.kern.RBF(2) + GPy.kern.Linear(2) raise unittest.SkipTest("This is not implemented yet!") self.check_model(rbflin, model_type='SparseGPRegression', dimension=2, uncertain_inputs=1) - #@unittest.expectedFailure + # @unittest.expectedFailure def test_SparseGPRegression_rbf_linear_white_kern_1D_uncertain_inputs(self): ''' Testing the sparse GP regression with rbf, linear kernel on 1d data with uncertain inputs''' rbflin = GPy.kern.RBF(1) + GPy.kern.Linear(1) @@ -202,7 +202,7 @@ class GradientTests(unittest.TestCase): X = np.hstack([np.random.normal(5, 2, N / 2), np.random.normal(10, 2, N / 2)])[:, None] Y = np.hstack([np.ones(N / 2), np.zeros(N / 2)])[:, None] kernel = GPy.kern.RBF(1) - m = GPy.models.GPClassification(X,Y,kernel=kernel) + m = GPy.models.GPClassification(X, Y, kernel=kernel) m.update_likelihood_approximation() self.assertTrue(m.checkgrad()) @@ -212,11 +212,11 @@ class GradientTests(unittest.TestCase): Y = np.hstack([np.ones(N / 2), np.zeros(N / 2)])[:, None] Z = np.linspace(0, 15, 4)[:, None] kernel = GPy.kern.RBF(1) - m = GPy.models.SparseGPClassification(X,Y,kernel=kernel,Z=Z) - #distribution = GPy.likelihoods.likelihood_functions.Bernoulli() - #likelihood = GPy.likelihoods.EP(Y, distribution) - #m = GPy.core.SparseGP(X, likelihood, kernel, Z) - #m.ensure_default_constraints() + m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, Z=Z) + # distribution = GPy.likelihoods.likelihood_functions.Bernoulli() + # likelihood = GPy.likelihoods.EP(Y, distribution) + # m = GPy.core.SparseGP(X, likelihood, kernel, Z) + # m.ensure_default_constraints() m.update_likelihood_approximation() self.assertTrue(m.checkgrad()) @@ -224,8 +224,8 @@ class GradientTests(unittest.TestCase): N = 20 X = np.hstack([np.random.rand(N / 2) + 1, np.random.rand(N / 2) - 1])[:, None] k = GPy.kern.RBF(1) + GPy.kern.White(1) - Y = np.hstack([np.ones(N/2),np.zeros(N/2)])[:,None] - m = GPy.models.FITCClassification(X, Y, kernel = k) + Y = np.hstack([np.ones(N / 2), np.zeros(N / 2)])[:, None] + m = GPy.models.FITCClassification(X, Y, kernel=k) m.update_likelihood_approximation() self.assertTrue(m.checkgrad()) @@ -238,7 +238,7 @@ class GradientTests(unittest.TestCase): Y = np.vstack((Y1, Y2)) k1 = GPy.kern.RBF(1) - m = GPy.models.GPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1]) + m = GPy.models.GPMultioutputRegression(X_list=[X1, X2], Y_list=[Y1, Y2], kernel_list=[k1]) m.constrain_fixed('.*rbf_var', 1.) 
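[Reviewer note] Most of the tests touched in this patch follow one pattern: build a model from a kernel, randomize its parameters, then compare analytic gradients against finite differences. A minimal sketch of that pattern, with illustrative data that is not from the test suite:

    import numpy as np
    import GPy

    X = np.random.rand(20, 1)
    Y = np.sin(X) + 0.05*np.random.randn(20, 1)
    m = GPy.models.GPRegression(X, Y, kernel=GPy.kern.MLP(1))
    m.randomize()
    assert m.checkgrad()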
self.assertTrue(m.checkgrad()) @@ -251,7 +251,7 @@ class GradientTests(unittest.TestCase): Y = np.vstack((Y1, Y2)) k1 = GPy.kern.RBF(1) - m = GPy.models.SparseGPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1]) + m = GPy.models.SparseGPMultioutputRegression(X_list=[X1, X2], Y_list=[Y1, Y2], kernel_list=[k1]) m.constrain_fixed('.*rbf_var', 1.) self.assertTrue(m.checkgrad()) From c6b1f513d3569cc55eb204b8d4fdd6f9649bd741 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 12:29:14 +0000 Subject: [PATCH 058/116] caching now resets cache on error --- GPy/util/caching.py | 50 ++++++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/GPy/util/caching.py b/GPy/util/caching.py index 5de03059..ec8f9754 100644 --- a/GPy/util/caching.py +++ b/GPy/util/caching.py @@ -52,29 +52,33 @@ class Cacher(object): #if the result is cached, return the cached computation state = [all(a is b for a, b in itertools.izip_longest(args, cached_i)) for cached_i in self.cached_inputs] - if any(state): - i = state.index(True) - if self.inputs_changed[i]: - #(elements of) the args have changed since we last computed: update - self.cached_outputs[i] = self.operation(*args, **kw) - self.inputs_changed[i] = False - return self.cached_outputs[i] - else: - #first time we've seen these arguments: compute + try: + if any(state): + i = state.index(True) + if self.inputs_changed[i]: + #(elements of) the args have changed since we last computed: update + self.cached_outputs[i] = self.operation(*args, **kw) + self.inputs_changed[i] = False + return self.cached_outputs[i] + else: + #first time we've seen these arguments: compute - #first make sure the depth limit isn't exceeded - if len(self.cached_inputs) == self.limit: - args_ = self.cached_inputs.pop(0) - [a.remove_observer(self, self.on_cache_changed) for a in args_ if a is not None] - self.inputs_changed.pop(0) - self.cached_outputs.pop(0) - - #compute - self.cached_inputs.append(oa_all) - self.cached_outputs.append(self.operation(*args, **kw)) - self.inputs_changed.append(False) - [a.add_observer(self, self.on_cache_changed) for a in observable_args] - return self.cached_outputs[-1]#return + #first make sure the depth limit isn't exceeded + if len(self.cached_inputs) == self.limit: + args_ = self.cached_inputs.pop(0) + [a.remove_observer(self, self.on_cache_changed) for a in args_ if a is not None] + self.inputs_changed.pop(0) + self.cached_outputs.pop(0) + #compute + self.cached_inputs.append(oa_all) + self.cached_outputs.append(self.operation(*args, **kw)) + self.inputs_changed.append(False) + [a.add_observer(self, self.on_cache_changed) for a in observable_args] + return self.cached_outputs[-1]#return + except: + raise + finally: + self.reset() def on_cache_changed(self, arg): """ @@ -84,7 +88,7 @@ class Cacher(object): """ self.inputs_changed = [any([a is arg for a in args]) or old_ic for args, old_ic in zip(self.cached_inputs, self.inputs_changed)] - def reset(self, obj): + def reset(self): """ Totally reset the cache """ From e471ec7a15b873d69a164e60b8e4d70b5a47df28 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 12:29:35 +0000 Subject: [PATCH 059/116] whitespaces --- GPy/models/mrd.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index b547f2d1..81018015 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -104,23 +104,23 @@ class MRD(Model): setattr(self, 'Y{}'.format(i), p) 
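[Reviewer note] One caveat on the caching change above: a `try`/`finally` that calls `reset()` unconditionally clears the cache after every call, successful or not, which defeats the memoisation entirely. If the intent of "resets cache on error" is to drop state only on failure, the reset belongs in the `except` branch. A sketch of that control flow, reusing the Cacher attribute names:

    try:
        # ... cached lookup / computation as above ...
        return self.cached_outputs[-1]
    except:
        self.reset()  # drop possibly inconsistent cache state, then re-raise
        raise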
self.add_parameter(p) self._in_init_ = False - + def parameters_changed(self): self._log_marginal_likelihood = 0 self.posteriors = [] self.Z.gradient = 0. self.X.mean.gradient = 0. self.X.variance.gradient = 0. - + for y, k, l, i in itertools.izip(self.Ylist, self.kern, self.likelihood, self.inference_method): posterior, lml, grad_dict = i.inference(k, self.X, self.Z, l, y) - + self.posteriors.append(posterior) self._log_marginal_likelihood += lml - + # likelihood gradients l.update_gradients(grad_dict.pop('partial_for_likelihood')) - + #gradients wrt kernel dL_dKmm = grad_dict.pop('dL_dKmm') k.update_gradients_full(dL_dKmm, self.Z, None) @@ -132,7 +132,7 @@ class MRD(Model): self.Z.gradient += k.gradients_X(dL_dKmm, self.Z) self.Z.gradient += k.gradients_Z_expectations( grad_dict['dL_dpsi1'], grad_dict['dL_dpsi2'], Z=self.Z, variational_posterior=self.X) - + dL_dmean, dL_dS = k.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **grad_dict) self.X.mean.gradient += dL_dmean self.X.variance.gradient += dL_dS From f77233acf9adfbf46db0e5a09b5905c30b8afb98 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 12:30:07 +0000 Subject: [PATCH 060/116] fixed mlp kern --- GPy/kern/_src/mlp.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GPy/kern/_src/mlp.py b/GPy/kern/_src/mlp.py index 85792acd..ee15d967 100644 --- a/GPy/kern/_src/mlp.py +++ b/GPy/kern/_src/mlp.py @@ -96,12 +96,12 @@ class MLP(Kern): vec = (X*X).sum(1)*self.weight_variance+self.bias_variance + 1. return 2*four_over_tau*self.weight_variance*self.variance*((X[None, :, :]/denom[:, :, None] - vec[None, :, None]*X[:, None, :]*(numer/denom3)[:, :, None])*(dL_dK/np.sqrt(1-arg*arg))[:, :, None]).sum(1) - def dKdiag_dX(self, dL_dKdiag, X, target): + def gradients_X_diag(self, dL_dKdiag, X): """Gradient of diagonal of covariance with respect to X""" self._K_diag_computations(X) arg = self._K_diag_asin_arg denom = self._K_diag_denom - numer = self._K_diag_numer + #numer = self._K_diag_numer return four_over_tau*2.*self.weight_variance*self.variance*X*(1./denom*(1. - arg)*dL_dKdiag/(np.sqrt(1-arg*arg)))[:, None] From 1f9509d9795164c1fd74caceb822362889bf290a Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Thu, 13 Mar 2014 13:13:15 +0000 Subject: [PATCH 061/116] testing a bit cleaned periodic is turned off, bc it need different tests, discontinuous still needed --- GPy/core/model.py | 9 +-- GPy/core/parameterization/param.py | 4 +- GPy/core/parameterization/parameter_core.py | 12 ++-- GPy/kern/_src/kern.py | 2 +- GPy/kern/_src/prod.py | 2 +- GPy/models/mrd.py | 33 ++++++----- GPy/testing/kernel_tests.py | 61 ++++++++++++--------- GPy/testing/likelihood_tests.py | 4 +- GPy/testing/unit_tests.py | 9 +-- 9 files changed, 71 insertions(+), 65 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 710c1b22..c2a9ed23 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -253,7 +253,7 @@ class Model(Parameterized): sgd.run() self.optimization_runs.append(sgd) - def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, _debug=False): + def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3): """ Check the gradient of the ,odel by comparing to a numerical estimate. 
If the verbose flag is passed, invividual @@ -349,13 +349,6 @@ class Model(Parameterized): xx[xind] -= 2.*step f2 = self.objective_function(xx) numerical_gradient = (f1 - f2) / (2 * step) - if _debug: - for p in self.kern.flattened_parameters: - p._parent_._debug=True - self.gradient[xind] = numerical_gradient - self._set_params_transformed(x) - for p in self.kern.flattened_parameters: - p._parent_._debug=False if np.all(gradient[xind]==0): ratio = (f1-f2) == gradient[xind] else: ratio = (f1 - f2) / (2 * step * gradient[xind]) difference = np.abs((f1 - f2) / 2 / step - gradient[xind]) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index 8cad2d29..cad20a8a 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -446,8 +446,8 @@ class ParamConcatenation(object): def untie(self, *ties): [param.untie(*ties) for param in self.params] - def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False): - return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance, _debug=_debug) + def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3): + return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance) #checkgrad.__doc__ = Gradcheckable.checkgrad.__doc__ __lt__ = lambda self, val: self._vals() < val diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 001b98ed..51b6cddf 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -206,7 +206,7 @@ class Gradcheckable(Parentable): def __init__(self, *a, **kw): super(Gradcheckable, self).__init__(*a, **kw) - def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False): + def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3): """ Check the gradient of this parameter with respect to the highest parent's objective function. @@ -220,10 +220,10 @@ class Gradcheckable(Parentable): :param flaot tolerance: the tolerance for the gradient ratio or difference. """ if self.has_parent(): - return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance, _debug=_debug) - return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance, _debug=_debug) + return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance) + return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance) - def _checkgrad(self, param, verbose=0, step=1e-6, tolerance=1e-3, _debug=False): + def _checkgrad(self, param, verbose=0, step=1e-6, tolerance=1e-3): """ Perform the checkgrad on the model. TODO: this can be done more efficiently, when doing it inside here @@ -694,6 +694,10 @@ class Parameterizable(OptimizationHandlable): elif pname not in dir(self): self.__dict__[pname] = param self._added_names_.add(pname) + else: + print "WARNING: added a parameter with formatted name {}, which is already a member of {} object. 
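[Reviewer note] For reference, the comparison that `_checkgrad` builds above reduces to a central difference set against the analytic gradient: by ratio when the gradient is non-zero, and by absolute difference otherwise. Distilled to a standalone sketch (the full pass/fail criterion is not shown in this hunk, so the acceptance rule here is an assumption):

    import numpy as np

    def grad_matches(f, grad_i, x, i, step=1e-6, tolerance=1e-3):
        xp, xm = x.copy(), x.copy()
        xp[i] += step
        xm[i] -= step
        numerical = (f(xp) - f(xm)) / (2*step)
        if grad_i == 0:
            return numerical == 0
        ratio = numerical / grad_i
        return abs(1 - ratio) < tolerance or abs(numerical - grad_i) < tolerance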
Trying to change the parameter name to\n {}".format(pname, self.__class__, param.name+"_") + param.name += "_" + self._add_parameter_name(param, ignore_added_names) def _remove_parameter_name(self, param=None, pname=None): assert param is None or pname is None, "can only delete either param by name, or the name of a param" diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 014c4659..dc6eceb4 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -156,7 +156,7 @@ class Kern(Parameterized): other.active_dims += self.input_dim return self.prod(other) - def prod(self, other, name='prod'): + def prod(self, other, name='mul'): """ Multiply two kernels (either on the same space, or on the tensor product of the input space). diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index f9c36023..f3b2b50f 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -17,7 +17,7 @@ class Prod(CombinationKernel): :rtype: kernel object """ - def __init__(self, kernels, name='prod'): + def __init__(self, kernels, name='mul'): assert len(kernels) == 2, 'only implemented for two kernels as of yet' super(Prod, self).__init__(kernels, name) diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index 81018015..17949012 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -15,13 +15,13 @@ from ..likelihoods import Gaussian class MRD(Model): """ - Apply MRD to all given datasets Y in Ylist. - + Apply MRD to all given datasets Y in Ylist. + Y_i in [n x p_i] - - The samples n in the datasets need + + The samples n in the datasets need to match up, whereas the dimensionality p_d can differ. - + :param [array-like] Ylist: List of datasets to apply MRD on :param input_dim: latent dimensionality :type input_dim: int @@ -45,13 +45,12 @@ class MRD(Model): :param str name: the name of this model :param [str] Ynames: the names for the datasets given, must be of equal length as Ylist or None """ - - def __init__(self, Ylist, input_dim, X=None, X_variance=None, + def __init__(self, Ylist, input_dim, X=None, X_variance=None, initx = 'PCA', initz = 'permute', - num_inducing=10, Z=None, kernel=None, + num_inducing=10, Z=None, kernel=None, inference_method=None, likelihood=None, name='mrd', Ynames=None): super(MRD, self).__init__(name) - + # sort out the kernels if kernel is None: from ..kern import RBF @@ -64,23 +63,23 @@ class MRD(Model): self.kern = kernel self.input_dim = input_dim self.num_inducing = num_inducing - + self.Ylist = Ylist self._in_init_ = True X = self._init_X(initx, Ylist) self.Z = Param('inducing inputs', self._init_Z(initz, X)) self.num_inducing = self.Z.shape[0] # ensure M==N if M>N - + if X_variance is None: X_variance = np.random.uniform(0, .2, X.shape) - + self.variational_prior = NormalPrior() self.X = NormalPosterior(X, X_variance) - + if likelihood is None: self.likelihood = [Gaussian(name='Gaussian_noise'.format(i)) for i in range(len(Ylist))] else: self.likelihood = likelihood - + if inference_method is None: self.inference_method= [] for y in Ylist: @@ -91,12 +90,12 @@ class MRD(Model): else: self.inference_method = inference_method self.inference_method.set_limit(len(Ylist)) - + self.add_parameters(self.X, self.Z) - + if Ynames is None: Ynames = ['Y{}'.format(i) for i in range(len(Ylist))] - + for i, n, k, l in itertools.izip(itertools.count(), Ynames, self.kern, self.likelihood): p = Parameterized(name=n) p.add_parameter(k) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 657f5ac4..d54b3871 100644 --- a/GPy/testing/kernel_tests.py 
+++ b/GPy/testing/kernel_tests.py @@ -227,6 +227,16 @@ class KernelGradientTestsContinuous(unittest.TestCase): k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + def test_Prod(self): + k = GPy.kern.Matern32([2,3]) * GPy.kern.RBF([0,4]) + GPy.kern.Linear(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + + def test_Add(self): + k = GPy.kern.Matern32([2,3]) + GPy.kern.RBF([0,4]) + GPy.kern.Linear(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + def test_Matern52(self): k = GPy.kern.Matern52(self.D) k.randomize() @@ -242,31 +252,30 @@ class KernelGradientTestsContinuous(unittest.TestCase): k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) -class KernelGradientTestsContinuous1D(unittest.TestCase): - def setUp(self): - self.N, self.D = 100, 1 - self.X = np.random.randn(self.N,self.D) - self.X2 = np.random.randn(self.N+10,self.D) - - continuous_kerns = ['RBF', 'Linear'] - self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns] - - def test_PeriodicExponential(self): - k = GPy.kern.PeriodicExponential(self.D) - k.randomize() - self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) - - def test_PeriodicMatern32(self): - k = GPy.kern.PeriodicMatern32(self.D) - k.randomize() - self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) - - def test_PeriodicMatern52(self): - k = GPy.kern.PeriodicMatern52(self.D) - k.randomize() - self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) - - #TODO: turn off grad checkingwrt X for indexed kernels liek coregionalize +#TODO: turn off grad checkingwrt X for indexed kernels liek coregionalize +# class KernelGradientTestsContinuous1D(unittest.TestCase): +# def setUp(self): +# self.N, self.D = 100, 1 +# self.X = np.random.randn(self.N,self.D) +# self.X2 = np.random.randn(self.N+10,self.D) +# +# continuous_kerns = ['RBF', 'Linear'] +# self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns] +# +# def test_PeriodicExponential(self): +# k = GPy.kern.PeriodicExponential(self.D) +# k.randomize() +# self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) +# +# def test_PeriodicMatern32(self): +# k = GPy.kern.PeriodicMatern32(self.D) +# k.randomize() +# self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) +# +# def test_PeriodicMatern52(self): +# k = GPy.kern.PeriodicMatern52(self.D) +# k.randomize() +# self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) class KernelTestsMiscellaneous(unittest.TestCase): @@ -275,7 +284,7 @@ class KernelTestsMiscellaneous(unittest.TestCase): N, D = 100, 10 self.X = np.linspace(-np.pi, +np.pi, N)[:,None] * np.ones(D) self.rbf = GPy.kern.RBF(range(2)) - self.linear = GPy.kern.Linear((3,5,6)) + self.linear = GPy.kern.Linear((3,6)) self.matern = GPy.kern.Matern32(np.array([2,4,7])) self.sumkern = self.rbf + self.linear self.sumkern += self.matern diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py index ab26910e..3c6d9e39 100644 --- a/GPy/testing/likelihood_tests.py +++ b/GPy/testing/likelihood_tests.py @@ -541,8 +541,8 @@ class TestNoiseModels(object): #import ipdb; ipdb.set_trace() #NOTE this test appears to be stochastic for some 
likelihoods (student t?) # appears to all be working in test mode right now... - if isinstance(model, GPy.likelihoods.StudentT): - import ipdb;ipdb.set_trace() + #if isinstance(model, GPy.likelihoods.StudentT): + # import ipdb;ipdb.set_trace() assert m.checkgrad(step=step) ########### diff --git a/GPy/testing/unit_tests.py b/GPy/testing/unit_tests.py index 1aec7d7a..a7ebe6fe 100644 --- a/GPy/testing/unit_tests.py +++ b/GPy/testing/unit_tests.py @@ -63,10 +63,11 @@ class GradientTests(unittest.TestCase): mlp = GPy.kern.MLP(1) self.check_model(mlp, model_type='GPRegression', dimension=1) - def test_GPRegression_poly_1d(self): - ''' Testing the GP regression with polynomial kernel with white kernel on 1d data ''' - mlp = GPy.kern.Poly(1, degree=5) - self.check_model(mlp, model_type='GPRegression', dimension=1) + #TODO: + #def test_GPRegression_poly_1d(self): + # ''' Testing the GP regression with polynomial kernel with white kernel on 1d data ''' + # mlp = GPy.kern.Poly(1, degree=5) + # self.check_model(mlp, model_type='GPRegression', dimension=1) def test_GPRegression_matern52_1D(self): ''' Testing the GP regression with matern52 kernel on 1d data ''' From cc96f5b3d5c061d239bd5d0c7a072cee603236e7 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 13 Mar 2014 14:42:03 +0000 Subject: [PATCH 062/116] lots of fixes, including prediction being mean and variance only --- GPy/core/gp.py | 8 ++- GPy/core/sparse_gp.py | 4 +- .../latent_function_inference/__init__.py | 12 ++-- .../latent_function_inference/dtc.py | 13 ++-- .../exact_gaussian_inference.py | 10 +-- .../latent_function_inference/fitc.py | 11 ++-- .../latent_function_inference/var_dtc.py | 38 +++++------ GPy/likelihoods/gaussian.py | 21 ++++--- GPy/likelihoods/likelihood.py | 39 +++++------- GPy/likelihoods/mixed_noise.py | 63 ++++++++++--------- GPy/plotting/matplot_dep/models_plots.py | 21 +++---- GPy/testing/likelihood_tests.py | 4 +- GPy/util/caching.py | 2 +- 13 files changed, 118 insertions(+), 128 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 3552c37e..73680900 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -120,8 +120,12 @@ class GP(Model): mu, var = self._raw_predict(Xnew, full_cov=full_cov) # now push through likelihood - mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov, Y_metadata) - return mean, var, _025pm, _975pm + mean, var = self.likelihood.predictive_values(mu, var, full_cov, Y_metadata) + return mean, var + + def predict_quantiles(self, X, quantiles=(0.025, 0.975), Y_metadata=None): + m, v = self._raw_predict(X, full_cov=False) + return self.likelihood.predictive_quantiles(m, v, quantiles, Y_metadata) def posterior_samples_f(self,X,size=10, full_cov=True): """ diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 16b66676..23f8e690 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -54,13 +54,13 @@ class SparseGP(GP): def parameters_changed(self): self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y) - self.likelihood.update_gradients(self.grad_dict.pop('partial_for_likelihood')) + self.likelihood.update_gradients(self.grad_dict['dL_dthetaL']) if isinstance(self.X, VariationalPosterior): #gradients wrt kernel dL_dKmm = self.grad_dict.pop('dL_dKmm') self.kern.update_gradients_full(dL_dKmm, self.Z, None) target = self.kern.gradient.copy() - self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict) + 
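[Reviewer note] After this commit, `predict` returns only the predictive mean and variance; confidence bounds move to the separate `predict_quantiles` method. The typical call pattern under the new API, as a sketch (m is any fitted GPy model and Xnew a test input array; with the defaults at this commit, the returned bounds are the 2.5% and 97.5% quantiles):

    mu, var = m.predict(Xnew)
    lower, upper = m.predict_quantiles(Xnew)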
self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2']) self.kern.gradient += target #gradients wrt Z diff --git a/GPy/inference/latent_function_inference/__init__.py b/GPy/inference/latent_function_inference/__init__.py index a633c381..58d77c03 100644 --- a/GPy/inference/latent_function_inference/__init__.py +++ b/GPy/inference/latent_function_inference/__init__.py @@ -16,8 +16,8 @@ If the likelihood object is something other than Gaussian, then exact inference is not tractable. We then resort to a Laplace approximation (laplace.py) or expectation propagation (ep.py). -The inference methods return a -:class:`~GPy.inference.latent_function_inference.posterior.Posterior` +The inference methods return a +:class:`~GPy.inference.latent_function_inference.posterior.Posterior` instance, which is a simple structure which contains a summary of the posterior. The model classes can then use this posterior object for making predictions, optimizing hyper-parameters, @@ -33,13 +33,13 @@ from dtc import DTC from fitc import FITC # class FullLatentFunctionData(object): -# -# +# +# # class LatentFunctionInference(object): # def inference(self, kern, X, likelihood, Y, Y_metadata=None): # """ # Do inference on the latent functions given a covariance function `kern`, -# inputs and outputs `X` and `Y`, and a likelihood `likelihood`. +# inputs and outputs `X` and `Y`, and a likelihood `likelihood`. # Additional metadata for the outputs `Y` can be given in `Y_metadata`. # """ -# raise NotImplementedError, "Abstract base class for full inference" \ No newline at end of file +# raise NotImplementedError, "Abstract base class for full inference" diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py index df2d5a03..5ebc5e53 100644 --- a/GPy/inference/latent_function_inference/dtc.py +++ b/GPy/inference/latent_function_inference/dtc.py @@ -40,7 +40,7 @@ class DTC(object): U = Knm Uy = np.dot(U.T,Y) - #factor Kmm + #factor Kmm Kmmi, L, Li, _ = pdinv(Kmm) # Compute A @@ -78,7 +78,9 @@ class DTC(object): Uv = np.dot(U, v) dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*beta**2 - grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T} + dL_dthetaL = likelihood.exact_inference_gradients(dL_dR) + + grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL} #construct a posterior object post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L) @@ -154,11 +156,8 @@ class vDTC(object): dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) )*beta**2 dL_dR -=beta*trace_term/num_data - grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*beta, 'dL_dKnm':dL_dU.T} - - #update gradients - kern.update_gradients_sparse(X=X, Z=Z, **grad_dict) - likelihood.update_gradients(dL_dR) + dL_dthetaL = likelihood.exact_inference_gradients(dL_dR) + grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*beta, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL} #construct a posterior object post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L) diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py 
b/GPy/inference/latent_function_inference/exact_gaussian_inference.py index b063b64d..e76575c6 100644 --- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py +++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py @@ -33,7 +33,7 @@ class ExactGaussianInference(object): #if Y in self.cache, return self.Cache[Y], else store Y in cache and return L. raise NotImplementedError, 'TODO' #TODO - def inference(self, kern, X, likelihood, Y, **Y_metadata): + def inference(self, kern, X, likelihood, Y, Y_metadata=None): """ Returns a Posterior class containing essential quantities of the posterior """ @@ -41,12 +41,14 @@ class ExactGaussianInference(object): K = kern.K(X) - Wi, LW, LWi, W_logdet = pdinv(K + likelihood.covariance_matrix(Y, **Y_metadata)) + Wi, LW, LWi, W_logdet = pdinv(K + likelihood.covariance_matrix(Y, Y_metadata)) alpha, _ = dpotrs(LW, YYT_factor, lower=1) log_marginal = 0.5*(-Y.size * log_2_pi - Y.shape[1] * W_logdet - np.sum(alpha * YYT_factor)) - + dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi) - return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK} + dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK)) + + return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL} diff --git a/GPy/inference/latent_function_inference/fitc.py b/GPy/inference/latent_function_inference/fitc.py index 9e9c14e2..9294e25d 100644 --- a/GPy/inference/latent_function_inference/fitc.py +++ b/GPy/inference/latent_function_inference/fitc.py @@ -18,10 +18,6 @@ class FITC(object): self.const_jitter = 1e-6 def inference(self, kern, X, Z, likelihood, Y): - - #TODO: MAX! fix this! - from ...util.misc import param_to_array - Y = param_to_array(Y) num_inducing, _ = Z.shape num_data, output_dim = Y.shape @@ -36,7 +32,7 @@ class FITC(object): Knm = kern.K(X, Z) U = Knm - #factor Kmm + #factor Kmm Kmmi, L, Li, _ = pdinv(Kmm) #compute beta_star, the effective noise precision @@ -72,7 +68,7 @@ class FITC(object): vvT_P = tdot(v.reshape(-1,1)) + P dL_dK = 0.5*(Kmmi - vvT_P) KiU = np.dot(Kmmi, U.T) - dL_dK += np.dot(KiU*dL_dR, KiU.T) + dL_dK += np.dot(KiU*dL_dR, KiU.T) # Compute dL_dU vY = np.dot(v.reshape(-1,1),Y.T) @@ -80,7 +76,8 @@ class FITC(object): dL_dU *= beta_star dL_dU -= 2.*KiU*dL_dR - grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR, 'dL_dKnm':dL_dU.T, 'partial_for_likelihood':dL_dR} + dL_dthetaL = likelihood.exact_inference_gradients(dL_dR) + grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL} #construct a posterior object post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L) diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index 6239e5a4..7a0c14e8 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -137,25 +137,25 @@ class VarDTC(object): psi0, A, LB, trYYT, data_fit) #put the gradients in the right places - partial_for_likelihood = _compute_partial_for_likelihood(likelihood, + dL_dR = _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT) - #likelihood.update_gradients(partial_for_likelihood) + dL_dthetaL = likelihood.exact_inference_gradients(dL_dR) if uncertain_inputs: grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dpsi0':dL_dpsi0, 'dL_dpsi1':dL_dpsi1, 
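[Reviewer note] The quantities assembled in `exact_gaussian_inference` above are the standard Gaussian marginal-likelihood identities. A naive dense sketch for reference (the patched code uses Cholesky factors via pdinv/dpotrs instead of explicit solves and inverses):

    import numpy as np
    from numpy.linalg import slogdet, solve, inv

    def exact_gaussian_terms(K, R, Y):
        # W = K + R, with R the likelihood covariance; alpha = W^{-1} Y
        W = K + R
        alpha = solve(W, Y)
        N, D = Y.shape
        log_marginal = -0.5*(N*D*np.log(2*np.pi) + D*slogdet(W)[1]
                             + np.sum(alpha*Y))
        dL_dK = 0.5*(alpha.dot(alpha.T) - D*inv(W))
        return log_marginal, dL_dK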
'dL_dpsi2':dL_dpsi2, - 'partial_for_likelihood':partial_for_likelihood} + 'dL_dthetaL':dL_dthetaL} else: grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dKdiag':dL_dpsi0, 'dL_dKnm':dL_dpsi1, - 'partial_for_likelihood':partial_for_likelihood} + 'dL_dthetaL':dL_dthetaL} #get sufficient things for posterior prediction #TODO: do we really want to do this in the loop? @@ -232,7 +232,7 @@ class VarDTCMissingData(object): if uncertain_inputs: dL_dpsi2_all = np.zeros((Y.shape[0], num_inducing, num_inducing)) - partial_for_likelihood = 0 + dL_dR = 0 woodbury_vector = np.zeros((num_inducing, Y.shape[1])) woodbury_inv_all = np.zeros((num_inducing, num_inducing, Y.shape[1])) dL_dKmm = 0 @@ -308,7 +308,7 @@ class VarDTCMissingData(object): psi0, A, LB, trYYT, data_fit) #put the gradients in the right places - partial_for_likelihood += _compute_partial_for_likelihood(likelihood, + dL_dR += _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, @@ -334,12 +334,12 @@ class VarDTCMissingData(object): 'dL_dpsi0':dL_dpsi0_all, 'dL_dpsi1':dL_dpsi1_all, 'dL_dpsi2':dL_dpsi2_all, - 'partial_for_likelihood':partial_for_likelihood} + 'dL_dR':dL_dR} else: grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dKdiag':dL_dpsi0_all, 'dL_dKnm':dL_dpsi1_all, - 'partial_for_likelihood':partial_for_likelihood} + 'dL_dR':dL_dR} #get sufficient things for posterior prediction #TODO: do we really want to do this in the loop? @@ -385,11 +385,11 @@ def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, C return dL_dpsi0, dL_dpsi1, dL_dpsi2 -def _compute_partial_for_likelihood(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT): +def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT): # the partial derivative vector for the likelihood if likelihood.size == 0: # save computation here. 
- partial_for_likelihood = None + dL_dR = None elif het_noise: if uncertain_inputs: raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented" @@ -399,20 +399,20 @@ def _compute_partial_for_likelihood(likelihood, het_noise, uncertain_inputs, LB, Lmi_psi1, nil = dtrtrs(Lm, psi1.T, lower=1, trans=0) _LBi_Lmi_psi1, _ = dtrtrs(LB, Lmi_psi1, lower=1, trans=0) - partial_for_likelihood = -0.5 * beta + 0.5 * likelihood.V**2 - partial_for_likelihood += 0.5 * output_dim * (psi0 - np.sum(Lmi_psi1**2,0))[:,None] * beta**2 + dL_dR = -0.5 * beta + 0.5 * likelihood.V**2 + dL_dR += 0.5 * output_dim * (psi0 - np.sum(Lmi_psi1**2,0))[:,None] * beta**2 - partial_for_likelihood += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*beta**2 + dL_dR += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*beta**2 - partial_for_likelihood += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * likelihood.Y * beta**2 - partial_for_likelihood += 0.5*np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * beta**2 + dL_dR += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * likelihood.Y * beta**2 + dL_dR += 0.5*np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * beta**2 else: # likelihood is not heteroscedatic - partial_for_likelihood = -0.5 * num_data * output_dim * beta + 0.5 * trYYT * beta ** 2 - partial_for_likelihood += 0.5 * output_dim * (psi0.sum() * beta ** 2 - np.trace(A) * beta) - partial_for_likelihood += beta * (0.5 * np.sum(A * DBi_plus_BiPBi) - data_fit) - return partial_for_likelihood + dL_dR = -0.5 * num_data * output_dim * beta + 0.5 * trYYT * beta ** 2 + dL_dR += 0.5 * output_dim * (psi0.sum() * beta ** 2 - np.trace(A) * beta) + dL_dR += beta * (0.5 * np.sum(A * DBi_plus_BiPBi) - data_fit) + return dL_dR def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit): #compute log marginal likelihood diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py index 8e34f6b9..032136a7 100644 --- a/GPy/likelihoods/gaussian.py +++ b/GPy/likelihoods/gaussian.py @@ -18,6 +18,7 @@ import link_functions from likelihood import Likelihood from ..core.parameterization import Param from ..core.parameterization.transformations import Logexp +from scipy import stats class Gaussian(Likelihood): """ @@ -49,11 +50,14 @@ class Gaussian(Likelihood): if isinstance(gp_link, link_functions.Identity): self.log_concave = True - def covariance_matrix(self, Y, **Y_metadata): + def covariance_matrix(self, Y, Y_metadata=None): return np.eye(Y.shape[0]) * self.variance - def update_gradients(self, partial): - self.variance.gradient = np.sum(partial) + def update_gradients(self, grad): + self.variance.gradient = grad + + def exact_inference_gradients(self, dL_dKdiag): + return dL_dKdiag.sum() def _preprocess_values(self, Y): """ @@ -76,16 +80,12 @@ class Gaussian(Likelihood): Z_hat = 1./np.sqrt(2.*np.pi*sum_var)*np.exp(-.5*(data_i - v_i/tau_i)**2./sum_var) return Z_hat, mu_hat, sigma2_hat - def predictive_values(self, mu, var, full_cov=False): + def predictive_values(self, mu, var, full_cov=False, Y_metadata=None): if full_cov: var += np.eye(var.shape[0])*self.variance - d = 2*np.sqrt(np.diag(var)) - low, up = mu - d, mu + d else: var += self.variance - d = 2*np.sqrt(var) - low, up = mu - d, mu + d - return mu, var, low, up + return mu, var def predictive_mean(self, mu, sigma): return mu @@ -93,6 +93,9 @@ class Gaussian(Likelihood): def predictive_variance(self, mu, sigma, predictive_mean=None): return self.variance + sigma**2 + def 
predictive_quantiles(self, mu, var, quantiles, Y_metadata): + return [stats.norm.ppf(q)*np.sqrt(var) + mu for q in quantiles] + def pdf_link(self, link_f, y, extra_data=None): """ Likelihood function given link(f) diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index aff55533..331bcf8d 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -135,7 +135,7 @@ class Likelihood(Parameterized): return mean - def _predictive_variance(self,mu,variance,predictive_mean=None): + def _predictive_variance(self, mu,variance, predictive_mean=None): """ Numerical approximation to the predictive variance: V(Y_star) @@ -358,7 +358,7 @@ class Likelihood(Parameterized): return dlogpdf_dtheta, dlogpdf_df_dtheta, d2logpdf_df2_dtheta - def predictive_values(self, mu, var, full_cov=False, sampling=True, num_samples=10000): + def predictive_values(self, mu, var, full_cov=False, Y_metadata=None): """ Compute mean, variance and conficence interval (percentiles 5 and 95) of the prediction. @@ -366,14 +366,21 @@ class Likelihood(Parameterized): :param var: variance of the latent variable, f, of posterior :param full_cov: whether to use the full covariance or just the diagonal :type full_cov: Boolean - :param num_samples: number of samples to use in computing quantiles and - possibly mean variance - :type num_samples: integer - :param sampling: Whether to use samples for mean and variances anyway - :type sampling: Boolean - """ + pred_mean = self.predictive_mean(mu, var, Y_metadata) + pred_var = self.predictive_variance(mu, var, pred_mean, Y_metadata) + + return pred_mean, pred_var + + + def samples(self, gp): + """ + Returns a set of samples of observations based on a given value of the latent variable. + + :param gp: latent variable + """ + raise NotImplementedError if sampling: #Get gp_samples f* using posterior mean and variance if not full_cov: @@ -393,20 +400,4 @@ class Likelihood(Parameterized): q1 = np.percentile(samples, 2.5, axis=axis)[:,None] q3 = np.percentile(samples, 97.5, axis=axis)[:,None] - else: - pred_mean = self.predictive_mean(mu, var) - pred_var = self.predictive_variance(mu, var, pred_mean) - print "WARNING: Predictive quantiles are only computed when sampling." - q1 = np.repeat(np.nan,pred_mean.size)[:,None] - q3 = q1.copy() - - return pred_mean, pred_var, q1, q3 - - def samples(self, gp): - """ - Returns a set of samples of observations based on a given value of the latent variable. 
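[Reviewer note] For a Gaussian predictive density the quantile map above is exact: the q-quantile of N(mu, var) is mu + sqrt(var)*Phi^{-1}(q). Note, though, that `GP.predict_quantiles` passes the variance from `_raw_predict`, so the Gaussian likelihood's noise variance appears not to be included here, unlike in `predictive_values`. A quick standalone check of the identity itself:

    import numpy as np
    from scipy import stats

    # q-quantile of N(mu, var): mu + sqrt(var) * Phi^{-1}(q)
    mu, var, q = 1.0, 4.0, 0.975
    assert np.isclose(stats.norm.ppf(q)*np.sqrt(var) + mu,
                      mu + 1.96*np.sqrt(var), atol=1e-3)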
- - :param gp: latent variable - """ - raise NotImplementedError diff --git a/GPy/likelihoods/mixed_noise.py b/GPy/likelihoods/mixed_noise.py index b60f3adf..946cbaf6 100644 --- a/GPy/likelihoods/mixed_noise.py +++ b/GPy/likelihoods/mixed_noise.py @@ -3,56 +3,57 @@ from scipy import stats, special from GPy.util.univariate_Gaussian import std_norm_pdf, std_norm_cdf import link_functions from likelihood import Likelihood +from gaussian import Gaussian from ..core.parameterization import Param from ..core.parameterization.transformations import Logexp from ..core.parameterization import Parameterized import itertools class MixedNoise(Likelihood): - def __init__(self, likelihoods_list, noise_index, variance = None, name='mixed_noise'): - - Nlike = len(likelihoods_list) - self.order = np.unique(noise_index) - - assert self.order.size == Nlike - - if variance is None: - variance = np.ones(Nlike) - else: - assert variance.size == Nlike + def __init__(self, likelihoods_list, name='mixed_noise'): super(Likelihood, self).__init__(name=name) self.add_parameters(*likelihoods_list) self.likelihoods_list = likelihoods_list - self.noise_index = noise_index self.log_concave = False - self.likelihoods_indices = [noise_index.flatten()==j for j in self.order] - def covariance_matrix(self, Y, noise_index, **Y_metadata): - variance = np.zeros(Y.shape[0]) - for lik, ind in itertools.izip(self.likelihoods_list, self.likelihoods_indices): - variance[ind] = lik.variance - return np.diag(variance) + def update_gradients(self, gradients): + self.gradient = gradients - def update_gradients(self, partial, noise_index, **Y_metadata): - [lik.update_gradients(partial[ind]) for lik,ind in itertools.izip(self.likelihoods_list, self.likelihoods_indices)] + def exact_inference_gradients(self, dL_dKdiag, Y_metadata): + assert all([isinstance(l, Gaussian) for l in self.likelihoods_list]) + ind = Y_metadata['output_index'] + return np.array([dL_dKdiag[ind==i].sum() for i in range(len(self.likelihoods_list))]) - def predictive_values(self, mu, var, full_cov=False, noise_index=None, **Y_metadata): - _variance = np.array([ self.likelihoods_list[j].variance for j in noise_index ]) - if full_cov: - var += np.eye(var.shape[0])*_variance - d = 2*np.sqrt(np.diag(var)) - low, up = mu - d, mu + d + def predictive_values(self, mu, var, full_cov=False, Y_metadata=None): + if all([isinstance(l, Gaussian) for l in self.likelihoods_list]): + ind = Y_metadata['output_index'] + _variance = np.array([self.likelihoods_list[j].variance for j in ind ]) + if full_cov: + var += np.eye(var.shape[0])*_variance + d = 2*np.sqrt(np.diag(var)) + low, up = mu - d, mu + d + else: + var += _variance + d = 2*np.sqrt(var) + low, up = mu - d, mu + d + return mu, var, low, up else: - var += _variance - d = 2*np.sqrt(var) - low, up = mu - d, mu + d - return mu, var, low, up + raise NotImplementedError - def predictive_variance(self, mu, sigma, noise_index, predictive_mean=None, **Y_metadata): + def predictive_variance(self, mu, sigma, **other_shit): if isinstance(noise_index,int): _variance = self.variance[noise_index] else: _variance = np.array([ self.variance[j] for j in noise_index ])[:,None] return _variance + sigma**2 + + + def covariance_matrix(self, Y, Y_metadata): + assert all([isinstance(l, Gaussian) for l in self.likelihoods_list]) + variance = np.zeros(Y.shape[0]) + for lik, ind in itertools.izip(self.likelihoods_list, self.likelihoods_indices): + variance[ind] = lik.variance + return np.diag(variance) + diff --git 
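[Reviewer note] Two remarks on the MixedNoise rewrite above. First, `covariance_matrix` still iterates over `self.likelihoods_indices`, which the new `__init__` no longer defines (the `noise_index` machinery was removed in favour of Y_metadata); it likely needs the same `output_index` lookup that `exact_inference_gradients` uses. Second, `predictive_variance` still refers to the removed `noise_index` name. A Y_metadata-based sketch of the covariance assembly:

    import numpy as np

    def covariance_matrix(likelihoods_list, Y, Y_metadata):
        # one Gaussian noise variance per output, selected row-wise
        ind = Y_metadata['output_index'].flatten()
        variance = np.zeros(Y.shape[0])
        for i, lik in enumerate(likelihoods_list):
            variance[ind == i] = lik.variance
        return np.diag(variance)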
a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index faf2cf84..c87eb694 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -12,7 +12,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', which_data_ycols='all', fixed_inputs=[], levels=20, samples=0, fignum=None, ax=None, resolution=None, plot_raw=False, - linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue']): + linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None): """ Plot the posterior of the GP. - In one dimension, the function is plotted with a shaded region identifying two standard deviations. @@ -84,17 +84,12 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', m, v = model._raw_predict(Xgrid) lower = m - 2*np.sqrt(v) upper = m + 2*np.sqrt(v) - Y = Y else: - if 'noise_index' in model.Y_metadata.keys(): - if np.unique(model.Y_metadata['noise_index'][which_data_rows]).size > 1: - print "Data slices choosen have different noise models. Just one will be used." - noise_index = np.repeat(model.Y_metadata['noise_index'][which_data_rows][0], Xgrid.shape[0])[:,None] - m, v, lower, upper = model.predict(Xgrid,full_cov=False,noise_index=noise_index) - else: - noise_index = None - m, v, lower, upper = model.predict(Xgrid,full_cov=False) - Y = Y + m, v = model.predict(Xgrid, full_cov=False, Y_metadata=Y_metadata) + + lower, upper = model.predict_quantiles(Xgrid, Y_metadata=Y_metadata) + + for d in which_data_ycols: plots['gpplot'] = gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], ax=ax, edgecol=linecol, fillcol=fillcol) plots['dataplot'] = ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], 'kx', mew=1.5) @@ -144,10 +139,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #predict on the frame and plot if plot_raw: m, _ = model._raw_predict(Xgrid) - Y = Y else: - m, _, _, _ = model.predict(Xgrid) - Y = Y + m, _ = model.predict(Xgrid) for d in which_data_ycols: m_d = m[:,d].reshape(resolution, resolution).T plots['contour'] = ax.contour(x, y, m_d, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py index d55b0190..b1db94a7 100644 --- a/GPy/testing/likelihood_tests.py +++ b/GPy/testing/likelihood_tests.py @@ -667,8 +667,8 @@ class LaplaceTests(unittest.TestCase): m2[:] = m1[:] #Predict for training points to get posterior mean and variance - post_mean, post_var, _, _ = m1.predict(X) - post_mean_approx, post_var_approx, _, _ = m2.predict(X) + post_mean, post_var = m1.predict(X) + post_mean_approx, post_var_approx, = m2.predict(X) if debug: import pylab as pb diff --git a/GPy/util/caching.py b/GPy/util/caching.py index 5de03059..96b4cee9 100644 --- a/GPy/util/caching.py +++ b/GPy/util/caching.py @@ -48,7 +48,7 @@ class Cacher(object): if k in kw and kw[k] is not None: return self.operation(*args, **kw) # TODO: WARNING !!! Cache OFFSWITCH !!! 
WARNING - # return self.operation(*args) + return self.operation(*args) #if the result is cached, return the cached computation state = [all(a is b for a, b in itertools.izip_longest(args, cached_i)) for cached_i in self.cached_inputs] From b7508ce12bd2723e187ee829beffaf1b8262bcd1 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 13 Mar 2014 15:35:54 +0000 Subject: [PATCH 063/116] various fixes in likelihoods, esp studentT and plotting --- GPy/core/gp.py | 2 +- GPy/likelihoods/bernoulli.py | 1 - GPy/likelihoods/likelihood.py | 55 ++++++++++++++++++----------------- GPy/likelihoods/poisson.py | 8 ++--- GPy/likelihoods/student_t.py | 40 ++++++++++--------------- 5 files changed, 50 insertions(+), 56 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 73680900..a04ac8da 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -123,7 +123,7 @@ class GP(Model): mean, var = self.likelihood.predictive_values(mu, var, full_cov, Y_metadata) return mean, var - def predict_quantiles(self, X, quantiles=(0.025, 0.975), Y_metadata=None): + def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None): m, v = self._raw_predict(X, full_cov=False) return self.likelihood.predictive_quantiles(m, v, quantiles, Y_metadata) diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py index 10df906d..42eaaa36 100644 --- a/GPy/likelihoods/bernoulli.py +++ b/GPy/likelihoods/bernoulli.py @@ -93,7 +93,6 @@ class Bernoulli(Likelihood): return 0. else: return np.nan - #raise NotImplementedError def pdf_link(self, link_f, y, extra_data=None): """ diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index 331bcf8d..67c406df 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -58,6 +58,18 @@ class Likelihood(Parameterized): """ return Y + def conditional_mean(self, gp): + """ + The mean of the random variable conditioned on one value of the GP + """ + raise NotImplementedError + + def conditional_variance(self, gp): + """ + The variance of the random variable conditioned on one value of the GP + """ + raise NotImplementedError + def log_predictive_density(self, y_test, mu_star, var_star): """ Calculation of the log predictive density @@ -120,7 +132,7 @@ class Likelihood(Parameterized): return z, mean, variance - def _predictive_mean(self,mu,variance): + def predictive_mean(self, mu, variance, Y_metadata=None): """ Quadrature calculation of the predictive mean: E(Y_star|Y) = E( E(Y_star|f_star, Y) ) @@ -128,14 +140,15 @@ class Likelihood(Parameterized): :param sigma: standard deviation of posterior """ + #conditional_mean: the edpected value of y given some f, under this likelihood def int_mean(f,m,v): - return self._mean(f)*np.exp(-(0.5/v)*np.square(f - m)) + return self.conditional_mean(f)*np.exp(-(0.5/v)*np.square(f - m)) scaled_mean = [quad(int_mean, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)] mean = np.array(scaled_mean)[:,None] / np.sqrt(2*np.pi*(variance)) return mean - def _predictive_variance(self, mu,variance, predictive_mean=None): + def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None): """ Numerical approximation to the predictive variance: V(Y_star) @@ -152,7 +165,7 @@ class Likelihood(Parameterized): # E( V(Y_star|f_star) ) def int_var(f,m,v): - return self._variance(f)*np.exp(-(0.5/v)*np.square(f - m)) + return self.conditional_variance(f)*np.exp(-(0.5/v)*np.square(f - m)) scaled_exp_variance = [quad(int_var, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in 
 exp_var = np.array(scaled_exp_variance)[:,None] / normalizer
@@ -165,13 +178,14 @@ class Likelihood(Parameterized):
 #E( E(Y_star|f_star)**2 )
 def int_pred_mean_sq(f,m,v,predictive_mean_sq):
- return self._mean(f)**2*np.exp(-(0.5/v)*np.square(f - m))
+ return self.conditional_mean(f)**2*np.exp(-(0.5/v)*np.square(f - m))
 scaled_exp_exp2 = [quad(int_pred_mean_sq, -np.inf, np.inf,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)]
 exp_exp2 = np.array(scaled_exp_exp2)[:,None] / normalizer
 var_exp = exp_exp2 - predictive_mean_sq

- # V(Y_star) = E( V(Y_star|f_star) ) + V( E(Y_star|f_star) )
+ # V(Y_star) = E[ V(Y_star|f_star) ] + V[ E(Y_star|f_star) ]
+ # where V[ E(Y_star|f_star) ] = E[ E(Y_star|f_star)**2 ] - E[ E(Y_star|f_star) ]**2
 return exp_var + var_exp

 def pdf_link(self, link_f, y, extra_data=None):
@@ -373,6 +387,15 @@ class Likelihood(Parameterized):
 return pred_mean, pred_var

+ def predictive_quantiles(self, mu, var, quantiles, Y_metadata):
+ #compute the quantiles by sampling!!!
+ N_samp = 1000
+ s = np.random.randn(mu.shape[0], N_samp)*np.sqrt(var) + mu
+ ss_f = s.flatten()
+ ss_y = self.samples(ss_f)
+ ss_y = ss_y.reshape(mu.shape[0], N_samp)
+
+ return [np.percentile(ss_y, q, axis=1)[:,None] for q in quantiles]

 def samples(self, gp):
 """
 Returns a set of samples of observations based on a given value of the latent variable.

 :param gp: latent variable
 """
 raise NotImplementedError
- if sampling:
- #Get gp_samples f* using posterior mean and variance
- if not full_cov:
- gp_samples = np.random.multivariate_normal(mu.flatten(), np.diag(var.flatten()),
- size=num_samples).T
- else:
- gp_samples = np.random.multivariate_normal(mu.flatten(), var,
- size=num_samples).T
- #Push gp samples (f*) through likelihood to give p(y*|f*)
- samples = self.samples(gp_samples)
- axis=-1
-
- #Calculate mean, variance and precentiles from samples
- print "WARNING: Using sampling to calculate mean, variance and predictive quantiles."
- pred_mean = np.mean(samples, axis=axis)[:,None]
- pred_var = np.var(samples, axis=axis)[:,None]
- q1 = np.percentile(samples, 2.5, axis=axis)[:,None]
- q3 = np.percentile(samples, 97.5, axis=axis)[:,None]
-
-
diff --git a/GPy/likelihoods/poisson.py b/GPy/likelihoods/poisson.py
index ba6915b8..419514d1 100644
--- a/GPy/likelihoods/poisson.py
+++ b/GPy/likelihoods/poisson.py
@@ -131,15 +131,15 @@ class Poisson(Likelihood):
 d3lik_dlink3 = 2*y/(link_f)**3
 return d3lik_dlink3

- def _mean(self,gp):
+ def conditional_mean(self,gp):
 """
- Mass (or density) function
+ The mean of the random variable conditioned on one value of the GP
 """
 return self.gp_link.transf(gp)

- def _variance(self,gp):
+ def conditional_variance(self,gp):
 """
- Mass (or density) function
+ The variance of the random variable conditioned on one value of the GP
 """
 return self.gp_link.transf(gp)

diff --git a/GPy/likelihoods/student_t.py b/GPy/likelihoods/student_t.py
index 50d91953..12e0ae85 100644
--- a/GPy/likelihoods/student_t.py
+++ b/GPy/likelihoods/student_t.py
@@ -9,6 +9,7 @@ from scipy import stats, integrate
 from scipy.special import gammaln, gamma
 from likelihood import Likelihood
 from ..core.parameterization import Param
+from ..core.parameterization.transformations import Logexp

 class StudentT(Likelihood):
 """
@@ -28,7 +29,7 @@
 super(StudentT, self).__init__(gp_link, name='Student_T')

 self.sigma2 = Param('t_noise', float(sigma2))
 self.v = Param('deg_free', float(deg_free))
- self.add_parameter(self.sigma2)
+ self.add_parameter(self.sigma2, Logexp())
 self.add_parameter(self.v)
 self.v.constrain_fixed()
@@ -244,32 +245,23 @@ class StudentT(Likelihood):
 d2logpdf_dlink2_dv = np.zeros_like(d2logpdf_dlink2_dvar) #FIXME: Not done yet
 return np.hstack((d2logpdf_dlink2_dvar, d2logpdf_dlink2_dv))

- def predictive_variance(self, mu, sigma, predictive_mean=None):
- """
- Compute predictive variance of student_t*normal p(y*|f*)p(f*)
-
- Need to find what the variance is at the latent points for a student t*normal p(y*|f*)p(f*)
- (((g((v+1)/2))/(g(v/2)*s*sqrt(v*pi)))*(1+(1/v)*((y-f)/s)^2)^(-(v+1)/2))
- *((1/(s*sqrt(2*pi)))*exp(-(1/(2*(s^2)))*((y-f)^2)))
- """
-
- #FIXME: Not correct
- #We want the variance around test points y which comes from int p(y*|f*)p(f*) df*
- #Var(y*) = Var(E[y*|f*]) + E[Var(y*|f*)]
- #Since we are given f* (mu) which is our mean (expected) value of y*|f* then the variance is the variance around this
- #Which was also given to us as (var)
- #We also need to know the expected variance of y* around samples f*, this is the variance of the student t distribution
- #However the variance of the student t distribution is not dependent on f, only on sigma and the degrees of freedom
- true_var = 1/(1/sigma**2 + 1/self.variance)
-
- return true_var
-
- def predictive_mean(self, mu, sigma):
+ def predictive_mean(self, mu, sigma, Y_metadata=None):
 """
 Compute mean of the prediction
 """
- #FIXME: Not correct
- return mu
+ return self.gp_link.transf(mu) # only true if the link is monotonic, which it is.
+
+ def predictive_variance(self, mu, variance, predictive_mean=None, Y_metadata=None):
+ if self.deg_free < 2.:
+ return np.empty(mu.shape)*np.nan #not defined for small degrees of freedom
+ else:
+ return super(StudentT, self).predictive_variance(mu, variance, predictive_mean, Y_metadata)
+
+ def conditional_mean(self, gp):
+ return self.gp_link.transf(gp)
+
+ def conditional_variance(self, gp):
+ return self.deg_free/(self.deg_free - 2.)
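Editor's note: a minimal sketch (not part of the patch) of what the new Student-t moments return; the constructor keywords and the default identity link are assumptions here:

    import numpy as np
    import GPy

    lik = GPy.likelihoods.StudentT(deg_free=4., sigma2=2.)
    f = np.random.randn(5, 1)            # latent function values
    print lik.conditional_mean(f)        # gp_link.transf(f), i.e. f itself under the identity link
    print lik.conditional_variance(f)    # deg_free/(deg_free - 2.) = 2.0, constant in f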
def samples(self, gp): """ From 73e877a458ffac15d24e38c554338047c7590c78 Mon Sep 17 00:00:00 2001 From: Neil Lawrence Date: Thu, 13 Mar 2014 15:59:11 +0000 Subject: [PATCH 064/116] Google trends and football data sets. --- GPy/util/data_resources.json | 27 ++++++++++++++ GPy/util/datasets.py | 72 +++++++++++++++++++++++++++++++++--- GPy/util/football_teams.json | 1 + 3 files changed, 94 insertions(+), 6 deletions(-) create mode 100644 GPy/util/football_teams.json diff --git a/GPy/util/data_resources.json b/GPy/util/data_resources.json index ca15bf2d..57b79f10 100644 --- a/GPy/util/data_resources.json +++ b/GPy/util/data_resources.json @@ -32,6 +32,33 @@ "details":"Artificially generated data of silhouettes given poses. Note that the data does not display a left/right ambiguity because across the entire data set one of the arms sticks out more the the other, disambiguating the pose as to which way the individual is facing.", "size":1 }, + "football_data":{ + "files":[ + [ + "E0.csv", "E1.csv", "E2.csv", "E3.csv" + ] + ], + "citation":"", + "license":null, + "urls":[ + "http://www.football-data.co.uk/mmz4281/" + ], + "details":"Results of English football matches since 1993/94 season.", + "size":1 + }, + "google_trends":{ + "files":[ + [ + ] + ], + "citation":"", + "license":null, + "urls":[ + "http://www.google.com/trends/" + ], + "details":"Google trends results.", + "size":0 + }, "osu_accad":{ "files":[ [ diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 3c44703a..54e42733 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -1,5 +1,8 @@ +import csv import os +import copy import numpy as np +import pylab as pb import GPy import scipy.io import cPickle as pickle @@ -7,6 +10,8 @@ import zipfile import tarfile import datetime import json +import re + ipython_available=True try: import IPython @@ -32,11 +37,18 @@ neil_url = 'http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/dataset_mirror/' # Read data resources from json file. # Don't do this when ReadTheDocs is scanning as it breaks things on_rtd = os.environ.get('READTHEDOCS', None) == 'True' #Checks if RTD is scanning + if not (on_rtd): path = os.path.join(os.path.dirname(__file__), 'data_resources.json') json_data=open(path).read() data_resources = json.loads(json_data) +if not (on_rtd): + path = os.path.join(os.path.dirname(__file__), 'football_teams.json') + json_data=open(path).read() + football_dict = json.loads(json_data) + + def prompt_user(prompt): """Ask user for agreeing to data set licenses.""" @@ -274,9 +286,55 @@ def della_gatta_TRP63_gene_expression(data_set='della_gatta', gene_number=None): Y = Y[:, None] return data_details_return({'X': X, 'Y': Y, 'gene_number' : gene_number}, data_set) + + +def football_data(season='1314', data_set='football_data'): + """Football data from English games since 1993. This downloads data from football-data.co.uk for the given season. 
""" + def league2num(string): + league_dict = {'E0':0, 'E1':1, 'E2': 2, 'E3': 3, 'EC':4} + return league_dict[string] + + def football2num(string): + if football_dict.has_key(string): + return football_dict[string] + else: + football_dict[string] = len(football_dict)+1 + return len(football_dict)+1 + + data_set_season = data_set + '_' + season + data_resources[data_set_season] = copy.deepcopy(data_resources[data_set]) + data_resources[data_set_season]['urls'][0]+=season + '/' + start_year = int(year[0:2]) + end_year = int(year[2:4]) + files = ['E0.csv', 'E1.csv', 'E2.csv', 'E3.csv'] + if start_year>4 and start_year < 93: + files += ['EC.csv'] + data_resources[data_set_season]['files'] = [files] + if not data_available(data_set_season): + download_data(data_set_season) + for file in reversed(files): + filename = os.path.join(data_path, data_set_season, file) + # rewrite files removing blank rows. + writename = os.path.join(data_path, data_set_season, 'temp.csv') + input = open(filename, 'rb') + output = open(writename, 'wb') + writer = csv.writer(output) + for row in csv.reader(input): + if any(field.strip() for field in row): + writer.writerow(row) + input.close() + output.close() + table = np.loadtxt(writename,skiprows=1, usecols=(0, 1, 2, 3, 4, 5), converters = {0: league2num, 1: pb.datestr2num, 2:football2num, 3:football2num}, delimiter=',') + X = table[:, :4] + Y = table[:, 4:] + return data_details_return({'X': X, 'Y': Y}, data_set) + +# This will be for downloading google trends data. def google_trends(query_terms=['big data', 'machine learning', 'data science'], data_set='google_trends'): + """Data downloaded from Google trends for given query terms.""" # Inspired by this notebook: # http://nbviewer.ipython.org/github/sahuguet/notebooks/blob/master/GoogleTrends%20meet%20Notebook.ipynb + # quote the query terms. for i, element in enumerate(query_terms): query_terms[i] = urllib2.quote(element) @@ -284,18 +342,20 @@ def google_trends(query_terms=['big data', 'machine learning', 'data science'], data = urllib2.urlopen(query).read() - # We need to do some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD. + # In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD. 
header = """// Data table response\ngoogle.visualization.Query.setResponse(""" data = data[len(header):-2] data = re.sub('new Date\((\d+),(\d+),(\d+)\)', (lambda m: '"%s-%02d-%02d"' % (m.group(1).strip(), 1+int(m.group(2)), int(m.group(3)))), data) timeseries = json.loads(data) - import pandas as pd + #import pandas as pd columns = [k['label'] for k in timeseries['table']['cols']] rows = map(lambda x: [k['v'] for k in x['c']], timeseries['table']['rows']) - df = pd.DataFrame(rows, columns=columns) - df.set_index('Date', inplace=True) - df.plot(figsize=(16, 8)) - + terms = len(columns)-1 + X = np.asarray([(pb.datestr2num(row[0]), i) for i in range(terms) for row in rows ]) + Y = np.asarray([[row[i+1]] for i in range(terms) for row in rows ]) + output_info = columns[1:] + return data_details_return({'X': X, 'Y': Y, 'query_terms': output_info, 'info': "Data downloaded from google trends with query terms: " + ', '.join(output_info) + '.'}, data_set) + # The data sets def oil(data_set='three_phase_oil_flow'): """The three phase oil data from Bishop and James (1993).""" diff --git a/GPy/util/football_teams.json b/GPy/util/football_teams.json new file mode 100644 index 00000000..a4eb9c38 --- /dev/null +++ b/GPy/util/football_teams.json @@ -0,0 +1 @@ +{"Canvey Island": 94, "Crewe": 21, "Fleetwood Town": 134, "Wrexham": 89, "Barnet": 69, "Ipswich": 29, "Rochdale": 84, "Bristol Rvs": 70, "Liverpool": 10, "Chelsea": 20, "York": 113, "Newcastle": 18, "QPR": 28, "Middlesboro": 116, "Tranmere": 68, "Bury": 72, "Luton": 24, "AFC Wimbledon": 126, "West Ham": 15, "Braintree Town": 135, "Bournemouth": 58, "Hayes & Yeading": 130, "Rushden & D": 81, "Weymouth": 120, "Chesterfield": 48, "Exeter": 104, "Barnsley": 45, "Aldershot": 95, "Gateshead": 129, "Hartlepool": 55, "Newport County": 132, "Crystal Palace": 23, "Ebbsfleet": 123, "Wigan": 19, "Shrewsbury": 83, "Hereford": 105, "Stevenage": 111, "Grimsby": 73, "Crawley Town": 114, "Morecambe": 109, "Oldham": 61, "Aston Villa": 1, "Bristol City": 51, "Gravesend": 103, "Huddersfield": 60, "Reading": 33, "Nuneaton Town": 140, "AFC Telford United": 137, "Wycombe": 91, "Leeds": 43, "Colchester": 54, "Rotherham": 63, "Southport": 100, "Southampton": 37, "Darlington": 82, "Blackburn": 16, "Bath City": 133, "Yeovil": 62, "Leyton Orient": 75, "Forest Green": 101, "Chester": 80, "Halifax": 110, "Portsmouth": 11, "Woking": 108, "Histon": 125, "Man City": 7, "Northampton": 78, "Arsenal": 17, "Charlton": 14, "Middlesbrough": 9, "Watford": 41, "Nott'm Forest": 59, "Eastbourne Borough": 131, "Hull": 27, "Barrow": 127, "Doncaster": 52, "Carlisle": 92, "Gillingham": 53, "Accrington": 93, "Dartford": 139, "Altrincham": 112, "Scarborough": 106, "Northwich": 117, "Farsley": 124, "Tamworth": 96, "St. 
Albans": 119, "Alfreton Town": 136, "Mansfield": 86, "Macclesfield": 76, "Torquay": 87, "Brighton": 26, "Bradford": 56, "Lincoln": 77, "Brentford": 49, "Everton": 3, "Cambridge": 102, "Sheffield United": 35, "Stockport": 85, "Bolton": 2, "Southend": 65, "Cheltenham": 71, "Walsall": 64, "Preston": 42, "Peterboro": 79, "Birmingham": 6, "Boston": 90, "Burton": 97, "West Brom": 8, "Man United": 4, "Stafford Rangers": 118, "Wimbledon": 115, "Scunthorpe": 50, "Kidderminster": 107, "Millwall": 44, "Swansea": 67, "Norwich": 31, "Burnley": 22, "Sunderland": 13, "Sheffield Weds": 40, "Fulham": 5, "Dag and Red": 99, "Oxford": 74, "Stoke": 39, "Tottenham": 12, "Kettering Town": 128, "Coventry": 32, "Wolves": 38, "Port Vale": 66, "Milton Keynes Dons": 57, "Plymouth": 34, "Derby": 25, "Notts County": 88, "Leicester": 36, "Droylsden": 121, "Blackpool": 47, "Salisbury": 122, "Cardiff": 30, "Grays": 98, "Swindon": 46, "Hyde United": 138} \ No newline at end of file From f0d97f5b8404023e67ae06510dde5c31db4bb86b Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 13 Mar 2014 16:03:19 +0000 Subject: [PATCH 065/116] fixed the posterior prediction for laplace The mis-match between the woodbury vector and KIf is still a bit of a mystery --- GPy/inference/latent_function_inference/laplace.py | 4 +++- GPy/likelihoods/student_t.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py index e6ca720a..4f529d5e 100644 --- a/GPy/inference/latent_function_inference/laplace.py +++ b/GPy/inference/latent_function_inference/laplace.py @@ -53,11 +53,13 @@ class Laplace(object): f_hat, Ki_fhat = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata) self.f_hat = f_hat + self.Ki_fhat = Ki_fhat + self.K = K.copy() #Compute hessian and other variables at mode log_marginal, woodbury_vector, woodbury_inv, dL_dK, dL_dthetaL = self.mode_computations(f_hat, Ki_fhat, K, Y, likelihood, kern, Y_metadata) self._previous_Ki_fhat = Ki_fhat.copy() - return Posterior(woodbury_vector=woodbury_vector, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL} + return Posterior(woodbury_vector=Ki_fhat, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL} def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None): """ diff --git a/GPy/likelihoods/student_t.py b/GPy/likelihoods/student_t.py index 12e0ae85..ce86d9d6 100644 --- a/GPy/likelihoods/student_t.py +++ b/GPy/likelihoods/student_t.py @@ -27,9 +27,9 @@ class StudentT(Likelihood): super(StudentT, self).__init__(gp_link, name='Student_T') - self.sigma2 = Param('t_noise', float(sigma2)) + self.sigma2 = Param('t_noise', float(sigma2), Logexp()) self.v = Param('deg_free', float(deg_free)) - self.add_parameter(self.sigma2, Logexp()) + self.add_parameter(self.sigma2) self.add_parameter(self.v) self.v.constrain_fixed() From c302e515e20638db0a4463a2bfba412758e38678 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 13 Mar 2014 16:44:39 +0000 Subject: [PATCH 066/116] plotting fix --- GPy/core/gp.py | 4 ++-- GPy/likelihoods/gaussian.py | 2 +- GPy/likelihoods/likelihood.py | 2 +- GPy/likelihoods/student_t.py | 2 +- GPy/plotting/matplot_dep/models_plots.py | 3 +-- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index a04ac8da..35a41cde 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -148,7 +148,7 @@ class GP(Model): return Ysim - def 
posterior_samples(self,X,size=10, full_cov=True, Y_metadata=None): + def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None): """ Samples the posterior GP at the points X. @@ -163,7 +163,7 @@ class GP(Model): :returns: Ysim: set of simulations, a Numpy array (N x samples). """ Ysim = self.posterior_samples_f(X, size, full_cov=full_cov) - Ysim = self.likelihood.noise_model.samples(Ysim, Y_metadata) + Ysim = self.likelihood.samples(Ysim, Y_metadata) return Ysim diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py index 032136a7..aaa356b6 100644 --- a/GPy/likelihoods/gaussian.py +++ b/GPy/likelihoods/gaussian.py @@ -94,7 +94,7 @@ class Gaussian(Likelihood): return self.variance + sigma**2 def predictive_quantiles(self, mu, var, quantiles, Y_metadata): - return [stats.norm.ppf(q)*np.sqrt(var) + mu for q in quantiles] + return [stats.norm.ppf(q/100.)*np.sqrt(var) + mu for q in quantiles] def pdf_link(self, link_f, y, extra_data=None): """ diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index 67c406df..3eafedb1 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -397,7 +397,7 @@ class Likelihood(Parameterized): return [np.percentile(ss_y ,q, axis=1)[:,None] for q in quantiles] - def samples(self, gp): + def samples(self, gp, Y_metadata=None): """ Returns a set of samples of observations based on a given value of the latent variable. diff --git a/GPy/likelihoods/student_t.py b/GPy/likelihoods/student_t.py index ce86d9d6..15fd9fa0 100644 --- a/GPy/likelihoods/student_t.py +++ b/GPy/likelihoods/student_t.py @@ -263,7 +263,7 @@ class StudentT(Likelihood): def conditional_variance(self, gp): return self.deg_free/(self.deg_free - 2.) - def samples(self, gp): + def samples(self, gp, Y_metadata=None): """ Returns a set of samples of observations based on a given value of the latent variable. 
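Editor's note: a short sketch (not part of the patch) of the quantile convention after this change. Quantiles are now passed in percent, so the Gaussian predictive_quantiles above reduces to the familiar two-sided normal band:

    import numpy as np
    from scipy import stats

    mu, var = np.zeros((3, 1)), np.ones((3, 1))
    quantiles = (2.5, 97.5)  # percent, the new predict_quantiles default
    lower, upper = [stats.norm.ppf(q / 100.) * np.sqrt(var) + mu for q in quantiles]
    # lower is about -1.96, upper about +1.96 for a standard normal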
diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index c87eb694..7507c376 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -86,8 +86,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', upper = m + 2*np.sqrt(v) else: m, v = model.predict(Xgrid, full_cov=False, Y_metadata=Y_metadata) - - lower, upper = model.predict_quantiles(Xgrid, Y_metadata=Y_metadata) + lower, upper = model.predict_quantiles(Xgrid, Y_metadata=Y_metadata) for d in which_data_ycols: From 433b2131654d8cc84f646b822f9a3875d83a312c Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 13 Mar 2014 17:02:29 +0000 Subject: [PATCH 067/116] independent output gradients --- GPy/kern/_src/independent_outputs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 0cbd5be4..438168a0 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -83,7 +83,7 @@ class IndependentOutputs(Kern): target = np.zeros_like(X) slices = index_to_slices(X[:,self.index_dim]) if X2 is None: - [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,s],X[s],X[ss])) for s, ss in product(slices_i, slices_i)] for slices_i in slices] + [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,ss],X[s],X[ss])) for s, ss in itertools.product(slices_i, slices_i)] for slices_i in slices] else: X2,slices2 = X2[:,:self.index_dim],index_to_slices(X2[:,-1]) [[[np.copyto(target[s,:self.index_dim], self.kern.gradients_X(dL_dK[s,s2], X[s], X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] From f50b121d4d4ca70f44150c4fd2400e20092ecbfe Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 13 Mar 2014 17:05:46 +0000 Subject: [PATCH 068/116] Alans change to checkgrad --- GPy/core/model.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index c2a9ed23..6d90e13a 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -15,7 +15,7 @@ import itertools class Model(Parameterized): _fail_count = 0 # Count of failed optimization steps (see objective) _allowed_failures = 10 # number of allowed failures - + def __init__(self, name): super(Model, self).__init__(name) # Parameterized.__init__(self) self.optimization_runs = [] @@ -27,7 +27,7 @@ class Model(Parameterized): def _log_likelihood_gradients(self): return self.gradient - + def _getstate(self): """ Get the current state of the class. @@ -231,7 +231,7 @@ class Model(Parameterized): raise RuntimeError, "Cannot optimize, when everything is fixed" if self.size == 0: raise RuntimeError, "Model without parameters cannot be minimized" - + if optimizer is None: optimizer = self.preferred_optimizer @@ -301,9 +301,8 @@ class Model(Parameterized): denominator = (2 * np.dot(dx, gradient)) global_ratio = (f1 - f2) / np.where(denominator==0., 1e-32, denominator) - gloabl_diff = (f1 - f2) - denominator - return (np.abs(1. - global_ratio) < tolerance) or (np.abs(gloabl_diff) == 0) + return np.abs(1. 
- global_ratio) < tolerance) else: # check the gradient of each parameter individually, and do some pretty printing try: @@ -349,7 +348,7 @@ class Model(Parameterized): xx[xind] -= 2.*step f2 = self.objective_function(xx) numerical_gradient = (f1 - f2) / (2 * step) - if np.all(gradient[xind]==0): ratio = (f1-f2) == gradient[xind] + if np.all(gradient[xind]==0): ratio = (f1-f2) == gradient[xind] else: ratio = (f1 - f2) / (2 * step * gradient[xind]) difference = np.abs((f1 - f2) / 2 / step - gradient[xind]) From 55f5da69f9eaa9b387a9c41a2265e0d420b7d0e6 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Thu, 13 Mar 2014 17:09:55 +0000 Subject: [PATCH 069/116] Added test for independent kern --- GPy/testing/kernel_tests.py | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index d54b3871..673acb92 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -258,20 +258,20 @@ class KernelGradientTestsContinuous(unittest.TestCase): # self.N, self.D = 100, 1 # self.X = np.random.randn(self.N,self.D) # self.X2 = np.random.randn(self.N+10,self.D) -# +# # continuous_kerns = ['RBF', 'Linear'] # self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns] -# +# # def test_PeriodicExponential(self): # k = GPy.kern.PeriodicExponential(self.D) # k.randomize() # self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) -# +# # def test_PeriodicMatern32(self): # k = GPy.kern.PeriodicMatern32(self.D) # k.randomize() # self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) -# +# # def test_PeriodicMatern52(self): # k = GPy.kern.PeriodicMatern52(self.D) # k.randomize() @@ -279,7 +279,6 @@ class KernelGradientTestsContinuous(unittest.TestCase): class KernelTestsMiscellaneous(unittest.TestCase): - def setUp(self): N, D = 100, 10 self.X = np.linspace(-np.pi, +np.pi, N)[:,None] * np.ones(D) @@ -298,6 +297,24 @@ class KernelTestsMiscellaneous(unittest.TestCase): self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=[self.linear, self.rbf]), self.linear.K(self.X)+self.rbf.K(self.X))) self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=self.sumkern.parts[0]), self.rbf.K(self.X))) +class KernelTestsNonContinuous(unittest.TestCase): + def setUp(self): + N = 100 + N1 = 110 + self.D = 2 + D = self.D + self.X = np.random.randn(N,D) + self.X2 = np.random.randn(N1,D) + self.X_block = np.zeros((N+N1, D+D+1)) + self.X_block[0:N, 0:D] = self.X + self.X_block[N:N+N1, D:D+D] = self.X2 + self.X_block[0:N, -1] = 1 + self.X_block[N:N+1, -1] = 2 + + def test_IndependantOutputs(self): + k = GPy.kern.RBF(self.D) + kern = GPy.kern.IndependentOutputs(self.D+self.D,k) + self.assertTrue(check_kernel_gradient_functions(kern, X=self.X, X2=self.X2, verbose=verbose)) if __name__ == "__main__": print "Running unit tests, please be (very) patient..." 
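Editor's note: to make the global gradient check from patch 068 above concrete (a sketch, not from the patch), the symmetric difference f(x+dx)-f(x-dx) is compared against 2*dx.dot(gradient); for a quadratic objective the two agree to rounding error, so the ratio sits at 1:

    import numpy as np

    f = lambda x: float(np.sum(x ** 2))
    grad = lambda x: 2 * x                        # analytic gradient of f
    x, dx = np.array([1.0, -2.0]), 1e-6 * np.ones(2)
    f1, f2 = f(x + dx), f(x - dx)
    global_ratio = (f1 - f2) / (2 * np.dot(dx, grad(x)))
    assert abs(1. - global_ratio) < 1e-4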
From efcce6d0af509415c79dc8d8b4ff966b2ff0b809 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Fri, 14 Mar 2014 09:18:08 +0000
Subject: [PATCH 070/116] active_dims as extra parameter for kernels, it tells which input dimensions to work on

---
 GPy/kern/_src/add.py | 4 +++-
 GPy/kern/_src/brownian.py | 4 ++--
 GPy/kern/_src/coregionalize.py | 4 ++--
 GPy/kern/_src/kern.py | 42 ++++++++++++++++++++++------------
 GPy/kern/_src/linear.py | 4 ++--
 GPy/kern/_src/mlp.py | 4 ++--
 GPy/kern/_src/periodic.py | 16 ++++++-------
 GPy/kern/_src/rbf.py | 4 ++--
 GPy/kern/_src/ssrbf.py | 4 ++--
 GPy/kern/_src/static.py | 14 ++++++------
 GPy/kern/_src/stationary.py | 28 +++++++++++------------
 GPy/kern/_src/sympykern.py | 4 ++--
 12 files changed, 73 insertions(+), 59 deletions(-)

diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py
index 1e386c01..97afd1f0 100644
--- a/GPy/kern/_src/add.py
+++ b/GPy/kern/_src/add.py
@@ -9,7 +9,9 @@ from kern import CombinationKernel
 class Add(CombinationKernel):
 """
 Add given list of kernels together.
- propagates gradients thorugh.
+ propagates gradients through.
+
+ This kernel will take over the active dims of its subkernels passed in.
 """
 def __init__(self, subkerns, name='add'):
 super(Add, self).__init__(subkerns, name)
diff --git a/GPy/kern/_src/brownian.py b/GPy/kern/_src/brownian.py
index 81b57a25..aeb11fa3 100644
--- a/GPy/kern/_src/brownian.py
+++ b/GPy/kern/_src/brownian.py
@@ -17,9 +17,9 @@ class Brownian(Kern):
 :param variance:
 :type variance: float
 """
- def __init__(self, input_dim=1, variance=1., name='Brownian'):
+ def __init__(self, input_dim=1, variance=1., active_dims=None, name='Brownian'):
 assert input_dim==1, "Brownian motion in 1D only"
- super(Brownian, self).__init__(input_dim, name)
+ super(Brownian, self).__init__(input_dim, active_dims, name)
 self.variance = Param('variance', variance, Logexp())
 self.add_parameters(self.variance)
diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py
index 3503bbd6..7eccff3d 100644
--- a/GPy/kern/_src/coregionalize.py
+++ b/GPy/kern/_src/coregionalize.py
@@ -34,8 +34,8 @@ class Coregionalize(Kern):
 .. note: see coregionalization examples in GPy.examples.regression for some usage.
 """
- def __init__(self, input_dim, output_dim, rank=1, W=None, kappa=None, name='coregion'):
- super(Coregionalize, self).__init__(input_dim, name=name)
+ def __init__(self, input_dim, output_dim, rank=1, W=None, kappa=None, active_dims=None, name='coregion'):
+ super(Coregionalize, self).__init__(input_dim, active_dims, name=name)
 self.output_dim = output_dim
 self.rank = rank
 if self.rank>output_dim:
diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py
index dc6eceb4..cb38416c 100644
--- a/GPy/kern/_src/kern.py
+++ b/GPy/kern/_src/kern.py
@@ -16,26 +16,24 @@ class Kern(Parameterized):
 __metaclass__ = KernCallsViaSlicerMeta
 #===========================================================================
 _debug=False
- def __init__(self, input_dim, name, *a, **kw):
+ def __init__(self, input_dim, active_dims, name, *a, **kw):
 """
 The base class for a kernel: a positive definite function which forms of a covariance function (kernel).

- :param input_dim: the number of input dimensions to the function
- :type input_dim: int
+ :param int input_dim: the number of input dimensions to the function
+ :param array-like|slice active_dims: the indices of the input dimensions this kernel works on

 Do not instantiate.
""" super(Kern, self).__init__(name=name, *a, **kw) - if isinstance(input_dim, int): - self.active_dims = np.r_[0:input_dim] - self.input_dim = input_dim - else: - self.active_dims = np.r_[input_dim] - self.input_dim = len(self.active_dims) + self.active_dims = active_dims or slice(0, input_dim) + self.input_dim = input_dim + assert isinstance(self.active_dims, (slice, list, tuple, np.ndarray)), 'active_dims needs to be an array-like or slice object over dimensions, {} given'.format(self.active_dims.__class__) + assert self.active_dims.size == self.input_dim, "input_dim {} does not match len(active_dim) {}".format(self.input_dim, self.active_dims.size) self._sliced_X = 0 - @Cache_this(limit=10)#, ignore_args = (0,)) + @Cache_this(limit=10) def _slice_X(self, X): return X[:, self.active_dims] @@ -69,9 +67,7 @@ class Kern(Parameterized): def update_gradients_full(self, dL_dK, X, X2): """Set the gradients of all parameters when doing full (N) inference.""" raise NotImplementedError - def update_gradients_diag(self, dL_dKdiag, X): - """Set the gradients for all parameters for the derivative of the diagonal of the covariance w.r.t the kernel parameters.""" - raise NotImplementedError + def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): """ Set the gradients of all parameters when doing inference with @@ -193,13 +189,29 @@ class Kern(Parameterized): super(Kern, self)._setstate(state) class CombinationKernel(Kern): - def __init__(self, kernels, name): + """ + Abstract super class for combination kernels. + A combination kernel combines (a list of) kernels and works on those. + Examples are the HierarchicalKernel or Add and Prod kernels. + """ + def __init__(self, kernels, name, extra_dims=[]): + """ + Abstract super class for combination kernels. + A combination kernel combines (a list of) kernels and works on those. + Examples are the HierarchicalKernel or Add and Prod kernels. 
+ + :param list kernels: List of kernels to combine (can be only one element) + :param str name: name of the combination kernel + :param array-like|slice extra_dims: if needed extra dimensions for the combination kernel to work on + """ assert all([isinstance(k, Kern) for k in kernels]) + import itertools # make sure the active dimensions of all underlying kernels are covered: - ma = reduce(lambda a,b: max(a, max(b)), (x.active_dims for x in kernels), 0) + ma = reduce(lambda a,b: max(a, b.stop if isinstance(b, slice) else max(b)), itertools.chain((x.active_dims for x in kernels), [extra_dims]), 0) input_dim = np.r_[0:ma+1] # initialize the kernel with the full input_dim super(CombinationKernel, self).__init__(input_dim, name) + self.extra_dims = extra_dims self.add_parameters(*kernels) @property diff --git a/GPy/kern/_src/linear.py b/GPy/kern/_src/linear.py index f2ac0124..15e23d5c 100644 --- a/GPy/kern/_src/linear.py +++ b/GPy/kern/_src/linear.py @@ -34,8 +34,8 @@ class Linear(Kern): """ - def __init__(self, input_dim, variances=None, ARD=False, name='linear'): - super(Linear, self).__init__(input_dim, name) + def __init__(self, input_dim, variances=None, ARD=False, active_dims=None, name='linear'): + super(Linear, self).__init__(input_dim, active_dims, name) self.ARD = ARD if not ARD: if variances is not None: diff --git a/GPy/kern/_src/mlp.py b/GPy/kern/_src/mlp.py index ee15d967..0b561d4b 100644 --- a/GPy/kern/_src/mlp.py +++ b/GPy/kern/_src/mlp.py @@ -31,8 +31,8 @@ class MLP(Kern): """ - def __init__(self, input_dim, variance=1., weight_variance=1., bias_variance=100., name='mlp'): - super(MLP, self).__init__(input_dim, name) + def __init__(self, input_dim, variance=1., weight_variance=1., bias_variance=100., active_dims=None, name='mlp'): + super(MLP, self).__init__(input_dim, active_dims, name) self.variance = Param('variance', variance, Logexp()) self.weight_variance = Param('weight_variance', weight_variance, Logexp()) self.bias_variance = Param('bias_variance', bias_variance, Logexp()) diff --git a/GPy/kern/_src/periodic.py b/GPy/kern/_src/periodic.py index 6b423a57..a8573a05 100644 --- a/GPy/kern/_src/periodic.py +++ b/GPy/kern/_src/periodic.py @@ -10,7 +10,7 @@ from ...core.parameterization.param import Param from ...core.parameterization.transformations import Logexp class Periodic(Kern): - def __init__(self, input_dim, variance, lengthscale, period, n_freq, lower, upper, name): + def __init__(self, input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name): """ :type input_dim: int :param variance: the variance of the Matern kernel @@ -25,7 +25,7 @@ class Periodic(Kern): """ assert input_dim==1, "Periodic kernels are only defined for input_dim=1" - super(Periodic, self).__init__(input_dim, name) + super(Periodic, self).__init__(input_dim, active_dims, name) self.input_dim = input_dim self.lower,self.upper = lower, upper self.n_freq = n_freq @@ -77,8 +77,8 @@ class PeriodicExponential(Periodic): Only defined for input_dim=1. 
""" - def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, name='periodic_exponential'): - super(PeriodicExponential, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, name) + def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, active_dims=None, name='periodic_exponential'): + super(PeriodicExponential, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name) def parameters_changed(self): self.a = [1./self.lengthscale, 1.] @@ -187,8 +187,8 @@ class PeriodicMatern32(Periodic): """ - def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, name='periodic_Matern32'): - super(PeriodicMatern32, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, name) + def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, active_dims=None, name='periodic_Matern32'): + super(PeriodicMatern32, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name) def parameters_changed(self): self.a = [3./self.lengthscale**2, 2*np.sqrt(3)/self.lengthscale, 1.] self.b = [1,self.lengthscale**2/3] @@ -300,8 +300,8 @@ class PeriodicMatern52(Periodic): """ - def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, name='periodic_Matern52'): - super(PeriodicMatern52, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, name) + def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, active_dims=None, name='periodic_Matern52'): + super(PeriodicMatern52, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name) def parameters_changed(self): self.a = [5*np.sqrt(5)/self.lengthscale**3, 15./self.lengthscale**2,3*np.sqrt(5)/self.lengthscale, 1.] diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py index 341d46a7..c2877d06 100644 --- a/GPy/kern/_src/rbf.py +++ b/GPy/kern/_src/rbf.py @@ -19,8 +19,8 @@ class RBF(Stationary): k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) """ - def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='rbf'): - super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, name) + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf'): + super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) self.weave_options = {} def K_of_r(self, r): diff --git a/GPy/kern/_src/ssrbf.py b/GPy/kern/_src/ssrbf.py index c566c414..bf87bf76 100644 --- a/GPy/kern/_src/ssrbf.py +++ b/GPy/kern/_src/ssrbf.py @@ -33,9 +33,9 @@ class SSRBF(Stationary): .. Note: this object implements both the ARD and 'spherical' version of the function """ - def __init__(self, input_dim, variance=1., lengthscale=None, ARD=True, name='SSRBF'): + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=True, active_dims=None, name='SSRBF'): assert ARD==True, "Not Implemented!" 
- super(SSRBF, self).__init__(input_dim, variance, lengthscale, ARD, name)
+ super(SSRBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)

 def K_of_r(self, r):
 return self.variance * np.exp(-0.5 * r**2)
diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py
index 387c92c6..4c9d943c 100644
--- a/GPy/kern/_src/static.py
+++ b/GPy/kern/_src/static.py
@@ -9,7 +9,7 @@ from ...core.parameterization.transformations import Logexp
 import numpy as np

 class Static(Kern):
- def __init__(self, input_dim, variance, name):
+ def __init__(self, input_dim, variance, active_dims, name):
 super(Static, self).__init__(input_dim, name)
 self.variance = Param('variance', variance, Logexp())
 self.add_parameters(self.variance)
@@ -43,8 +43,8 @@ class Static(Kern):

 class White(Static):
- def __init__(self, input_dim, variance=1., name='white'):
- super(White, self).__init__(input_dim, variance, name)
+ def __init__(self, input_dim, variance=1., active_dims=None, name='white'):
+ super(White, self).__init__(input_dim, variance, active_dims, name)

 def K(self, X, X2=None):
 if X2 is None:
@@ -66,8 +66,8 @@ class White(Static):

 class Bias(Static):
- def __init__(self, input_dim, variance=1., name='bias'):
- super(Bias, self).__init__(input_dim, variance, name)
+ def __init__(self, input_dim, variance=1., active_dims=None, name='bias'):
+ super(Bias, self).__init__(input_dim, variance, active_dims, name)

 def K(self, X, X2=None):
 shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0])
@@ -90,14 +90,14 @@ class Bias(Static):
 self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()

 class Fixed(Static):
- def __init__(self, input_dim, covariance_matrix, variance=1., name='fixed'):
+ def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
 """
 :param input_dim: the number of input dimensions
 :type input_dim: int
 :param variance: the variance of the kernel
 :type variance: float
 """
- super(Bias, self).__init__(input_dim, variance, name)
+ super(Fixed, self).__init__(input_dim, variance, active_dims, name)
 self.fixed_K = covariance_matrix
 def K(self, X, X2):
 return self.variance * self.fixed_K
diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py
index 725f8660..df7ba058 100644
--- a/GPy/kern/_src/stationary.py
+++ b/GPy/kern/_src/stationary.py
@@ -41,8 +41,8 @@ class Stationary(Kern):
 """
- def __init__(self, input_dim, variance, lengthscale, ARD, name):
- super(Stationary, self).__init__(input_dim, name)
+ def __init__(self, input_dim, variance, lengthscale, ARD, active_dims, name):
+ super(Stationary, self).__init__(input_dim, active_dims, name)
 self.ARD = ARD
 if not ARD:
 if lengthscale is None:
@@ -186,8 +186,8 @@ class Stationary(Kern):
 return np.ones(self.input_dim)/self.lengthscale

 class Exponential(Stationary):
- def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='Exponential'):
- super(Exponential, self).__init__(input_dim, variance, lengthscale, ARD, name)
+ def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Exponential'):
+ super(Exponential, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)

 def K_of_r(self, r):
 return self.variance * np.exp(-0.5 * r)
@@ -205,8 +205,8 @@ class Matern32(Stationary):
 """
- def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='Mat32'):
- super(Matern32, self).__init__(input_dim, variance, lengthscale, ARD, name)
+ def __init__(self, input_dim,
variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat32'): + super(Matern32, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) def K_of_r(self, r): return self.variance * (1. + np.sqrt(3.) * r) * np.exp(-np.sqrt(3.) * r) @@ -249,8 +249,8 @@ class Matern52(Stationary): k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r) """ - def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='Mat52'): - super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, name) + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat52'): + super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) def K_of_r(self, r): return self.variance*(1+np.sqrt(5.)*r+5./3*r**2)*np.exp(-np.sqrt(5.)*r) @@ -291,8 +291,8 @@ class Matern52(Stationary): class ExpQuad(Stationary): - def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='ExpQuad'): - super(ExpQuad, self).__init__(input_dim, variance, lengthscale, ARD, name) + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='ExpQuad'): + super(ExpQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) def K_of_r(self, r): return self.variance * np.exp(-0.5 * r**2) @@ -301,8 +301,8 @@ class ExpQuad(Stationary): return -r*self.K_of_r(r) class Cosine(Stationary): - def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='Cosine'): - super(Cosine, self).__init__(input_dim, variance, lengthscale, ARD, name) + def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Cosine'): + super(Cosine, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) def K_of_r(self, r): return self.variance * np.cos(r) @@ -322,8 +322,8 @@ class RatQuad(Stationary): """ - def __init__(self, input_dim, variance=1., lengthscale=None, power=2., ARD=False, name='ExpQuad'): - super(RatQuad, self).__init__(input_dim, variance, lengthscale, ARD, name) + def __init__(self, input_dim, variance=1., lengthscale=None, power=2., ARD=False, active_dims=None, name='ExpQuad'): + super(RatQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) self.power = Param('power', power, Logexp()) self.add_parameters(self.power) diff --git a/GPy/kern/_src/sympykern.py b/GPy/kern/_src/sympykern.py index 3f6b5445..6f066e98 100644 --- a/GPy/kern/_src/sympykern.py +++ b/GPy/kern/_src/sympykern.py @@ -26,13 +26,13 @@ class Sympykern(Kern): - to handle multiple inputs, call them x_1, z_1, etc - to handle multpile correlated outputs, you'll need to add parameters with an index, such as lengthscale_i and lengthscale_j. """ - def __init__(self, input_dim, k=None, output_dim=1, name=None, param=None): + def __init__(self, input_dim, k=None, output_dim=1, name=None, param=None, active_dims=None): if name is None: name='sympykern' if k is None: raise ValueError, "You must provide an argument for the covariance function." 
- super(Sympykern, self).__init__(input_dim, name) + super(Sympykern, self).__init__(input_dim, active_dims, name) self._sp_k = k From f9b02be40ebcd6ab45e5586dc1f71da24f786507 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 09:18:58 +0000 Subject: [PATCH 071/116] kernel tests periodic --- GPy/testing/kernel_tests.py | 46 ++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index d54b3871..9f366afa 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -253,29 +253,29 @@ class KernelGradientTestsContinuous(unittest.TestCase): self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) #TODO: turn off grad checkingwrt X for indexed kernels liek coregionalize -# class KernelGradientTestsContinuous1D(unittest.TestCase): -# def setUp(self): -# self.N, self.D = 100, 1 -# self.X = np.random.randn(self.N,self.D) -# self.X2 = np.random.randn(self.N+10,self.D) -# -# continuous_kerns = ['RBF', 'Linear'] -# self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns] -# -# def test_PeriodicExponential(self): -# k = GPy.kern.PeriodicExponential(self.D) -# k.randomize() -# self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) -# -# def test_PeriodicMatern32(self): -# k = GPy.kern.PeriodicMatern32(self.D) -# k.randomize() -# self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) -# -# def test_PeriodicMatern52(self): -# k = GPy.kern.PeriodicMatern52(self.D) -# k.randomize() -# self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) +class KernelGradientTestsContinuous1D(unittest.TestCase): + def setUp(self): + self.N, self.D = 100, 1 + self.X = np.random.randn(self.N,self.D) + self.X2 = np.random.randn(self.N+10,self.D) + + continuous_kerns = ['RBF', 'Linear'] + self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns] + + def test_PeriodicExponential(self): + k = GPy.kern.PeriodicExponential(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + + def test_PeriodicMatern32(self): + k = GPy.kern.PeriodicMatern32(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + + def test_PeriodicMatern52(self): + k = GPy.kern.PeriodicMatern52(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) class KernelTestsMiscellaneous(unittest.TestCase): From da4303f71be27788c02acbefaaf0cb5d04891d0d Mon Sep 17 00:00:00 2001 From: James Hensman Date: Fri, 14 Mar 2014 10:29:14 +0000 Subject: [PATCH 072/116] bugfix for grad_dict --- GPy/core/model.py | 2 +- GPy/core/sparse_gp.py | 2 +- GPy/testing/kernel_tests.py | 2 +- GPy/util/caching.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 6d90e13a..0990e7f1 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -302,7 +302,7 @@ class Model(Parameterized): denominator = (2 * np.dot(dx, gradient)) global_ratio = (f1 - f2) / np.where(denominator==0., 1e-32, denominator) - return np.abs(1. - global_ratio) < tolerance) + return np.abs(1. 
- global_ratio) < tolerance
 else:
 # check the gradient of each parameter individually, and do some pretty printing
 try:
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 23f8e690..d137ceff 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -60,7 +60,7 @@ class SparseGP(GP):
 dL_dKmm = self.grad_dict.pop('dL_dKmm')
 self.kern.update_gradients_full(dL_dKmm, self.Z, None)
 target = self.kern.gradient.copy()
- self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
+ self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])
 self.kern.gradient += target

 #gradients wrt Z
diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py
index d54b3871..2a35ad3b 100644
--- a/GPy/testing/kernel_tests.py
+++ b/GPy/testing/kernel_tests.py
@@ -252,7 +252,7 @@ class KernelGradientTestsContinuous(unittest.TestCase):
 k.randomize()
 self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))

-#TODO: turn off grad checkingwrt X for indexed kernels liek coregionalize
+#TODO: turn off grad checking wrt X for indexed kernels like coregionalize
 # class KernelGradientTestsContinuous1D(unittest.TestCase):
 # def setUp(self):
 # self.N, self.D = 100, 1
diff --git a/GPy/util/caching.py b/GPy/util/caching.py
index 792d82e2..ea09292a 100644
--- a/GPy/util/caching.py
+++ b/GPy/util/caching.py
@@ -48,7 +48,7 @@ class Cacher(object):
 if k in kw and kw[k] is not None:
 return self.operation(*args, **kw)
 # TODO: WARNING !!! Cache OFFSWITCH !!! WARNING
- return self.operation(*args)
+ #return self.operation(*args)
 #if the result is cached, return the cached computation
 state = [all(a is b for a, b in itertools.izip_longest(args, cached_i)) for cached_i in self.cached_inputs]

From 600b1bde3cb4dd1325c5a2c7b2ccea22967e6ecc Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Fri, 14 Mar 2014 10:55:16 +0000
Subject: [PATCH 073/116] kernel slices allowed

---
 GPy/kern/_src/independent_outputs.py | 10 +++++-----
 GPy/kern/_src/kern.py | 20 +++++++++++++++-----
 GPy/testing/kernel_tests.py | 17 +++++++++--------
 3 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py
index 0cbd5be4..387d2613 100644
--- a/GPy/kern/_src/independent_outputs.py
+++ b/GPy/kern/_src/independent_outputs.py
@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

-from kern import Kern
+from kern import Kern, CombinationKernel
 import numpy as np
 import itertools
@@ -32,7 +32,7 @@ def index_to_slices(index):
 [ret[ind_i].append(slice(*indexes_i)) for ind_i,indexes_i in zip(ind[switchpoints[:-1]],zip(switchpoints,switchpoints[1:]))]
 return ret

-class IndependentOutputs(Kern):
+class IndependentOutputs(CombinationKernel):
 """
 A kernel which can represent several independent functions. this kernel 'switches off' parts of the matrix where the output indexes are different.
 The index of the functions is given by the last column in the input X
 the rest of the columns of X are passed to the underlying kernel for computation (in blocks).
""" - def __init__(self, index_dim, kern, name='independ'): + def __init__(self, kern, index_dim=-1, name='independ'): assert isinstance(index_dim, int), "IndependentOutputs kernel is only defined with one input dimension being the indeces" - super(IndependentOutputs, self).__init__(np.r_[0:max(max(kern.active_dims)+1, index_dim+1)], name) + super(IndependentOutputs, self).__init__(kernels=[kern], extra_dims=[index_dim], name=name) self.index_dim = index_dim self.kern = kern - self.add_parameters(self.kern) + #self.add_parameters(self.kern) def K(self,X ,X2=None): slices = index_to_slices(X[:,self.index_dim]) diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index cb38416c..3efe7f5f 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -27,10 +27,18 @@ class Kern(Parameterized): Do not instantiate. """ super(Kern, self).__init__(name=name, *a, **kw) - self.active_dims = active_dims or slice(0, input_dim) + self.active_dims = active_dims if active_dims is not None else slice(0, input_dim) self.input_dim = input_dim assert isinstance(self.active_dims, (slice, list, tuple, np.ndarray)), 'active_dims needs to be an array-like or slice object over dimensions, {} given'.format(self.active_dims.__class__) - assert self.active_dims.size == self.input_dim, "input_dim {} does not match len(active_dim) {}".format(self.input_dim, self.active_dims.size) + if isinstance(self.active_dims, slice): + self.active_dims = slice(self.active_dims.start or 0, self.active_dims.stop or self.input_dim, self.active_dims.step or 1) + active_dim_size = int(np.round((self.active_dims.stop-self.active_dims.start)/self.active_dims.step)) + elif isinstance(self.active_dims, np.ndarray): + assert self.active_dims.ndim == 1, 'only flat indices allowed, given active_dims.shape={}, provide only indexes to the dimensions of the input'.format(self.active_dims.shape) + active_dim_size = self.active_dims.size + else: + active_dim_size = len(self.active_dims) + assert active_dim_size == self.input_dim, "input_dim={} does not match len(active_dim)={}, active_dims={}".format(self.input_dim, active_dim_size, self.active_dims) self._sliced_X = 0 @Cache_this(limit=10) @@ -207,10 +215,12 @@ class CombinationKernel(Kern): assert all([isinstance(k, Kern) for k in kernels]) import itertools # make sure the active dimensions of all underlying kernels are covered: - ma = reduce(lambda a,b: max(a, b.stop if isinstance(b, slice) else max(b)), itertools.chain((x.active_dims for x in kernels), [extra_dims]), 0) - input_dim = np.r_[0:ma+1] + #ma = reduce(lambda a,b: max(a, b.stop if isinstance(b, slice) else max(b)), itertools.chain((x.active_dims for x in kernels)), 0) + active_dims = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels), np.array([], dtype=int)) + input_dim = active_dims.max()+1 + len(extra_dims) + active_dims = slice(active_dims.max()+1+len(extra_dims)) # initialize the kernel with the full input_dim - super(CombinationKernel, self).__init__(input_dim, name) + super(CombinationKernel, self).__init__(input_dim, active_dims, name) self.extra_dims = extra_dims self.add_parameters(*kernels) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index d61bf6a3..b69dcb79 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -228,12 +228,12 @@ class KernelGradientTestsContinuous(unittest.TestCase): self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) def test_Prod(self): - k = GPy.kern.Matern32([2,3]) * GPy.kern.RBF([0,4]) + 
GPy.kern.Linear(self.D) + k = GPy.kern.Matern32(2, active_dims=[2,3]) * GPy.kern.RBF(2, active_dims=[0,4]) + GPy.kern.Linear(self.D) k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) def test_Add(self): - k = GPy.kern.Matern32([2,3]) + GPy.kern.RBF([0,4]) + GPy.kern.Linear(self.D) + k = GPy.kern.Matern32(2, active_dims=[2,3]) + GPy.kern.RBF(2, active_dims=[0,4]) + GPy.kern.Linear(self.D) k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) @@ -283,15 +283,16 @@ class KernelTestsMiscellaneous(unittest.TestCase): def setUp(self): N, D = 100, 10 self.X = np.linspace(-np.pi, +np.pi, N)[:,None] * np.ones(D) - self.rbf = GPy.kern.RBF(range(2)) - self.linear = GPy.kern.Linear((3,6)) - self.matern = GPy.kern.Matern32(np.array([2,4,7])) + self.rbf = GPy.kern.RBF(2, active_dims=slice(0,4,2)) + self.linear = GPy.kern.Linear(2, active_dims=(3,9)) + self.matern = GPy.kern.Matern32(3, active_dims=np.array([2,4,9])) self.sumkern = self.rbf + self.linear self.sumkern += self.matern self.sumkern.randomize() def test_active_dims(self): - self.assertListEqual(self.sumkern.active_dims.tolist(), range(8)) + self.assertEqual(self.sumkern.input_dim, 9) + self.assertEqual(self.sumkern.active_dims, slice(9)) def test_which_parts(self): self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=[self.linear, self.matern]), self.linear.K(self.X)+self.matern.K(self.X))) @@ -312,9 +313,9 @@ class KernelTestsNonContinuous(unittest.TestCase): self.X_block[0:N, -1] = 1 self.X_block[N:N+1, -1] = 2 - def test_IndependantOutputs(self): + def test_IndependentOutputs(self): k = GPy.kern.RBF(self.D) - kern = GPy.kern.IndependentOutputs(self.D+self.D,k) + kern = GPy.kern.IndependentOutputs(k, -1) self.assertTrue(check_kernel_gradient_functions(kern, X=self.X, X2=self.X2, verbose=verbose)) if __name__ == "__main__": From 7a982c8004e374dbad219a218039aa2e8b7b7766 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 11:19:41 +0000 Subject: [PATCH 074/116] kernel tests --- GPy/testing/kernel_tests.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 45c47dfb..e62dd3ce 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -290,8 +290,8 @@ class KernelTestsMiscellaneous(unittest.TestCase): self.sumkern.randomize() def test_active_dims(self): - self.assertEqual(self.sumkern.input_dim, 9) - self.assertEqual(self.sumkern.active_dims, slice(9)) + self.assertEqual(self.sumkern.input_dim, 10) + self.assertEqual(self.sumkern.active_dims, slice(0, 10, 1)) def test_which_parts(self): self.assertTrue(np.allclose(self.sumkern.K(self.X, which_parts=[self.linear, self.matern]), self.linear.K(self.X)+self.matern.K(self.X))) @@ -311,7 +311,8 @@ class KernelTestsNonContinuous(unittest.TestCase): self.X_block[N:N+N1, D:D+D] = self.X2 self.X_block[0:N, -1] = 1 self.X_block[N:N+1, -1] = 2 - + self.X_block = self.X_block[self.X_block.argsort(-1)[:, -1], :] + def test_IndependentOutputs(self): k = GPy.kern.RBF(self.D) kern = GPy.kern.IndependentOutputs(k, -1) From 26e6b290718c42cda05114797bada4b43b352474 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 11:20:39 +0000 Subject: [PATCH 075/116] static active dims --- GPy/kern/_src/static.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py index 4c9d943c..d853b4e0 100644 --- 
a/GPy/kern/_src/static.py +++ b/GPy/kern/_src/static.py @@ -10,7 +10,7 @@ import numpy as np class Static(Kern): def __init__(self, input_dim, variance, active_dims, name): - super(Static, self).__init__(input_dim, name) + super(Static, self).__init__(input_dim, active_dims, name) self.variance = Param('variance', variance, Logexp()) self.add_parameters(self.variance) From 7143180842d640bcfaae5e252d2217d4574bfaa6 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 11:31:16 +0000 Subject: [PATCH 076/116] testing --- GPy/testing/index_operations_tests.py | 45 ++++++++++++++++++--------- GPy/testing/kernel_tests.py | 4 +-- GPy/testing/parameterized_tests.py | 18 ++++++++++- 3 files changed, 49 insertions(+), 18 deletions(-) diff --git a/GPy/testing/index_operations_tests.py b/GPy/testing/index_operations_tests.py index 64b0c908..12602879 100644 --- a/GPy/testing/index_operations_tests.py +++ b/GPy/testing/index_operations_tests.py @@ -17,24 +17,33 @@ class Test(unittest.TestCase): self.param_index.add(one, [3]) self.param_index.add(two, [0,5]) self.param_index.add(three, [2,4,7]) + self.view = ParameterIndexOperationsView(self.param_index, 2, 6) + + def test_clear(self): + self.param_index.clear() + self.assertDictEqual(self.param_index._properties, {}) def test_remove(self): self.param_index.remove(three, np.r_[3:10]) self.assertListEqual(self.param_index[three].tolist(), [2]) self.param_index.remove(one, [1]) - self.assertListEqual(self.param_index[one].tolist(), [3]) + self.assertListEqual(self.param_index[one].tolist(), [3]) + self.assertListEqual(self.param_index.remove('not in there', []).tolist(), []) + self.param_index.remove(one, [9]) + self.assertListEqual(self.param_index[one].tolist(), [3]) + self.assertListEqual(self.param_index.remove('not in there', [2,3,4]).tolist(), []) def test_shift_left(self): - self.param_index.shift_left(1, 2) + self.view.shift_left(0, 2) self.assertListEqual(self.param_index[three].tolist(), [2,5]) self.assertListEqual(self.param_index[two].tolist(), [0,3]) - self.assertListEqual(self.param_index[one].tolist(), [1]) + self.assertListEqual(self.param_index[one].tolist(), []) def test_shift_right(self): - self.param_index.shift_right(5, 2) + self.view.shift_right(3, 2) self.assertListEqual(self.param_index[three].tolist(), [2,4,9]) self.assertListEqual(self.param_index[two].tolist(), [0,7]) - self.assertListEqual(self.param_index[one].tolist(), [3]) + self.assertListEqual(self.param_index[one].tolist(), [3]) def test_index_view(self): #======================================================================= @@ -44,17 +53,17 @@ class Test(unittest.TestCase): # three three three # view: [0 1 2 3 4 5 ] #======================================================================= - view = ParameterIndexOperationsView(self.param_index, 2, 6) - self.assertSetEqual(set(view.properties()), set([one, two, three])) - for v,p in zip(view.properties_for(np.r_[:6]), self.param_index.properties_for(np.r_[2:2+6])): + self.view = ParameterIndexOperationsView(self.param_index, 2, 6) + self.assertSetEqual(set(self.view.properties()), set([one, two, three])) + for v,p in zip(self.view.properties_for(np.r_[:6]), self.param_index.properties_for(np.r_[2:2+6])): self.assertEqual(v, p) - self.assertSetEqual(set(view[two]), set([3])) + self.assertSetEqual(set(self.view[two]), set([3])) self.assertSetEqual(set(self.param_index[two]), set([0, 5])) - view.add(two, np.array([0])) - self.assertSetEqual(set(view[two]), set([0,3])) + self.view.add(two, np.array([0])) + 
self.assertSetEqual(set(self.view[two]), set([0,3])) self.assertSetEqual(set(self.param_index[two]), set([0, 2, 5])) - view.clear() - for v,p in zip(view.properties_for(np.r_[:6]), self.param_index.properties_for(np.r_[2:2+6])): + self.view.clear() + for v,p in zip(self.view.properties_for(np.r_[:6]), self.param_index.properties_for(np.r_[2:2+6])): self.assertEqual(v, p) self.assertEqual(v, []) param_index = ParameterIndexOperations() @@ -62,11 +71,17 @@ class Test(unittest.TestCase): param_index.add(two, [0,5]) param_index.add(three, [2,4,7]) view2 = ParameterIndexOperationsView(param_index, 2, 6) - view.update(view2) + self.view.update(view2) for [i,v],[i2,v2] in zip(sorted(param_index.items()), sorted(self.param_index.items())): self.assertEqual(i, i2) self.assertTrue(np.all(v == v2)) - + + def test_misc(self): + for k,v in self.param_index.copy()._properties.iteritems(): + self.assertListEqual(self.param_index[k].tolist(), v.tolist()) + self.assertEqual(self.param_index.size, 6) + self.assertEqual(self.view.size, 5) + if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.test_index_view'] unittest.main() \ No newline at end of file diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index e62dd3ce..b057f8ef 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -311,12 +311,12 @@ class KernelTestsNonContinuous(unittest.TestCase): self.X_block[N:N+N1, D:D+D] = self.X2 self.X_block[0:N, -1] = 1 self.X_block[N:N+1, -1] = 2 - self.X_block = self.X_block[self.X_block.argsort(-1)[:, -1], :] + self.X_block = self.X_block[self.X_block.argsort(0)[:, -1], :] def test_IndependentOutputs(self): k = GPy.kern.RBF(self.D) kern = GPy.kern.IndependentOutputs(k, -1) - self.assertTrue(check_kernel_gradient_functions(kern, X=self.X, X2=self.X2, verbose=verbose)) + self.assertTrue(check_kernel_gradient_functions(kern, X=self.X_block, X2=self.X_block, verbose=verbose)) if __name__ == "__main__": print "Running unit tests, please be (very) patient..." 
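Note on the block design used in these tests: the convention throughout this series is that the output index lives in the last column of X, and the rows are sorted by that index so each output forms a contiguous block (which is what index_to_slices assumes). A minimal sketch of that layout, using the IndependentOutputs signature introduced earlier in this series; the sizes N1, N2, D and the choice of RBF are illustrative only:

    import numpy as np
    import GPy

    N1, N2, D = 30, 33, 2
    X1 = np.random.randn(N1, D)
    X2 = np.random.randn(N2, D)

    # stack both datasets and tag each row with its output index in the last column
    X = np.zeros((N1 + N2, D + 1))
    X[:N1, :D] = X1
    X[N1:, :D] = X2
    X[N1:, -1] = 1

    # group rows by the index column so index_to_slices sees contiguous blocks
    X = X[X[:, -1].argsort(), :]

    k = GPy.kern.IndependentOutputs(GPy.kern.RBF(D), index_dim=-1)
    K = k.K(X)  # block diagonal: no covariance between different outputs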
diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index 6555b8f4..9a74ea46 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -7,8 +7,24 @@ import unittest import GPy import numpy as np from GPy.core.parameterization.parameter_core import HierarchyError +from GPy.core.parameterization.array_core import ObservableArray -class Test(unittest.TestCase): +class ArrayCoreTest(unittest.TestCase): + def setUp(self): + self.X = np.random.normal(1,1, size=(100,10)) + self.obsX = ObservableArray(self.X) + + def test_init(self): + X = ObservableArray(self.X) + X2 = ObservableArray(X) + self.assertIs(X, X2, "no new Observable array, when Observable is given") + + def test_slice(self): + t1 = self.X[2:78] + t2 = self.obsX[2:78] + self.assertListEqual(t1.tolist(), t2.tolist(), "Slicing should be the exact same, as in ndarray") + +class ParameterizedTest(unittest.TestCase): def setUp(self): self.rbf = GPy.kern.RBF(1) From 5f229aae2ef5bf698174b86aafdc3f049ed8cc65 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 11:31:41 +0000 Subject: [PATCH 077/116] active dim indices and slices --- GPy/kern/_src/kern.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 3efe7f5f..5924d250 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -213,9 +213,6 @@ class CombinationKernel(Kern): :param array-like|slice extra_dims: if needed extra dimensions for the combination kernel to work on """ assert all([isinstance(k, Kern) for k in kernels]) - import itertools - # make sure the active dimensions of all underlying kernels are covered: - #ma = reduce(lambda a,b: max(a, b.stop if isinstance(b, slice) else max(b)), itertools.chain((x.active_dims for x in kernels)), 0) active_dims = reduce(np.union1d, (np.r_[x.active_dims] for x in kernels), np.array([], dtype=int)) input_dim = active_dims.max()+1 + len(extra_dims) active_dims = slice(active_dims.max()+1+len(extra_dims)) From 3e5e3a099e061c02dfe701b8ac5d9170bbca6ba0 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 11:32:08 +0000 Subject: [PATCH 078/116] checkgrad is zero test --- GPy/core/model.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 0990e7f1..1f53885c 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -301,8 +301,7 @@ class Model(Parameterized): denominator = (2 * np.dot(dx, gradient)) global_ratio = (f1 - f2) / np.where(denominator==0., 1e-32, denominator) - - return np.abs(1. - global_ratio) < tolerance + return np.abs(1. 
- global_ratio) < tolerance or np.abs(f1-f2).sum() + np.abs((2 * np.dot(dx, gradient))).sum() < tolerance else: # check the gradient of each parameter individually, and do some pretty printing try: From 16bd44eb35be6bfe75c31782c052dd345b9023d5 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 11:32:38 +0000 Subject: [PATCH 079/116] changes due to tests in parameterization --- GPy/core/parameterization/index_operations.py | 60 ++++--- GPy/core/parameterization/lists_and_dicts.py | 20 +-- GPy/core/parameterization/param.py | 79 ++++----- GPy/core/parameterization/parameter_core.py | 152 ++++++++++-------- 4 files changed, 151 insertions(+), 160 deletions(-) diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index f8f6ab5b..c22d8b6b 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -23,17 +23,16 @@ class ParameterIndexOperations(object): if constraints is not None: for t, i in constraints.iteritems(): self.add(t, i) - + def __getstate__(self): - return self._properties#, self._reverse - + return self._properties + def __setstate__(self, state): - self._properties = state[0] - # self._reverse = state[1] + self._properties = state def iteritems(self): return self._properties.iteritems() - + def items(self): return self._properties.items() @@ -42,7 +41,7 @@ class ParameterIndexOperations(object): def iterproperties(self): return self._properties.iterkeys() - + def shift_right(self, start, size): for ind in self.iterindices(): toshift = ind>=start @@ -58,29 +57,26 @@ class ParameterIndexOperations(object): ind[toshift] -= size if ind.size != 0: self._properties[v] = ind else: del self._properties[v] - + def clear(self): self._properties.clear() - + @property def size(self): - return reduce(lambda a,b: a+b.size, self.iterindices(), 0) - + return reduce(lambda a,b: a+b.size, self.iterindices(), 0) + def iterindices(self): return self._properties.itervalues() - + def indices(self): return self._properties.values() def properties_for(self, index): return vectorize(lambda i: [prop for prop in self.iterproperties() if i in self[prop]], otypes=[list])(index) - + def add(self, prop, indices): - try: - self._properties[prop] = combine_indices(self._properties[prop], indices) - except KeyError: - self._properties[prop] = indices - + self._properties[prop] = combine_indices(self._properties[prop], indices) + def remove(self, prop, indices): if prop in self._properties: diff = remove_indices(self[prop], indices) @@ -91,22 +87,22 @@ class ParameterIndexOperations(object): del self._properties[prop] return removed.astype(int) return numpy.array([]).astype(int) - + def update(self, parameter_index_view, offset=0): for i, v in parameter_index_view.iteritems(): self.add(i, v+offset) - + def copy(self): return ParameterIndexOperations(dict(self.iteritems())) - + def __getitem__(self, prop): return self._properties[prop] - + def __str__(self, *args, **kwargs): import pprint return pprint.pformat(dict(self._properties)) - + def combine_indices(arr1, arr2): return numpy.union1d(arr1, arr2) @@ -114,24 +110,22 @@ def remove_indices(arr, to_remove): return numpy.setdiff1d(arr, to_remove, True) def index_empty(index): - return numpy.size(index) == 0 + return numpy.size(index) == 0 class ParameterIndexOperationsView(object): def __init__(self, param_index_operations, offset, size): self._param_index_ops = param_index_operations self._offset = offset self._size = size - + def 
__getstate__(self): return [self._param_index_ops, self._offset, self._size] - def __setstate__(self, state): self._param_index_ops = state[0] self._offset = state[1] self._size = state[2] - def _filter_index(self, ind): return ind[(ind >= self._offset) * (ind < (self._offset + self._size))] - self._offset @@ -140,7 +134,7 @@ class ParameterIndexOperationsView(object): for i, ind in self._param_index_ops.iteritems(): ind2 = self._filter_index(ind) if ind2.size > 0: - yield i, ind2 + yield i, ind2 def items(self): return [[i,v] for i,v in self.iteritems()] @@ -151,7 +145,7 @@ class ParameterIndexOperationsView(object): def iterproperties(self): for i, _ in self.iteritems(): - yield i + yield i def shift_right(self, start, size): @@ -161,7 +155,7 @@ class ParameterIndexOperationsView(object): self._param_index_ops.shift_left(start+self._offset, size) self._offset -= size self._size -= size - + def clear(self): for i, ind in self.items(): self._param_index_ops.remove(i, ind+self._offset) @@ -198,7 +192,7 @@ class ParameterIndexOperationsView(object): def __getitem__(self, prop): ind = self._filter_index(self._param_index_ops[prop]) return ind - + def __str__(self, *args, **kwargs): import pprint return pprint.pformat(dict(self.iteritems())) @@ -206,8 +200,8 @@ class ParameterIndexOperationsView(object): def update(self, parameter_index_view, offset=0): for i, v in parameter_index_view.iteritems(): self.add(i, v+offset) - - + + def copy(self): return ParameterIndexOperations(dict(self.iteritems())) pass diff --git a/GPy/core/parameterization/lists_and_dicts.py b/GPy/core/parameterization/lists_and_dicts.py index 5b13b3b5..ca0589c9 100644 --- a/GPy/core/parameterization/lists_and_dicts.py +++ b/GPy/core/parameterization/lists_and_dicts.py @@ -5,21 +5,17 @@ Created on 27 Feb 2014 ''' from collections import defaultdict -class DefaultArrayDict(defaultdict): - def __init__(self): + +def intarray_default_factory(): + import numpy as np + return np.int_([]) + +class IntArrayDict(defaultdict): + def __init__(self, default_factory=None): """ Default will be self._default, if not set otherwise """ - defaultdict.__init__(self, self.default_factory) - -class SetDict(DefaultArrayDict): - def default_factory(self): - return set() - -class IntArrayDict(DefaultArrayDict): - def default_factory(self): - import numpy as np - return np.int_([]) + defaultdict.__init__(self, intarray_default_factory) class ArrayList(list): """ diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index cad20a8a..cd1ce39e 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -49,9 +49,6 @@ class Param(OptimizationHandlable, ObservableArray): obj._realshape_ = obj.shape obj._realsize_ = obj.size obj._realndim_ = obj.ndim - from lists_and_dicts import SetDict - obj._tied_to_me_ = SetDict() - obj._tied_to_ = [] obj._original_ = True obj._gradient_array_ = numpy.zeros(obj.shape, dtype=numpy.float64) return obj @@ -80,13 +77,11 @@ class Param(OptimizationHandlable, ObservableArray): self._parent_index_ = getattr(obj, '_parent_index_', None) self._default_constraint_ = getattr(obj, '_default_constraint_', None) self._current_slice_ = getattr(obj, '_current_slice_', None) - self._tied_to_me_ = getattr(obj, '_tied_to_me_', None) - self._tied_to_ = getattr(obj, '_tied_to_', None) self._realshape_ = getattr(obj, '_realshape_', None) self._realsize_ = getattr(obj, '_realsize_', None) self._realndim_ = getattr(obj, '_realndim_', None) self._original_ = getattr(obj, 
'_original_', None) - self._name = getattr(obj, 'name', None) + self._name = getattr(obj, '_name', None) self._gradient_array_ = getattr(obj, '_gradient_array_', None) self.constraints = getattr(obj, 'constraints', None) self.priors = getattr(obj, 'priors', None) @@ -106,10 +101,10 @@ class Param(OptimizationHandlable, ObservableArray): #=========================================================================== # Pickling operations #=========================================================================== - def __reduce_ex__(self): + def __reduce__(self): func, args, state = super(Param, self).__reduce__() return func, args, (state, - (self.name, + (self._name, self._parent_, self._parent_index_, self._default_constraint_, @@ -117,16 +112,16 @@ class Param(OptimizationHandlable, ObservableArray): self._realshape_, self._realsize_, self._realndim_, - self._tied_to_me_, - self._tied_to_, + self.constraints, + self.priors ) ) def __setstate__(self, state): super(Param, self).__setstate__(state[0]) state = list(state[1]) - self._tied_to_ = state.pop() - self._tied_to_me_ = state.pop() + self.priors = state.pop() + self.constraints = state.pop() self._realndim_ = state.pop() self._realsize_ = state.pop() self._realshape_ = state.pop() @@ -134,12 +129,13 @@ class Param(OptimizationHandlable, ObservableArray): self._default_constraint_ = state.pop() self._parent_index_ = state.pop() self._parent_ = state.pop() - self.name = state.pop() + self._name = state.pop() def copy(self, *args): constr = self.constraints.copy() priors = self.priors.copy() p = Param(self.name, self.view(numpy.ndarray).copy(), self._default_constraint_) + import ipdb;ipdb.set_trace() p.constraints = constr p.priors = priors return p @@ -180,21 +176,21 @@ class Param(OptimizationHandlable, ObservableArray): #=========================================================================== # Index Operations: #=========================================================================== - def _internal_offset(self): - internal_offset = 0 - extended_realshape = numpy.cumprod((1,) + self._realshape_[:0:-1])[::-1] - for i, si in enumerate(self._current_slice_[:self._realndim_]): - if numpy.all(si == Ellipsis): - continue - if isinstance(si, slice): - a = si.indices(self._realshape_[i])[0] - elif isinstance(si, (list,numpy.ndarray,tuple)): - a = si[0] - else: a = si - if a < 0: - a = self._realshape_[i] + a - internal_offset += a * extended_realshape[i] - return internal_offset + #def _internal_offset(self): + # internal_offset = 0 + # extended_realshape = numpy.cumprod((1,) + self._realshape_[:0:-1])[::-1] + # for i, si in enumerate(self._current_slice_[:self._realndim_]): + # if numpy.all(si == Ellipsis): + # continue + # if isinstance(si, slice): + # a = si.indices(self._realshape_[i])[0] + # elif isinstance(si, (list,numpy.ndarray,tuple)): + # a = si[0] + # else: a = si + # if a < 0: + # a = self._realshape_[i] + a + # internal_offset += a * extended_realshape[i] + # return internal_offset def _raveled_index(self, slice_index=None): # return an index array on the raveled array, which is formed by the current_slice @@ -204,6 +200,9 @@ class Param(OptimizationHandlable, ObservableArray): if ind.ndim < 2: ind = ind[:, None] return numpy.asarray(numpy.apply_along_axis(lambda x: numpy.sum(extended_realshape * x), 1, ind), dtype=int) + def _raveled_index_for(self, obj): + return self._raveled_index() + def _expand_index(self, slice_index=None): # this calculates the full indexing arrays from the slicing objects given by get_item for 
_real..._ attributes # it basically translates slices to their respective index arrays and turns negative indices around @@ -224,6 +223,11 @@ class Param(OptimizationHandlable, ObservableArray): return numpy.r_[a] return numpy.r_[:b] return itertools.imap(f, itertools.izip_longest(slice_index[:self._realndim_], self._realshape_, fillvalue=slice(self.size))) + #=========================================================================== + # Constrainable + #=========================================================================== + def _ensure_fixes(self): + if not self._has_fixes(): self._fixes_ = numpy.ones(self._realsize_, dtype=bool) #=========================================================================== # Convenience @@ -239,7 +243,6 @@ class Param(OptimizationHandlable, ObservableArray): #round.__doc__ = numpy.round.__doc__ def _get_original(self, param): return self - #=========================================================================== # Printing -> done #=========================================================================== @@ -266,23 +269,11 @@ class Param(OptimizationHandlable, ObservableArray): return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.iteritems()))] @property def _ties_str(self): - return [t._short() for t in self._tied_to_] or [''] + return [''] def __repr__(self, *args, **kwargs): name = "\033[1m{x:s}\033[0;0m:\n".format( x=self.hierarchy_name()) return name + super(Param, self).__repr__(*args, **kwargs) - def _ties_for(self, rav_index): - # size = sum(p.size for p in self._tied_to_) - ties = numpy.empty(shape=(len(self._tied_to_), numpy.size(rav_index)), dtype=Param) - for i, tied_to in enumerate(self._tied_to_): - for t, ind in tied_to._tied_to_me_.iteritems(): - if t._parent_index_ == self._parent_index_: - matches = numpy.where(rav_index[:, None] == t._raveled_index()[None, :]) - tt_rav_index = tied_to._raveled_index() - ind_rav_matches = numpy.where(tt_rav_index == numpy.array(list(ind)))[0] - if len(ind) != 1: ties[i, matches[0][ind_rav_matches]] = numpy.take(tt_rav_index, matches[1], mode='wrap')[ind_rav_matches] - else: ties[i, matches[0]] = numpy.take(tt_rav_index, matches[1], mode='wrap') - return map(lambda a: sum(a, []), zip(*[[[tie.flatten()] if tx != None else [] for tx in t] for t, tie in zip(ties, self._tied_to_)])) def _indices(self, slice_index=None): # get a int-array containing all indices in the first axis. 
if slice_index is None: @@ -322,8 +313,8 @@ class Param(OptimizationHandlable, ObservableArray): ravi = self._raveled_index(filter_) if constr_matrix is None: constr_matrix = self.constraints.properties_for(ravi) if prirs is None: prirs = self.priors.properties_for(ravi) - if ties is None: ties = self._ties_for(ravi) - ties = [' '.join(map(lambda x: x._short(), t)) for t in ties] + if ties is None: ties = [['N/A']]*self.size + ties = [' '.join(map(lambda x: x, t)) for t in ties] if lc is None: lc = self._max_len_names(constr_matrix, __constraints_name__) if lx is None: lx = self._max_len_values() if li is None: li = self._max_len_index(indices) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 51b6cddf..d2f066ff 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -16,7 +16,7 @@ Observable Pattern for patameterization from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED import numpy as np -__updated__ = '2014-03-13' +__updated__ = '2014-03-14' class HierarchyError(Exception): """ @@ -31,7 +31,71 @@ def adjust_name_for_printing(name): return name.replace(" ", "_").replace(".", "_").replace("-", "_m_").replace("+", "_p_").replace("!", "_I_").replace("**", "_xx_").replace("*", "_x_").replace("/", "_l_").replace("@",'_at_') return '' -class Observable(object): +class InterfacePickleFunctions(object): + def __init__(self, *a, **kw): + super(InterfacePickleFunctions, self).__init__() + + def _getstate(self): + """ + Returns the state of this class in a memento pattern. + The state must be a list-like structure of all the fields + this class needs to run. + + See python doc "pickling" (`__getstate__` and `__setstate__`) for details. + """ + raise NotImplementedError, "To be able to use pickling you need to implement this method" + def _setstate(self, state): + """ + Set the state (memento pattern) of this class to the given state. + Usually this is just the counterpart to _getstate, such that + an object is a copy of another when calling + + copy = .__new__(*args,**kw)._setstate(._getstate()) + + See python doc "pickling" (`__getstate__` and `__setstate__`) for details. + """ + raise NotImplementedError, "To be able to use pickling you need to implement this method" + +class Pickleable(object): + """ + Make an object pickleable (See python doc 'pickling'). + + This class allows for pickling support by Memento pattern. + _getstate returns a memento of the class, which gets pickled. + _setstate() (re-)sets the state of the class to the memento + """ + def __init__(self, *a, **kw): + super(Pickleable, self).__init__() + #=========================================================================== + # Pickling operations + #=========================================================================== + def pickle(self, f, protocol=-1): + """ + :param f: either filename or open file object to write to. + if it is an open buffer, you have to make sure to close + it properly. + :param protocol: pickling protocol to use, python-pickle for details. + """ + import cPickle + if isinstance(f, str): + with open(f, 'w') as f: + cPickle.dump(self, f, protocol) + else: + cPickle.dump(self, f, protocol) + def __getstate__(self): + if self._has_get_set_state(): + return self._getstate() + return self.__dict__ + def __setstate__(self, state): + if self._has_get_set_state(): + self._setstate(state) + # TODO: maybe parameters_changed() here? 
+ return + self.__dict__ = state + def _has_get_set_state(self): + return '_getstate' in vars(self.__class__) and '_setstate' in vars(self.__class__) + +class Observable(InterfacePickleFunctions): """ Observable pattern for parameterization. @@ -41,7 +105,7 @@ class Observable(object): """ _updated = True def __init__(self, *args, **kwargs): - super(Observable, self).__init__() + super(Observable, self).__init__(*args, **kwargs) self._observer_callables_ = [] def add_observer(self, observer, callble, priority=0): @@ -89,68 +153,16 @@ class Observable(object): ins += 1 self._observer_callables_.insert(ins, (p, o, c)) -class Pickleable(object): - """ - Make an object pickleable (See python doc 'pickling'). - - This class allows for pickling support by Memento pattern. - _getstate returns a memento of the class, which gets pickled. - _setstate() (re-)sets the state of the class to the memento - """ - #=========================================================================== - # Pickling operations - #=========================================================================== - def pickle(self, f, protocol=-1): - """ - :param f: either filename or open file object to write to. - if it is an open buffer, you have to make sure to close - it properly. - :param protocol: pickling protocol to use, python-pickle for details. - """ - import cPickle - if isinstance(f, str): - with open(f, 'w') as f: - cPickle.dump(self, f, protocol) - else: - cPickle.dump(self, f, protocol) - def __getstate__(self): - if self._has_get_set_state(): - return self._getstate() - return self.__dict__ - def __setstate__(self, state): - if self._has_get_set_state(): - self._setstate(state) - # TODO: maybe parameters_changed() here? - return - self.__dict__ = state - def _has_get_set_state(self): - return '_getstate' in vars(self.__class__) and '_setstate' in vars(self.__class__) def _getstate(self): - """ - Returns the state of this class in a memento pattern. - The state must be a list-like structure of all the fields - this class needs to run. - - See python doc "pickling" (`__getstate__` and `__setstate__`) for details. - """ - raise NotImplementedError, "To be able to use pickling you need to implement this method" + return [self._observer_callables_] def _setstate(self, state): - """ - Set the state (memento pattern) of this class to the given state. - Usually this is just the counterpart to _getstate, such that - an object is a copy of another when calling - - copy = .__new__(*args,**kw)._setstate(._getstate()) - - See python doc "pickling" (`__getstate__` and `__setstate__`) for details. - """ - raise NotImplementedError, "To be able to use pickling you need to implement this method" + self._observer_callables_ = state.pop() #=============================================================================== # Foundation framework for parameterized and param objects: #=============================================================================== -class Parentable(object): +class Parentable(Observable): """ Enable an Object to have a parent. @@ -160,7 +172,7 @@ class Parentable(object): _parent_ = None _parent_index_ = None def __init__(self, *args, **kwargs): - super(Parentable, self).__init__() + super(Parentable, self).__init__(*args, **kwargs) def has_parent(self): """ @@ -284,13 +296,6 @@ class Indexable(object): """ raise NotImplementedError, "Need to be able to get the raveled Index" - def _internal_offset(self): - """ - The offset for this parameter inside its parent. - This has to account for shaped parameters! 
- """ - return 0 - def _offset_for(self, param): """ Return the offset of the param inside this parameterized object. @@ -308,7 +313,7 @@ class Indexable(object): raise NotImplementedError, "shouldnt happen, raveld index transformation required from non parameterization object?" -class Constrainable(Nameable, Indexable, Observable): +class Constrainable(Nameable, Indexable): """ Make an object constrainable with Priors and Transformations. TODO: Mappings!! @@ -367,21 +372,26 @@ class Constrainable(Nameable, Indexable, Observable): self._highest_parent_._set_unfixed(unconstrained) unfix = unconstrain_fixed - def _set_fixed(self, index): + def _ensure_fixes(self): + # Ensure that the fixes array is set: + # Parameterized: ones(self.size) + # Param: ones(self._realsize_ if not self._has_fixes(): self._fixes_ = np.ones(self.size, dtype=bool) + + def _set_fixed(self, index): + self._ensure_fixes() self._fixes_[index] = FIXED if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED def _set_unfixed(self, index): - if not self._has_fixes(): self._fixes_ = np.ones(self.size, dtype=bool) - # rav_i = self._raveled_index_for(param)[index] + self._ensure_fixes() self._fixes_[index] = UNFIXED if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED def _connect_fixes(self): fixed_indices = self.constraints[__fixed__] if fixed_indices.size > 0: - self._fixes_ = np.ones(self.size, dtype=bool) * UNFIXED + self._ensure_fixes() self._fixes_[fixed_indices] = FIXED else: self._fixes_ = None From 11ca793d1faeb0c2479b36b1847fa4adfb2b904a Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 11:45:28 +0000 Subject: [PATCH 080/116] fixes fixed and test updates --- GPy/core/parameterization/param.py | 1 - GPy/core/parameterization/parameter_core.py | 2 +- GPy/testing/parameterized_tests.py | 9 +++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index cd1ce39e..b5507dec 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -135,7 +135,6 @@ class Param(OptimizationHandlable, ObservableArray): constr = self.constraints.copy() priors = self.priors.copy() p = Param(self.name, self.view(numpy.ndarray).copy(), self._default_constraint_) - import ipdb;ipdb.set_trace() p.constraints = constr p.priors = priors return p diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index d2f066ff..6dd0b389 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -376,7 +376,7 @@ class Constrainable(Nameable, Indexable): # Ensure that the fixes array is set: # Parameterized: ones(self.size) # Param: ones(self._realsize_ - if not self._has_fixes(): self._fixes_ = np.ones(self.size, dtype=bool) + self._fixes_ = np.ones(self.size, dtype=bool) def _set_fixed(self, index): self._ensure_fixes() diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index 9a74ea46..26afef41 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -51,7 +51,8 @@ class ParameterizedTest(unittest.TestCase): self.white.fix(warning=False) self.test1.remove_parameter(self.test1.param) self.assertTrue(self.test1._has_fixes()) - + import ipdb;ipdb.set_trace() + from GPy.core.parameterization.transformations import FIXED, UNFIXED self.assertListEqual(self.test1._fixes_.tolist(),[UNFIXED,UNFIXED,FIXED]) @@ -67,12 +68,12 @@ class 
ParameterizedTest(unittest.TestCase): self.assertListEqual(self.white._fixes_.tolist(), [FIXED]) self.assertEquals(self.white.constraints._offset, 0) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) - self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) + self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) self.test1.add_parameter(self.white, 0) self.assertIs(self.test1.constraints, self.white.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) - self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) + self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) self.assertListEqual(self.test1.constraints[__fixed__].tolist(), [0]) self.assertIs(self.white._fixes_,None) self.assertListEqual(self.test1._fixes_.tolist(),[FIXED] + [UNFIXED] * 52) @@ -85,7 +86,7 @@ class ParameterizedTest(unittest.TestCase): self.assertListEqual(self.test1.constraints[Logexp()].tolist(), [0,1]) def test_add_parameter_already_in_hirarchy(self): - self.assertRaises(HierarchyError, self.test1.add_parameter, self.white._parameters_[0]) + self.assertRaises(HierarchyError, self.test1.add_parameter, self.white._parameters_[0]) def test_default_constraints(self): self.assertIs(self.rbf.variance.constraints._param_index_ops, self.rbf.constraints._param_index_ops) From 2ec2eb84cec375e7a29a6fdb6d1e740c05119dfc Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 11:45:58 +0000 Subject: [PATCH 081/116] fixes fixed and test updates --- GPy/testing/parameterized_tests.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index 26afef41..5b718cbd 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -51,8 +51,6 @@ class ParameterizedTest(unittest.TestCase): self.white.fix(warning=False) self.test1.remove_parameter(self.test1.param) self.assertTrue(self.test1._has_fixes()) - import ipdb;ipdb.set_trace() - from GPy.core.parameterization.transformations import FIXED, UNFIXED self.assertListEqual(self.test1._fixes_.tolist(),[UNFIXED,UNFIXED,FIXED]) From 77d08a7d6fac1834db23c7b93a88541d309d8907 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Fri, 14 Mar 2014 11:47:23 +0000 Subject: [PATCH 082/116] fixes to EP --- GPy/core/gp.py | 4 +- GPy/examples/classification.py | 2 +- GPy/examples/regression.py | 4 +- .../latent_function_inference/__init__.py | 2 +- .../{ep.py => expectation_propagation.py} | 43 ++++++++++--------- GPy/likelihoods/bernoulli.py | 11 ++--- GPy/models/gp_classification.py | 2 +- 7 files changed, 36 insertions(+), 32 deletions(-) rename GPy/inference/latent_function_inference/{ep.py => expectation_propagation.py} (73%) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 35a41cde..f6e960ed 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -10,7 +10,7 @@ from model import Model from parameterization import ObservableArray from .. 
import likelihoods from ..likelihoods.gaussian import Gaussian -from ..inference.latent_function_inference import exact_gaussian_inference +from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation from parameterization.variational import VariationalPosterior class GP(Model): @@ -56,7 +56,7 @@ class GP(Model): if isinstance(likelihood, likelihoods.Gaussian) or isinstance(likelihood, likelihoods.MixedNoise): inference_method = exact_gaussian_inference.ExactGaussianInference() else: - inference_method = expectation_propagation + inference_method = expectation_propagation.EP() print "defaulting to ", inference_method, "for latent function inference" self.inference_method = inference_method diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py index 8637cc35..9190d3f3 100644 --- a/GPy/examples/classification.py +++ b/GPy/examples/classification.py @@ -89,7 +89,7 @@ def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot= likelihood = GPy.likelihoods.Bernoulli() laplace_inf = GPy.inference.latent_function_inference.Laplace() - kernel = GPy.kern.rbf(1) + kernel = GPy.kern.RBF(1) # Model definition m = GPy.core.GP(data['X'], Y, kernel=kernel, likelihood=likelihood, inference_method=laplace_inf) diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 7cd1e964..190af93b 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -318,7 +318,7 @@ def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize Y /= Y.std() if kernel_type == 'linear': - kernel = GPy.kern.linear(X.shape[1], ARD=1) + kernel = GPy.kern.Linear(X.shape[1], ARD=1) elif kernel_type == 'rbf_inv': kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1) else: @@ -357,7 +357,7 @@ def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, o Y /= Y.std() if kernel_type == 'linear': - kernel = GPy.kern.linear(X.shape[1], ARD=1) + kernel = GPy.kern.Linear(X.shape[1], ARD=1) elif kernel_type == 'rbf_inv': kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1) else: diff --git a/GPy/inference/latent_function_inference/__init__.py b/GPy/inference/latent_function_inference/__init__.py index 58d77c03..1c5a8ab9 100644 --- a/GPy/inference/latent_function_inference/__init__.py +++ b/GPy/inference/latent_function_inference/__init__.py @@ -27,8 +27,8 @@ etc. 
from exact_gaussian_inference import ExactGaussianInference from laplace import Laplace -expectation_propagation = 'foo' # TODO from GPy.inference.latent_function_inference.var_dtc import VarDTC +from expectation_propagation import EP from dtc import DTC from fitc import FITC diff --git a/GPy/inference/latent_function_inference/ep.py b/GPy/inference/latent_function_inference/expectation_propagation.py similarity index 73% rename from GPy/inference/latent_function_inference/ep.py rename to GPy/inference/latent_function_inference/expectation_propagation.py index 1904d48c..514a6dc7 100644 --- a/GPy/inference/latent_function_inference/ep.py +++ b/GPy/inference/latent_function_inference/expectation_propagation.py @@ -1,7 +1,7 @@ import numpy as np -from scipy import stats -from ..util.linalg import pdinv,mdot,jitchol,chol_inv,DSYR,tdot,dtrtrs -from likelihood import likelihood +from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs +from posterior import Posterior +log_2_pi = np.log(2*np.pi) class EP(object): def __init__(self, epsilon=1e-6, eta=1., delta=1.): @@ -28,30 +28,30 @@ class EP(object): K = kern.K(X) - mu_tilde, tau_tilde = self.expectation_propagation() + mu, Sigma, mu_tilde, tau_tilde, Z_hat = self.expectation_propagation(K, Y, likelihood, Y_metadata) - Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde) + Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde)) alpha, _ = dpotrs(LW, mu_tilde, lower=1) - log_marginal = 0.5*(-num_data * log_2_pi - W_logdet - np.sum(alpha * mu_tilde)) + log_marginal = 0.5*(-num_data * log_2_pi - W_logdet - np.sum(alpha * mu_tilde)) # TODO: add log Z_hat?? dL_dK = 0.5 * (tdot(alpha[:,None]) - Wi) - #TODO: what abot derivatives of the likelihood parameters? + dL_dthetaL = np.zeros(likelihood.size)#TODO: derivatives of the likelihood parameters - return Posterior(woodbury_inv=Wi, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK} + return Posterior(woodbury_inv=Wi, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL} - def expectation_propagation(self, K, Y, Y_metadata, likelihood) + def expectation_propagation(self, K, Y, likelihood, Y_metadata): num_data, data_dim = Y.shape assert data_dim == 1, "This EP methods only works for 1D outputs" #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma) - mu = np.zeros(self.num_data) + mu = np.zeros(num_data) Sigma = K.copy() #Initial values - Marginal moments @@ -61,33 +61,32 @@ class EP(object): #initial values - Gaussian factors if self.old_mutilde is None: - tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data, num_data)) + tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data)) else: assert old_mutilde.size == num_data, "data size mis-match: did you change the data? try resetting!" mu_tilde, v_tilde = self.old_mutilde, self.old_vtilde tau_tilde = v_tilde/mu_tilde #Approximation - epsilon_np1 = self.epsilon + 1. - epsilon_np2 = self.epsilon + 1. + tau_diff = self.epsilon + 1. + v_diff = self.epsilon + 1. 
iterations = 0 - while (epsilon_np1 > self.epsilon) or (epsilon_np2 > self.epsilon): + while (tau_diff > self.epsilon) or (v_diff > self.epsilon): update_order = np.random.permutation(num_data) for i in update_order: #Cavity distribution parameters tau_cav = 1./Sigma[i,i] - self.eta*tau_tilde[i] v_cav = mu[i]/Sigma[i,i] - self.eta*v_tilde[i] #Marginal moments - Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match(Y[i], tau_cav, v_cav, Y_metadata=(None if Y_metadata is None else Y_metadata[i])) + Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav, v_cav)#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i])) #Site parameters update delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma[i,i]) delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma[i,i]) tau_tilde[i] += delta_tau v_tilde[i] += delta_v #Posterior distribution parameters update - DSYR(Sigma, Sigma[:,i].copy(), -Delta_tau/(1.+ Delta_tau*Sigma[i,i])) + DSYR(Sigma, Sigma[:,i].copy(), -delta_tau/(1.+ delta_tau*Sigma[i,i])) mu = np.dot(Sigma, v_tilde) - iterations += 1 #(re) compute Sigma and mu using full Cholesky decompy tau_tilde_root = np.sqrt(tau_tilde) @@ -99,10 +98,14 @@ class EP(object): mu = np.dot(Sigma,v_tilde) #monitor convergence - epsilon_np1 = np.mean(np.square(tau_tilde-tau_tilde_old)) - epsilon_np2 = np.mean(np.square(v_tilde-v_tilde_old)) + if iterations>0: + tau_diff = np.mean(np.square(tau_tilde-tau_tilde_old)) + v_diff = np.mean(np.square(v_tilde-v_tilde_old)) tau_tilde_old = tau_tilde.copy() v_tilde_old = v_tilde.copy() - return mu, Sigma, mu_tilde, tau_tilde + iterations += 1 + + mu_tilde = v_tilde/tau_tilde + return mu, Sigma, mu_tilde, tau_tilde, Z_hat diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py index 42eaaa36..2e301fdd 100644 --- a/GPy/likelihoods/bernoulli.py +++ b/GPy/likelihoods/bernoulli.py @@ -5,6 +5,7 @@ import numpy as np from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf import link_functions from likelihood import Likelihood +from scipy import stats class Bernoulli(Likelihood): """ @@ -43,7 +44,7 @@ class Bernoulli(Likelihood): Y_prep[Y.flatten() == 0] = -1 return Y_prep - def moments_match_ep(self, data_i, tau_i, v_i): + def moments_match_ep(self, Y_i, tau_i, v_i): """ Moments match of the marginal approximation in EP algorithm @@ -51,9 +52,9 @@ class Bernoulli(Likelihood): :param tau_i: precision of the cavity distribution (float) :param v_i: mean/variance of the cavity distribution (float) """ - if data_i == 1: + if Y_i == 1: sign = 1. - elif data_i == 0: + elif Y_i == 0: sign = -1 else: raise ValueError("bad value for Bernouilli observation (0, 1)") @@ -76,7 +77,7 @@ class Bernoulli(Likelihood): return Z_hat, mu_hat, sigma2_hat - def predictive_mean(self, mu, variance): + def predictive_mean(self, mu, variance, Y_metadata=None): if isinstance(self.gp_link, link_functions.Probit): return stats.norm.cdf(mu/np.sqrt(1+variance)) @@ -87,7 +88,7 @@ class Bernoulli(Likelihood): else: raise NotImplementedError - def predictive_variance(self, mu, variance, pred_mean): + def predictive_variance(self, mu, variance, pred_mean, Y_metadata=None): if isinstance(self.gp_link, link_functions.Heaviside): return 0. 
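For the probit link used by the Bernoulli likelihood, the moment matching in moments_match_ep has a well-known closed form. The sketch below restates the standard EP site update for a probit factor (see e.g. Rasmussen and Williams, ch. 3); it works from the cavity natural parameters tau_i and v_i as above, but the function name and the explicit conversion to cavity mean and variance are illustrative, not the exact GPy code path:

    import numpy as np
    from scipy import stats

    def probit_moments(y, tau_cav, v_cav):
        # convert cavity natural parameters to mean and variance
        m, s2 = v_cav / tau_cav, 1. / tau_cav
        z = y * m / np.sqrt(1. + s2)
        Z_hat = stats.norm.cdf(z)             # zeroth moment (normaliser)
        ratio = stats.norm.pdf(z) / Z_hat
        mu_hat = m + y * ratio * s2 / np.sqrt(1. + s2)
        sigma2_hat = s2 - ratio * (z + ratio) * s2**2 / (1. + s2)
        return Z_hat, mu_hat, sigma2_hat

Here y is the observation mapped to {-1, +1}, matching the sign convention used in Bernoulli.moments_match_ep.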
diff --git a/GPy/models/gp_classification.py b/GPy/models/gp_classification.py
index 634fd4e5..9d918cda 100644
--- a/GPy/models/gp_classification.py
+++ b/GPy/models/gp_classification.py
@@ -23,7 +23,7 @@ class GPClassification(GP):

     def __init__(self, X, Y, kernel=None):
         if kernel is None:
-            kernel = kern.rbf(X.shape[1])
+            kernel = kern.RBF(X.shape[1])

         likelihood = likelihoods.Bernoulli()

From da2cceba870f892a5ee345b853ddf5921ab43850 Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Fri, 14 Mar 2014 12:19:25 +0000
Subject: [PATCH 083/116] plotting now seems to work for Bernoulli

---
 GPy/core/gp.py               | 3 +++
 GPy/likelihoods/bernoulli.py | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index f6e960ed..05e3e671 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -94,6 +94,9 @@ class GP(Model):
             #var = Kxx - np.sum(LiKx*LiKx, 0)
             var = Kxx - np.sum(WiKx*Kx, 0)
             var = var.reshape(-1, 1)
+
+        #force mu to be a column vector
+        if len(mu.shape)==1: mu = mu[:,None]
         return mu, var

     def predict(self, Xnew, full_cov=False, Y_metadata=None):
diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py
index 2e301fdd..6c22e3d1 100644
--- a/GPy/likelihoods/bernoulli.py
+++ b/GPy/likelihoods/bernoulli.py
@@ -212,7 +212,7 @@ class Bernoulli(Likelihood):
         np.seterr(**state)
         return d3logpdf_dlink3

-    def samples(self, gp):
+    def samples(self, gp, Y_metadata=None):
         """
         Returns a set of samples of observations based on a given value of the latent variable.

From a800f4b7ed7eca2fa0733fa4cc29f4119a7e59d2 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Fri, 14 Mar 2014 12:31:28 +0000
Subject: [PATCH 084/116] prior tests renewed

---
 GPy/testing/prior_tests.py | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

diff --git a/GPy/testing/prior_tests.py b/GPy/testing/prior_tests.py
index c16057db..db6cc685 100644
--- a/GPy/testing/prior_tests.py
+++ b/GPy/testing/prior_tests.py
@@ -15,7 +15,7 @@ class PriorTests(unittest.TestCase):
             X, y = X[:, None], y[:, None]
             m = GPy.models.GPRegression(X, y)
             lognormal = GPy.priors.LogGaussian(1, 2)
-            m.set_prior('rbf', lognormal)
+            m.rbf.set_prior(lognormal)
             m.randomize()
             self.assertTrue(m.checkgrad())

@@ -28,7 +28,7 @@
             X, y = X[:, None], y[:, None]
             m = GPy.models.GPRegression(X, y)
             Gamma = GPy.priors.Gamma(1, 1)
-            m.set_prior('rbf', Gamma)
+            m.rbf.set_prior(Gamma)
             m.randomize()
             self.assertTrue(m.checkgrad())

@@ -41,16 +41,9 @@
             X, y = X[:, None], y[:, None]
             m = GPy.models.GPRegression(X, y)
             gaussian = GPy.priors.Gaussian(1, 1)
-            success = False
-
             # setting a Gaussian prior on non-negative parameters
             # should raise an AssertionError.
- try: - m.set_prior('rbf', gaussian) - except AssertionError: - success = True - - self.assertTrue(success) + self.assertRaises(AssertionError, m.rbf.set_prior, gaussian) if __name__ == "__main__": From 3e93579e3dc9e6370b27f029e6e086d2e9750444 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Fri, 14 Mar 2014 12:32:08 +0000 Subject: [PATCH 085/116] prior domain check --- GPy/core/parameterization/param.py | 2 +- GPy/core/parameterization/parameter_core.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index b5507dec..2ede8436 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -226,7 +226,7 @@ class Param(OptimizationHandlable, ObservableArray): # Constrainable #=========================================================================== def _ensure_fixes(self): - if not self._has_fixes(): self._fixes_ = numpy.ones(self._realsize_, dtype=bool) + self._fixes_ = numpy.ones(self._realsize_, dtype=bool) #=========================================================================== # Convenience diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 6dd0b389..f58143bd 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -411,6 +411,15 @@ class Constrainable(Nameable, Indexable): repriorized = self.unset_priors() self._add_to_index_operations(self.priors, repriorized, prior, warning) + from domains import _REAL, _POSITIVE, _NEGATIVE + if prior.domain is _POSITIVE: + self.constrain_positive(warning) + elif prior.domain is _NEGATIVE: + self.constrain_negative(warning) + elif prior.domain is _REAL: + rav_i = self._raveled_index() + assert all(all(c.domain is _REAL for c in con) for con in self.constraints.properties_for(rav_i)) + def unset_priors(self, *priors): """ Un-set all priors given from this parameter handle. @@ -421,14 +430,14 @@ class Constrainable(Nameable, Indexable): def log_prior(self): """evaluate the prior""" if self.priors.size > 0: - x = self._get_params() - return reduce(lambda a, b: a + b, [p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()], 0) + x = self._param_array_ + return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()), 0) return 0. 
     def _log_prior_gradients(self):
         """evaluate the gradients of the priors"""
         if self.priors.size > 0:
-            x = self._get_params()
+            x = self._param_array_
             ret = np.zeros(x.size)
             [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()]
             return ret
From d02f212612a563ae62a4da94886f1a0e893a824a Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Fri, 14 Mar 2014 12:32:47 +0000
Subject: [PATCH 086/116] independent output kernel gradients x

---
 GPy/kern/_src/independent_outputs.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py
index fcf94e74..1848bf6a 100644
--- a/GPy/kern/_src/independent_outputs.py
+++ b/GPy/kern/_src/independent_outputs.py
@@ -80,19 +80,19 @@ class IndependentOutputs(CombinationKernel):
         self.kern.gradient = target

     def gradients_X(self,dL_dK, X, X2=None):
-        target = np.zeros_like(X)
+        target = np.zeros(X.shape)
         slices = index_to_slices(X[:,self.index_dim])
         if X2 is None:
             [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,ss],X[s],X[ss])) for s, ss in itertools.product(slices_i, slices_i)] for slices_i in slices]
         else:
-            X2,slices2 = X2[:,:self.index_dim],index_to_slices(X2[:,-1])
-            [[[np.copyto(target[s,:self.index_dim], self.kern.gradients_X(dL_dK[s,s2], X[s], X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)]
+            slices2 = index_to_slices(X2[:,self.index_dim])
+            [[[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,s2], X[s], X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)]
         return target

     def gradients_X_diag(self, dL_dKdiag, X):
         slices = index_to_slices(X[:,self.index_dim])
         target = np.zeros(X.shape)
-        [[np.copyto(target[s,:-1], self.kern.gradients_X_diag(dL_dKdiag[s],X[s])) for s in slices_i] for slices_i in slices]
+        [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X_diag(dL_dKdiag[s],X[s])) for s in slices_i] for slices_i in slices]
         return target

     def update_gradients_diag(self, dL_dKdiag, X):
From 0f0a0dae0af2d759b0c8fc35ca641f2d2f25a201 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Fri, 14 Mar 2014 12:33:26 +0000
Subject: [PATCH 087/116] mrd gradients

---
 GPy/models/mrd.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py
index 17949012..ac2ef9cd 100644
--- a/GPy/models/mrd.py
+++ b/GPy/models/mrd.py
@@ -118,7 +118,7 @@ class MRD(Model):
             self._log_marginal_likelihood += lml

             # likelihood gradients
-            l.update_gradients(grad_dict.pop('partial_for_likelihood'))
+            l.update_gradients(grad_dict.pop('dL_dthetaL'))

             #gradients wrt kernel
             dL_dKmm = grad_dict.pop('dL_dKmm')
From 169fd9b8d4852b29f77ac05535f1cb8a311a7008 Mon Sep 17 00:00:00 2001
From: Alan Saul
Date: Fri, 14 Mar 2014 12:42:36 +0000
Subject: [PATCH 088/116] Stabilised exp link_function and quadrature variances

---
 GPy/likelihoods/likelihood.py     |  8 +++++++-
 GPy/likelihoods/link_functions.py | 11 +++++++----
 GPy/likelihoods/poisson.py        |  2 +-
 3 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py
index 3eafedb1..4b8881de 100644
--- a/GPy/likelihoods/likelihood.py
+++ b/GPy/likelihoods/likelihood.py
@@ -178,7 +178,13 @@ class Likelihood(Parameterized):

         #E( E(Y_star|f_star)**2 )
         def int_pred_mean_sq(f,m,v,predictive_mean_sq):
-            return self.conditional_mean(f)**2*np.exp(-(0.5/v)*np.square(f - m))
+            p = np.exp(-(0.5/v)*np.square(f - m))
+            #If p is zero then conditional_mean**2 will overflow
+            if p < 1e-10:
+                return 0.
+            else:
+                return self.conditional_mean(f)**2*p
+
         scaled_exp_exp2 = [quad(int_pred_mean_sq, -np.inf, np.inf,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)]
         exp_exp2 = np.array(scaled_exp_exp2)[:,None] / normalizer
diff --git a/GPy/likelihoods/link_functions.py b/GPy/likelihoods/link_functions.py
index 2a1bf147..942fe2f4 100644
--- a/GPy/likelihoods/link_functions.py
+++ b/GPy/likelihoods/link_functions.py
@@ -6,6 +6,9 @@ from scipy import stats
 import scipy as sp
 from GPy.util.univariate_Gaussian import std_norm_pdf,std_norm_cdf,inv_std_norm_cdf

+_exp_lim_val = np.finfo(np.float64).max
+_lim_val = np.log(_exp_lim_val)
+
 class GPTransformation(object):
     """
     Link function class for doing non-Gaussian likelihoods approximation
@@ -92,16 +95,16 @@ class Log(GPTransformation):
     """

     def transf(self,f):
-        return np.exp(f)
+        return np.exp(np.clip(f, -_lim_val, _lim_val))

     def dtransf_df(self,f):
-        return np.exp(f)
+        return np.exp(np.clip(f, -_lim_val, _lim_val))

     def d2transf_df2(self,f):
-        return np.exp(f)
+        return np.exp(np.clip(f, -_lim_val, _lim_val))

     def d3transf_df3(self,f):
-        return np.exp(f)
+        return np.exp(np.clip(f, -_lim_val, _lim_val))

 class Log_ex_1(GPTransformation):
     """
diff --git a/GPy/likelihoods/poisson.py b/GPy/likelihoods/poisson.py
index 419514d1..c67a7e12 100644
--- a/GPy/likelihoods/poisson.py
+++ b/GPy/likelihoods/poisson.py
@@ -21,7 +21,7 @@ class Poisson(Likelihood):
     """
     def __init__(self, gp_link=None):
         if gp_link is None:
-            gp_link = link_functions.Log_ex_1()
+            gp_link = link_functions.Log()

         super(Poisson, self).__init__(gp_link, name='Poisson')

From 7cd75ccbf58c5f01441669503d5da82a5d1f33d4 Mon Sep 17 00:00:00 2001
From: Alan Saul
Date: Fri, 14 Mar 2014 12:52:52 +0000
Subject: [PATCH 089/116] Stabilised other quadrature (should speed things up also), added sampling ability to Poisson

---
 GPy/likelihoods/likelihood.py | 14 ++++++++++++--
 GPy/likelihoods/poisson.py    |  2 +-
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py
index 4b8881de..98a856fd 100644
--- a/GPy/likelihoods/likelihood.py
+++ b/GPy/likelihoods/likelihood.py
@@ -142,7 +142,12 @@ class Likelihood(Parameterized):
         """
         #conditional_mean: the expected value of y given some f, under this likelihood
         def int_mean(f,m,v):
-            return self.conditional_mean(f)*np.exp(-(0.5/v)*np.square(f - m))
+            p = np.exp(-(0.5/v)*np.square(f - m))
+            #If p is zero then conditional_mean will overflow
+            if p < 1e-10:
+                return 0.
+            else:
+                return self.conditional_mean(f)*p

         scaled_mean = [quad(int_mean, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
         mean = np.array(scaled_mean)[:,None] / np.sqrt(2*np.pi*(variance))
@@ -165,7 +170,12 @@ class Likelihood(Parameterized):

         # E( V(Y_star|f_star) )
         def int_var(f,m,v):
-            return self.conditional_variance(f)*np.exp(-(0.5/v)*np.square(f - m))
+            p = np.exp(-(0.5/v)*np.square(f - m))
+            #If p is zero then conditional_variance will overflow
+            if p < 1e-10:
+                return 0.
+ else: + return self.conditional_variance(f)*p scaled_exp_variance = [quad(int_var, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)] exp_var = np.array(scaled_exp_variance)[:,None] / normalizer diff --git a/GPy/likelihoods/poisson.py b/GPy/likelihoods/poisson.py index c67a7e12..c0e2c81f 100644 --- a/GPy/likelihoods/poisson.py +++ b/GPy/likelihoods/poisson.py @@ -143,7 +143,7 @@ class Poisson(Likelihood): """ return self.gp_link.transf(gp) - def samples(self, gp): + def samples(self, gp, Y_metadata=None): """ Returns a set of samples of observations based on a given value of the latent variable. From a12bdafad7d03e57bd40818cbcb49d671205d7b7 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Fri, 14 Mar 2014 14:28:34 +0000 Subject: [PATCH 090/116] added jitter to fitc --- GPy/inference/latent_function_inference/fitc.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/GPy/inference/latent_function_inference/fitc.py b/GPy/inference/latent_function_inference/fitc.py index 9294e25d..c4147d06 100644 --- a/GPy/inference/latent_function_inference/fitc.py +++ b/GPy/inference/latent_function_inference/fitc.py @@ -3,6 +3,7 @@ from posterior import Posterior from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv +from ...util import diag import numpy as np log_2_pi = np.log(2*np.pi) @@ -14,8 +15,7 @@ class FITC(object): the posterior. """ - def __init__(self): - self.const_jitter = 1e-6 + const_jitter = 1e-6 def inference(self, kern, X, Z, likelihood, Y): @@ -33,6 +33,7 @@ class FITC(object): U = Knm #factor Kmm + diag.add(Kmm, self.const_jitter) Kmmi, L, Li, _ = pdinv(Kmm) #compute beta_star, the effective noise precision From 4757265b240924af09ac0032984c5c8d85f4d805 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 14 Mar 2014 15:23:32 +0000 Subject: [PATCH 091/116] Added a hack fix as suggested by max, zeroing any negative values (should really be numerically negative values on diagonal) --- GPy/kern/_src/independent_outputs.py | 3 ++- GPy/kern/_src/stationary.py | 28 +++++++++++++++------------- GPy/testing/kernel_tests.py | 19 +++++++++++++------ 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 1848bf6a..85cb95bc 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -8,7 +8,7 @@ import itertools def index_to_slices(index): """ - take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index. + take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index. e.g. >>> index = np.asarray([0,0,0,1,1,1,2,2,2]) @@ -40,6 +40,7 @@ class IndependentOutputs(CombinationKernel): The index of the functions is given by the last column in the input X the rest of the columns of X are passed to the underlying kernel for computation (in blocks). + Kern is wrapped with a slicer metaclass """ def __init__(self, kern, index_dim=-1, name='independ'): assert isinstance(index_dim, int), "IndependentOutputs kernel is only defined with one input dimension being the indeces" diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index df7ba058..a9e837a9 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -15,21 +15,21 @@ class Stationary(Kern): """ Stationary kernels (covariance functions). 
-    Stationary covariance function depends only on r, where r is defined as
+    Stationary covariance function depends only on r, where r is defined as

       r = \sqrt{ \sum_{q=1}^Q (x_q - x'_q)^2 }

-    The covariance function k(x, x') can then be written k(r).
+    The covariance function k(x, x') can then be written k(r).

     In this implementation, r is scaled by the lengthscales parameter(s):

-      r = \sqrt{ \sum_{q=1}^Q \frac{(x_q - x'_q)^2}{\ell_q^2} }.
-
+      r = \sqrt{ \sum_{q=1}^Q \frac{(x_q - x'_q)^2}{\ell_q^2} }.
+
     By default, there's only one lengthscale: separate lengthscales for each
-    dimension can be enabled by setting ARD=True.
+    dimension can be enabled by setting ARD=True.

     To implement a stationary covariance function using this class, one need
-    only define the covariance function k(r), and its derivative.
+    only define the covariance function k(r), and its derivative.

       ...
       def K_of_r(self, r):
           return foo
       def dK_dr(self, r):
           return bar

-    The lengthscale(s) and variance parameters are added to the structure automatically.
-
+    The lengthscale(s) and variance parameters are added to the structure automatically.
+
     """
-
+
     def __init__(self, input_dim, variance, lengthscale, ARD, active_dims, name):
         super(Stationary, self).__init__(input_dim, active_dims, name)
         self.ARD = ARD
@@ -57,7 +57,7 @@
         if lengthscale.size != input_dim:
             lengthscale = np.ones(input_dim)*lengthscale
         else:
-            lengthscale = np.ones(self.input_dim)
+            lengthscale = np.ones(self.input_dim)
         self.lengthscale = Param('lengthscale', lengthscale, Logexp())
         self.variance = Param('variance', variance, Logexp())
         assert self.variance.size==1
@@ -95,7 +95,9 @@
         #X2, = self._slice_X(X2)
         X1sq = np.sum(np.square(X),1)
         X2sq = np.sum(np.square(X2),1)
-        return np.sqrt(-2.*np.dot(X, X2.T) + (X1sq[:,None] + X2sq[None,:]))
+        r2 = -2.*np.dot(X, X2.T) + X1sq[:,None] + X2sq[None,:]
+        r2[r2<0] = 0. # A bit hacky
+        return np.sqrt(r2)

     @Cache_this(limit=5, ignore_args=())
     def _scaled_dist(self, X, X2=None):
@@ -133,7 +135,7 @@
         if self.ARD:
            #rinv = self._inv_dis# this is rather high memory? Should we loop instead?t(X, X2)
            #d = X[:, None, :] - X2[None, :, :]
-           #x_xl3 = np.square(d)
+           #x_xl3 = np.square(d)
            #self.lengthscale.gradient = -((dL_dr*rinv)[:,:,None]*x_xl3).sum(0).sum(0)/self.lengthscale**3
            tmp = dL_dr*self._inv_dist(X, X2)
            if X2 is None: X2 = X
@@ -247,7 +249,7 @@
     ..
math:: - k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r) + k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r) """ def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat52'): super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index b057f8ef..3b17a3d0 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -120,6 +120,8 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb if verbose: print("Checking covariance function is positive definite.") + #if isinstance(kern, GPy.kern.IndependentOutputs): + #import ipdb; ipdb.set_trace() # XXX BREAKPOINT result = Kern_check_model(kern, X=X).is_positive_semi_definite() if result and verbose: print("Check passed.") @@ -306,17 +308,22 @@ class KernelTestsNonContinuous(unittest.TestCase): D = self.D self.X = np.random.randn(N,D) self.X2 = np.random.randn(N1,D) - self.X_block = np.zeros((N+N1, D+D+1)) + #self.X_block = np.zeros((N+N1, D+D+1)) + #self.X_block[0:N, 0:D] = self.X + #self.X_block[N:N+N1, D:D+D] = self.X2 + #self.X_block[0:N, -1] = 0 + #self.X_block[N:N+N1, -1] = 1 + self.X_block = np.zeros((N+N1, D+1)) self.X_block[0:N, 0:D] = self.X - self.X_block[N:N+N1, D:D+D] = self.X2 - self.X_block[0:N, -1] = 1 - self.X_block[N:N+1, -1] = 2 + self.X_block[N:N+N1, 0:D] = self.X2 + self.X_block[0:N, -1] = 0 + self.X_block[N:N+N1, -1] = 1 self.X_block = self.X_block[self.X_block.argsort(0)[:, -1], :] - + def test_IndependentOutputs(self): k = GPy.kern.RBF(self.D) kern = GPy.kern.IndependentOutputs(k, -1) - self.assertTrue(check_kernel_gradient_functions(kern, X=self.X_block, X2=self.X_block, verbose=verbose)) + self.assertTrue(check_kernel_gradient_functions(kern, X=self.X_block, verbose=verbose)) if __name__ == "__main__": print "Running unit tests, please be (very) patient..." 
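The `r2[r2<0] = 0.` fix in patch 091 addresses a classic floating-point issue: expanding ||x - x'||^2 as x.x + x'.x' - 2 x.x' can yield tiny negative values for near-identical rows, and np.sqrt then produces NaNs that break the positive-definiteness check. A standalone sketch of the stabilised distance (the function name is ours):

    import numpy as np

    def stable_sq_dist(X, X2):
        # expanded form of the pairwise squared distance: fast, but subject
        # to cancellation error when two rows are (almost) identical
        X1sq = np.sum(np.square(X), 1)
        X2sq = np.sum(np.square(X2), 1)
        r2 = -2. * np.dot(X, X2.T) + X1sq[:, None] + X2sq[None, :]
        # clamp the numerically negative entries before taking the root
        r2[r2 < 0] = 0.
        return r2

    X = np.random.randn(5, 3)
    r = np.sqrt(stable_sq_dist(X, X))   # no NaNs on the diagonal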
From bfa18ef6625770733f24d154f12505b5353e69a3 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 14 Mar 2014 15:34:00 +0000 Subject: [PATCH 092/116] Fixed Y_metadata bug --- GPy/core/gp.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 05e3e671..70b7d695 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -42,7 +42,10 @@ class GP(Model): assert Y.shape[0] == self.num_data _, self.output_dim = self.Y.shape - self.Y_metadata = Y_metadata or {} + if Y_metadata is None: + Y_metadata = {} + else: + self.Y_metadata = Y_metadata assert isinstance(kernel, kern.Kern) #assert self.input_dim == kernel.input_dim From 7ac0689156df48a798126bb6be67049a649a4a67 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 10:27:10 +0000 Subject: [PATCH 093/116] Changes in kernel parameters definition --- GPy/util/multioutput.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/GPy/util/multioutput.py b/GPy/util/multioutput.py index 79022a5f..d9e8b704 100644 --- a/GPy/util/multioutput.py +++ b/GPy/util/multioutput.py @@ -35,7 +35,8 @@ def build_likelihood(Y_list,noise_index,likelihoods_list=None): likelihoods_list = [GPy.likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for y,j in zip(Y_list,range(Ny))] else: assert len(likelihoods_list) == Ny - likelihood = GPy.likelihoods.mixed_noise.MixedNoise(likelihoods_list=likelihoods_list, noise_index=noise_index) + #likelihood = GPy.likelihoods.mixed_noise.MixedNoise(likelihoods_list=likelihoods_list, noise_index=noise_index) + likelihood = GPy.likelihoods.mixed_noise.MixedNoise(likelihoods_list=likelihoods_list) return likelihood @@ -43,7 +44,7 @@ def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='X'): """ Builds a kernel for an Intrinsic Coregionalization Model - :input_dim: Input dimensionality + :input_dim: Input dimensionality (does not include dimension of indices) :num_outputs: Number of outputs :param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B). :type kernel: a GPy kernel @@ -54,7 +55,8 @@ def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='X'): kernel.input_dim = input_dim warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.") - K = kernel.prod(GPy.kern.Coregionalize([input_dim], num_outputs,W_rank,W,kappa,name='B'),name=name) + K = kernel.prod(GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim], rank=W_rank,W=W,kappa=kappa,name='B'),name=name) + #K = kernel * GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim], rank=W_rank,W=W,kappa=kappa,name='B') #K = kernel ** GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa, name= 'B') K['.*variance'] = 1. K['.*variance'].fix() @@ -65,7 +67,7 @@ def LCM(input_dim, num_outputs, kernels_list, W_rank=1,name='X'): """ Builds a kernel for an Linear Coregionalization Model - :input_dim: Input dimensionality + :input_dim: Input dimensionality (does not include dimension of indices) :num_outputs: Number of outputs :param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B). 
:type kernel: a GPy kernel From bb7c26b41699b084915dabd4114a4a48b7a79aa3 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 10:27:53 +0000 Subject: [PATCH 094/116] Y_metadata definition changed --- GPy/plotting/matplot_dep/models_plots.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index 7507c376..ae79569b 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -6,6 +6,7 @@ import numpy as np import Tango from base_plots import gpplot, x_frame1D, x_frame2D from ...util.misc import param_to_array +from ...models.gp_coregionalized_regression import GPCoregionalizedRegression def plot_fit(model, plot_limits=None, which_data_rows='all', @@ -85,8 +86,9 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', lower = m - 2*np.sqrt(v) upper = m + 2*np.sqrt(v) else: - m, v = model.predict(Xgrid, full_cov=False, Y_metadata=Y_metadata) - lower, upper = model.predict_quantiles(Xgrid, Y_metadata=Y_metadata) + meta = {'output_index': Xgrid[:,-1:].astype(np.int)} if isinstance(model,GPCoregionalizedRegression) else None + m, v = model.predict(Xgrid, full_cov=False, Y_metadata=meta) + lower, upper = model.predict_quantiles(Xgrid, Y_metadata=meta) for d in which_data_ycols: From d4476b76c54969be105be43d207fe621e26e0283 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 10:28:24 +0000 Subject: [PATCH 095/116] Changes in likelihood definition --- GPy/models/gp_coregionalized_regression.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/GPy/models/gp_coregionalized_regression.py b/GPy/models/gp_coregionalized_regression.py index 313e09d4..6d478fd9 100644 --- a/GPy/models/gp_coregionalized_regression.py +++ b/GPy/models/gp_coregionalized_regression.py @@ -31,7 +31,7 @@ class GPCoregionalizedRegression(GP): def __init__(self, X_list, Y_list, kernel=None, likelihoods_list=None, name='GPCR',W_rank=1,kernel_name='X'): #Input and Output - X,Y,self.noise_index = util.multioutput.build_XY(X_list,Y_list) + X,Y,self.output_index = util.multioutput.build_XY(X_list,Y_list) Ny = len(Y_list) #Kernel @@ -39,6 +39,6 @@ class GPCoregionalizedRegression(GP): kernel = util.multioutput.ICM(input_dim=X.shape[1]-1, num_outputs=Ny, kernel=GPy.kern.rbf(X.shape[1]-1), W_rank=1,name=kernel_name) #Likelihood - likelihood = util.multioutput.build_likelihood(Y_list,self.noise_index,likelihoods_list) + likelihood = util.multioutput.build_likelihood(Y_list,self.output_index,likelihoods_list) - super(GPCoregionalizedRegression, self).__init__(X,Y,kernel,likelihood, noise_index=self.noise_index) + super(GPCoregionalizedRegression, self).__init__(X,Y,kernel,likelihood, Y_metadata={'output_index':self.output_index}) From f5a5d4b25ea432d9c1347fb083f705b19c9285ee Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 10:29:04 +0000 Subject: [PATCH 096/116] Re-definition of the week --- GPy/likelihoods/mixed_noise.py | 38 ++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/GPy/likelihoods/mixed_noise.py b/GPy/likelihoods/mixed_noise.py index 946cbaf6..bfcb5916 100644 --- a/GPy/likelihoods/mixed_noise.py +++ b/GPy/likelihoods/mixed_noise.py @@ -23,22 +23,22 @@ class MixedNoise(Likelihood): def exact_inference_gradients(self, dL_dKdiag, Y_metadata): assert all([isinstance(l, Gaussian) for l in self.likelihoods_list]) - ind = Y_metadata['output_index'] + ind = 
Y_metadata['output_index'].flatten() return np.array([dL_dKdiag[ind==i].sum() for i in range(len(self.likelihoods_list))]) def predictive_values(self, mu, var, full_cov=False, Y_metadata=None): if all([isinstance(l, Gaussian) for l in self.likelihoods_list]): - ind = Y_metadata['output_index'] + ind = Y_metadata['output_index'].flatten() _variance = np.array([self.likelihoods_list[j].variance for j in ind ]) if full_cov: var += np.eye(var.shape[0])*_variance - d = 2*np.sqrt(np.diag(var)) - low, up = mu - d, mu + d + #d = 2*np.sqrt(np.diag(var)) + #low, up = mu - d, mu + d else: var += _variance - d = 2*np.sqrt(var) - low, up = mu - d, mu + d - return mu, var, low, up + #d = 2*np.sqrt(var) + #low, up = mu - d, mu + d + return mu, var#, low, up else: raise NotImplementedError @@ -52,8 +52,28 @@ class MixedNoise(Likelihood): def covariance_matrix(self, Y, Y_metadata): assert all([isinstance(l, Gaussian) for l in self.likelihoods_list]) + ind = Y_metadata['output_index'].flatten() variance = np.zeros(Y.shape[0]) - for lik, ind in itertools.izip(self.likelihoods_list, self.likelihoods_indices): - variance[ind] = lik.variance + for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))): + variance[ind==j] = lik.variance return np.diag(variance) + + def samples(self, gp, Y_metadata): + """ + Returns a set of samples of observations based on a given value of the latent variable. + + :param gp: latent variable + """ + N1, N2 = gp.shape + Ysim = np.zeros((N1,N2)) + ind = Y_metadata['output_index'].flatten() + for j in np.unique(ind): + flt = ind==j + gp_filtered = gp[flt,:] + n1 = gp_filtered.shape[0] + lik = self.likelihoods_list[j] + _ysim = np.array([np.random.normal(lik.gp_link.transf(gpj), scale=np.sqrt(lik.variance), size=1) for gpj in gp_filtered.flatten()]) + Ysim[flt,:] = _ysim.reshape(n1,N2) + return Ysim + From 8d98652e8ba401cf20ce8dd9e159830a43c45125 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 10:29:25 +0000 Subject: [PATCH 097/116] Re-definition of the week --- GPy/likelihoods/likelihood.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index 3eafedb1..f8fc17d2 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -387,13 +387,14 @@ class Likelihood(Parameterized): return pred_mean, pred_var - def predictive_quantiles(self, mu, var, quantiles, Y_metadata): + def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None): #compute the quantiles by sampling!!! 
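The idea behind patch 097's quantile computation: rather than deriving closed-form quantiles for every likelihood, draw Gaussian samples of the latent f and push them through the likelihood's sampler, then read off empirical percentiles row by row. A sketch, with a Poisson observation model standing in for self.samples (that stand-in is an assumption):

    import numpy as np

    mu, var = np.zeros((4, 1)), np.ones((4, 1))   # per-row predictive moments of f
    N_samp = 1000
    s = np.random.randn(mu.shape[0], N_samp) * np.sqrt(var) + mu
    ss_y = np.random.poisson(np.exp(s))           # stand-in for self.samples(s, Y_metadata)
    lower, upper = [np.percentile(ss_y, q, axis=1)[:, None] for q in (2.5, 97.5)]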
         N_samp = 1000
         s = np.random.randn(mu.shape[0], N_samp)*np.sqrt(var) + mu
-        ss_f = s.flatten()
-        ss_y = self.samples(ss_f)
-        ss_y = ss_y.reshape(mu.shape[0], N_samp)
+        #ss_f = s.flatten()
+        #ss_y = self.samples(ss_f, Y_metadata)
+        ss_y = self.samples(s, Y_metadata)
+        #ss_y = ss_y.reshape(mu.shape[0], N_samp)
         return [np.percentile(ss_y ,q, axis=1)[:,None] for q in quantiles]

From bb6f9378815e18496122bdcfcd400e87f2d89dac Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Mon, 17 Mar 2014 10:30:18 +0000
Subject: [PATCH 098/116] Changes for compatibility with changes in likelihood

---
 GPy/likelihoods/gaussian.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index aaa356b6..101aac4b 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -56,7 +56,7 @@ class Gaussian(Likelihood):
     def update_gradients(self, grad):
         self.variance.gradient = grad

-    def exact_inference_gradients(self, dL_dKdiag):
+    def exact_inference_gradients(self, dL_dKdiag,Y_metadata=None):
         return dL_dKdiag.sum()

     def _preprocess_values(self, Y):
@@ -295,7 +295,7 @@ class Gaussian(Likelihood):
         """
         return self.variance

-    def samples(self, gp):
+    def samples(self, gp, Y_metadata=None):
         """
         Returns a set of samples of observations based on a given value
         of the latent variable.
@@ -303,6 +303,8 @@
         orig_shape = gp.shape
         gp = gp.flatten()
+        #orig_shape = gp.shape
+        gp = gp.flatten()
         Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp])
         return Ysim.reshape(orig_shape)

From 4692880cef3d891d772803389d97e08d41009e8d Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Mon, 17 Mar 2014 10:33:58 +0000
Subject: [PATCH 099/116] missing parameter in dL_dthetaL added

---
 .../latent_function_inference/exact_gaussian_inference.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
index e76575c6..c4ed0a0f 100644
--- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py
+++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
@@ -49,6 +49,6 @@
         dL_dK = 0.5 * (tdot(alpha) - Y.shape[1] * Wi)

-        dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK))
+        dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata)

         return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}

From d4735e54c83e0d0f7db5671ca3042cbc2f6544cc Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Mon, 17 Mar 2014 10:34:08 +0000
Subject: [PATCH 100/116] minor changes

---
 GPy/examples/coreg_example.py | 37 +++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/GPy/examples/coreg_example.py b/GPy/examples/coreg_example.py
index 967758c6..f0288f35 100644
--- a/GPy/examples/coreg_example.py
+++ b/GPy/examples/coreg_example.py
@@ -3,6 +3,42 @@
 import pylab as pb
 import GPy
 pb.ion()

+X1 = 100 * np.random.rand(3)[:,None]
+X2 = 100 * np.random.rand(4)[:,None]
+I1 = np.zeros_like(X1)
+I2 = np.ones_like(X2)
+
+_X = np.vstack([ X1, X2 ])
+_I = np.vstack([ I1, I2 ])
+
+X = np.hstack([ _X, _I ])
+
+Bias = GPy.kern.Bias(1,active_dims=[0])
+Coreg = GPy.kern.Coregionalize(1,2,active_dims=[1])
+K = Bias.prod(Coreg,name='X')
+
+K.coregion.W = 0
+print K.coregion.W
+
+print Bias.K(_X,_X)
+print K.K(X,X)
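What the example above is printing: a coregionalized kernel evaluates to B[i, j] * k(x, x'), where i and j are the output indices read from the extra input column and B = W W^T + diag(kappa) is the positive semi-definite coregionalization matrix. A sketch of B, with shapes assumed from the example (2 outputs, rank-1 W):

    import numpy as np

    num_outputs, rank = 2, 1
    W = np.random.randn(num_outputs, rank)
    kappa = 0.5 * np.ones(num_outputs)
    B = np.dot(W, W.T) + np.diag(kappa)   # B[i, j] scales k(x, x') for outputs i and j
    # with W = 0, as set above, B is diagonal and the outputs decouple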
+ +pb.matshow(K.K(X,X)) + +stop + +Mlist = [GPy.kern.Matern32(1,lengthscale=20.,name="Mat")] +kern = GPy.util.multioutput.LCM(input_dim=1,num_outputs=12,kernels_list=Mlist,name='H') + + +m = GPy.models.GPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2], kernel=kern) +m.optimize() + + + + +""" + X1 = 100 * np.random.rand(100)[:,None] X2 = 100 * np.random.rand(100)[:,None] #X1.sort() @@ -28,3 +64,4 @@ slices = GPy.util.multioutput.get_slices([Y1,Y2]) m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],ax=ax0) m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],ax=ax1) +""" From 62d594d9776f013b8900bb541adc051aaf1facd2 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 17 Mar 2014 15:43:09 +0000 Subject: [PATCH 101/116] slicing now returns the right shape, when computing derivative wrt X or Z --- GPy/kern/_src/add.py | 14 +++++--- GPy/kern/_src/kernel_slice_operations.py | 46 ++++++++++++++++++------ GPy/kern/_src/prod.py | 8 ++--- 3 files changed, 50 insertions(+), 18 deletions(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 97afd1f0..ca1f4533 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -58,7 +58,13 @@ class Add(CombinationKernel): :type X2: np.ndarray (num_inducing x input_dim)""" target = np.zeros(X.shape) - [target.__setitem__([Ellipsis, p.active_dims], target[:, p.active_dims]+p.gradients_X(dL_dK, X, X2)) for p in self.parts] + [target.__iadd__(p.gradients_X(dL_dK, X, X2)) for p in self.parts] + return target + + def gradients_X_diag(self, dL_dKdiag, X): + target = np.zeros(X.shape) + [target.__iadd__(p.gradients_X_diag(dL_dKdiag, X)) for p in self.parts] + #[target.__setitem__([Ellipsis, p.active_dims], target[:, p.active_dims]+p.gradients_X(dL_dK, X, X2)) for p in self.parts] return target def psi0(self, Z, variational_posterior): @@ -131,7 +137,7 @@ class Add(CombinationKernel): eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2. else: eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2. - target[:, p1.active_dims] += p1.gradients_Z_expectations(eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) + target += p1.gradients_Z_expectations(eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) return target def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): @@ -151,8 +157,8 @@ class Add(CombinationKernel): else: eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2. 
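The switch from indexed assignment to `__iadd__` in Add reflects the new contract introduced by patch 101's slicing wrapper: every part now returns a gradient array already embedded in the full input shape, with zeros outside its active dimensions, so the sum kernel simply accumulates. A toy sketch of that contract (shapes illustrative):

    import numpy as np

    X_shape = (5, 3)
    # stand-ins for p.gradients_X(...): full X shape, zeros outside active_dims
    g1 = np.zeros(X_shape); g1[:, [0, 2]] = 1.0
    g2 = np.zeros(X_shape); g2[:, [1]] = 2.0
    target = np.zeros(X_shape)
    for g in (g1, g2):
        target += g            # no per-part column indexing needed any more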
a, b = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) - target_mu[:, p1.active_dims] += a - target_S[:, p1.active_dims] += b + target_mu += a + target_S += b return target_mu, target_S def _getstate(self): diff --git a/GPy/kern/_src/kernel_slice_operations.py b/GPy/kern/_src/kernel_slice_operations.py index ff33cc24..c355ccad 100644 --- a/GPy/kern/_src/kernel_slice_operations.py +++ b/GPy/kern/_src/kernel_slice_operations.py @@ -4,6 +4,7 @@ Created on 11 Mar 2014 @author: maxz ''' from ...core.parameterization.parameterized import ParametersChangedMeta +import numpy as np class KernCallsViaSlicerMeta(ParametersChangedMeta): def __call__(self, *args, **kw): @@ -12,18 +13,18 @@ class KernCallsViaSlicerMeta(ParametersChangedMeta): instance.Kdiag = _slice_wrapper(instance, instance.Kdiag, diag=True) instance.update_gradients_full = _slice_wrapper(instance, instance.update_gradients_full, diag=False, derivative=True) instance.update_gradients_diag = _slice_wrapper(instance, instance.update_gradients_diag, diag=True, derivative=True) - instance.gradients_X = _slice_wrapper(instance, instance.gradients_X, diag=False, derivative=True) - instance.gradients_X_diag = _slice_wrapper(instance, instance.gradients_X_diag, diag=True, derivative=True) + instance.gradients_X = _slice_wrapper(instance, instance.gradients_X, diag=False, derivative=True, ret_X=True) + instance.gradients_X_diag = _slice_wrapper(instance, instance.gradients_X_diag, diag=True, derivative=True, ret_X=True) instance.psi0 = _slice_wrapper(instance, instance.psi0, diag=False, derivative=False) instance.psi1 = _slice_wrapper(instance, instance.psi1, diag=False, derivative=False) instance.psi2 = _slice_wrapper(instance, instance.psi2, diag=False, derivative=False) instance.update_gradients_expectations = _slice_wrapper(instance, instance.update_gradients_expectations, derivative=True, psi_stat=True) - instance.gradients_Z_expectations = _slice_wrapper(instance, instance.gradients_Z_expectations, derivative=True, psi_stat_Z=True) - instance.gradients_qX_expectations = _slice_wrapper(instance, instance.gradients_qX_expectations, derivative=True, psi_stat=True) + instance.gradients_Z_expectations = _slice_wrapper(instance, instance.gradients_Z_expectations, derivative=True, psi_stat_Z=True, ret_X=True) + instance.gradients_qX_expectations = _slice_wrapper(instance, instance.gradients_qX_expectations, derivative=True, psi_stat=True, ret_X=True) instance.parameters_changed() return instance -def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False, psi_stat_Z=False): +def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False, psi_stat_Z=False, ret_X=False): """ This method wraps the functions in kernel to make sure all kernels allways see their respective input dimension. 
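The ret_X branches added below all implement the same scatter: run the wrapped operation on the sliced columns, then embed its (n x len(active_dims)) result back into an array shaped like the full input. Sketched in isolation (names are ours):

    import numpy as np

    X = np.random.randn(6, 4)
    active_dims = [0, 2]
    grad_sliced = np.random.randn(X.shape[0], len(active_dims))  # stand-in for the wrapped op
    ret = np.zeros(X.shape)
    ret[:, active_dims] = grad_sliced   # inactive dimensions keep zero gradient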
The different switches are: @@ -34,11 +35,16 @@ def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False """ if derivative: if diag: - def x_slice_wrapper(dL_dK, X): + def x_slice_wrapper(dL_dKdiag, X): + ret_X_not_sliced = ret_X and kern._sliced_X == 0 + if ret_X_not_sliced: + ret = np.zeros(X.shape) X = kern._slice_X(X) if not kern._sliced_X else X + # if the return value is of shape X.shape, we need to make sure to return the right shape kern._sliced_X += 1 try: - ret = operation(dL_dK, X) + if ret_X_not_sliced: ret[:, kern.active_dims] = operation(dL_dKdiag, X) + else: ret = operation(dL_dKdiag, X) except: raise finally: @@ -46,10 +52,22 @@ def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False return ret elif psi_stat: def x_slice_wrapper(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): + ret_X_not_sliced = ret_X and kern._sliced_X == 0 + if ret_X_not_sliced: + ret1, ret2 = np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape) Z, variational_posterior = kern._slice_X(Z) if not kern._sliced_X else Z, kern._slice_X(variational_posterior) if not kern._sliced_X else variational_posterior kern._sliced_X += 1 + # if the return value is of shape X.shape, we need to make sure to return the right shape try: - ret = operation(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior) + if ret_X_not_sliced: + ret = list(operation(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)) + r2 = ret[:2] + ret[0] = ret1 + ret[1] = ret2 + ret[0][:, kern.active_dims] = r2[0] + ret[1][:, kern.active_dims] = r2[1] + del r2 + else: ret = operation(dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior) except: raise finally: @@ -57,10 +75,14 @@ def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False return ret elif psi_stat_Z: def x_slice_wrapper(dL_dpsi1, dL_dpsi2, Z, variational_posterior): + ret_X_not_sliced = ret_X and kern._sliced_X == 0 + if ret_X_not_sliced: ret = np.zeros(Z.shape) Z, variational_posterior = kern._slice_X(Z) if not kern._sliced_X else Z, kern._slice_X(variational_posterior) if not kern._sliced_X else variational_posterior kern._sliced_X += 1 try: - ret = operation(dL_dpsi1, dL_dpsi2, Z, variational_posterior) + if ret_X_not_sliced: + ret[:, kern.active_dims] = operation(dL_dpsi1, dL_dpsi2, Z, variational_posterior) + else: ret = operation(dL_dpsi1, dL_dpsi2, Z, variational_posterior) except: raise finally: @@ -68,10 +90,14 @@ def _slice_wrapper(kern, operation, diag=False, derivative=False, psi_stat=False return ret else: def x_slice_wrapper(dL_dK, X, X2=None): + ret_X_not_sliced = ret_X and kern._sliced_X == 0 + if ret_X_not_sliced: + ret = np.zeros(X.shape) X, X2 = kern._slice_X(X) if not kern._sliced_X else X, kern._slice_X(X2) if X2 is not None and not kern._sliced_X else X2 kern._sliced_X += 1 try: - ret = operation(dL_dK, X, X2) + if ret_X_not_sliced: ret[:, kern.active_dims] = operation(dL_dK, X, X2) + else: ret = operation(dL_dK, X, X2) except: raise finally: diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index f3b2b50f..e00f38c3 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -51,15 +51,15 @@ class Prod(CombinationKernel): def gradients_X(self, dL_dK, X, X2=None): target = np.zeros(X.shape) for k1,k2 in itertools.combinations(self.parts, 2): - target[:,k1.active_dims] += k1.gradients_X(dL_dK*k2.K(X, X2), X, X2) - target[:,k2.active_dims] += k2.gradients_X(dL_dK*k1.K(X, X2), X, X2) + target += k1.gradients_X(dL_dK*k2.K(X, X2), X, X2) + target 
+= k2.gradients_X(dL_dK*k1.K(X, X2), X, X2) return target def gradients_X_diag(self, dL_dKdiag, X): target = np.zeros(X.shape) for k1,k2 in itertools.combinations(self.parts, 2): - target[:,k1.active_dims] += k1.gradients_X(dL_dKdiag*k2.Kdiag(X), X) - target[:,k2.active_dims] += k2.gradients_X(dL_dKdiag*k1.Kdiag(X), X) + target += k1.gradients_X(dL_dKdiag*k2.Kdiag(X), X) + target += k2.gradients_X(dL_dKdiag*k1.Kdiag(X), X) return target From f2d5ee42eb05d5aaf517656b46f9dd6a59dd378a Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Mon, 17 Mar 2014 16:09:25 +0000 Subject: [PATCH 102/116] fix the bug regarding to the change of the name dL_dthetaL --- GPy/models/bayesian_gplvm.py | 2 +- GPy/models/ss_gplvm.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py index 3617e260..e0818a2f 100644 --- a/GPy/models/bayesian_gplvm.py +++ b/GPy/models/bayesian_gplvm.py @@ -66,7 +66,7 @@ class BayesianGPLVM(SparseGP): super(BayesianGPLVM, self).parameters_changed() self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X) - self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict) + self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2']) # update for the KL divergence self.variational_prior.update_gradients_KL(self.X) diff --git a/GPy/models/ss_gplvm.py b/GPy/models/ss_gplvm.py index 5994814b..1c2ecf4c 100644 --- a/GPy/models/ss_gplvm.py +++ b/GPy/models/ss_gplvm.py @@ -61,7 +61,7 @@ class SSGPLVM(SparseGP): super(SSGPLVM, self).parameters_changed() self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X) - self.X.mean.gradient, self.X.variance.gradient, self.X.binary_prob.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict) + self.X.mean.gradient, self.X.variance.gradient, self.X.binary_prob.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2']) # update for the KL divergence self.variational_prior.update_gradients_KL(self.X) From 19dc7cecf45dba1a617e634531372bd899266bd2 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 17 Mar 2014 16:22:16 +0000 Subject: [PATCH 103/116] slicing finished with independent outputs --- GPy/kern/_src/add.py | 1 - GPy/kern/_src/independent_outputs.py | 88 +++++++++++++++++++--------- GPy/testing/kernel_tests.py | 80 +++++++++++++++++-------- 3 files changed, 115 insertions(+), 54 deletions(-) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index ca1f4533..d1fd7cb8 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -64,7 +64,6 @@ class Add(CombinationKernel): def gradients_X_diag(self, dL_dKdiag, X): target = np.zeros(X.shape) [target.__iadd__(p.gradients_X_diag(dL_dKdiag, X)) for p in self.parts] - #[target.__setitem__([Ellipsis, p.active_dims], target[:, p.active_dims]+p.gradients_X(dL_dK, X, X2)) for p in self.parts] return target def psi0(self, Z, variational_posterior): diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 1848bf6a..cf015d02 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ 
-39,72 +39,102 @@ class IndependentOutputs(CombinationKernel): The index of the functions is given by the last column in the input X the rest of the columns of X are passed to the underlying kernel for computation (in blocks). - + + :param kernels: either a kernel, or list of kernels to work with. If it is a list of kernels + the indices in the index_dim, index the kernels you gave! """ - def __init__(self, kern, index_dim=-1, name='independ'): + def __init__(self, kernels, index_dim=-1, name='independ'): assert isinstance(index_dim, int), "IndependentOutputs kernel is only defined with one input dimension being the indeces" - super(IndependentOutputs, self).__init__(kernels=[kern], extra_dims=[index_dim], name=name) + if not isinstance(kernels, list): + self.single_kern = True + self.kern = kernels + kernels = [kernels] + else: + self.single_kern = False + self.kern = kernels + super(IndependentOutputs, self).__init__(kernels=kernels, extra_dims=[index_dim], name=name) self.index_dim = index_dim - self.kern = kern - #self.add_parameters(self.kern) + self.kerns = kernels if len(kernels) != 1 else itertools.repeat(kernels[0]) def K(self,X ,X2=None): slices = index_to_slices(X[:,self.index_dim]) if X2 is None: target = np.zeros((X.shape[0], X.shape[0])) - [[np.copyto(target[s,ss], self.kern.K(X[s,:], X[ss,:])) for s,ss in itertools.product(slices_i, slices_i)] for slices_i in slices] + [[target.__setitem__((s,ss), kern.K(X[s,:], X[ss,:])) for s,ss in itertools.product(slices_i, slices_i)] for kern, slices_i in zip(self.kerns, slices)] else: slices2 = index_to_slices(X2[:,self.index_dim]) target = np.zeros((X.shape[0], X2.shape[0])) - [[[np.copyto(target[s, s2], self.kern.K(X[s,:],X2[s2,:])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] + [[target.__setitem__((s,s2), kern.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices_i, slices_j)] for kern, slices_i,slices_j in zip(self.kerns, slices,slices2)] return target def Kdiag(self,X): slices = index_to_slices(X[:,self.index_dim]) target = np.zeros(X.shape[0]) - [[np.copyto(target[s], self.kern.Kdiag(X[s])) for s in slices_i] for slices_i in slices] + [[np.copyto(target[s], kern.Kdiag(X[s])) for s in slices_i] for kern, slices_i in zip(self.kerns, slices)] return target def update_gradients_full(self,dL_dK,X,X2=None): - target = np.zeros(self.kern.size) - def collate_grads(dL, X, X2): - self.kern.update_gradients_full(dL,X,X2) - target[:] += self.kern.gradient - slices = index_to_slices(X[:,self.index_dim]) + if self.single_kern: target = np.zeros(self.kern.size) + else: target = [np.zeros(kern.size) for kern, _ in zip(self.kerns, slices)] + def collate_grads(kern, i, dL, X, X2): + kern.update_gradients_full(dL,X,X2) + if self.single_kern: target[:] += kern.gradient + else: target[i][:] += kern.gradient if X2 is None: - [[collate_grads(dL_dK[s,ss], X[s], X[ss]) for s,ss in itertools.product(slices_i, slices_i)] for slices_i in slices] + [[collate_grads(kern, i, dL_dK[s,ss], X[s], X[ss]) for s,ss in itertools.product(slices_i, slices_i)] for i,(kern,slices_i) in enumerate(zip(self.kerns,slices))] else: slices2 = index_to_slices(X2[:,self.index_dim]) - [[[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] - self.kern.gradient = target + [[[collate_grads(kern, i, dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for i,(kern,slices_i,slices_j) in enumerate(zip(self.kerns,slices,slices2))] + if self.single_kern: 
kern.gradient = target + else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(self.kerns, slices))] def gradients_X(self,dL_dK, X, X2=None): target = np.zeros(X.shape) - slices = index_to_slices(X[:,self.index_dim]) if X2 is None: - [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,ss],X[s],X[ss])) for s, ss in itertools.product(slices_i, slices_i)] for slices_i in slices] + # TODO: make use of index_to_slices + values = np.unique(X[:,self.index_dim]) + slices = [X[:,self.index_dim]==i for i in values] + [target.__setitem__(s, kern.gradients_X(dL_dK[s,s],X[s],None)) + for kern, s in zip(self.kerns, slices)] + #slices = index_to_slices(X[:,self.index_dim]) + #[[np.add(target[s], kern.gradients_X(dL_dK[s,s], X[s]), out=target[s]) + # for s in slices_i] for kern, slices_i in zip(self.kerns, slices)] + #import ipdb;ipdb.set_trace() + #[[(np.add(target[s ], kern.gradients_X(dL_dK[s ,ss],X[s ], X[ss]), out=target[s ]), + # np.add(target[ss], kern.gradients_X(dL_dK[ss,s ],X[ss], X[s ]), out=target[ss])) + # for s, ss in itertools.combinations(slices_i, 2)] for kern, slices_i in zip(self.kerns, slices)] else: - slices2 = index_to_slices(X2[:,self.index_dim]) - [[[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,s2], X[s], X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)] + values = np.unique(X[:,self.index_dim]) + slices = [X[:,self.index_dim]==i for i in values] + slices2 = [X2[:,self.index_dim]==i for i in values] + [target.__setitem__(s, kern.gradients_X(dL_dK[s, :][:, s2],X[s],X2[s2])) + for kern, s, s2 in zip(self.kerns, slices, slices2)] + # TODO: make work with index_to_slices + #slices = index_to_slices(X[:,self.index_dim]) + #slices2 = index_to_slices(X2[:,self.index_dim]) + #[[target.__setitem__(s, target[s] + kern.gradients_X(dL_dK[s,s2], X[s], X2[s2])) for s, s2 in itertools.product(slices_i, slices_j)] for kern, slices_i,slices_j in zip(self.kerns, slices,slices2)] return target def gradients_X_diag(self, dL_dKdiag, X): slices = index_to_slices(X[:,self.index_dim]) target = np.zeros(X.shape) - [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X_diag(dL_dKdiag[s],X[s])) for s in slices_i] for slices_i in slices] + [[target.__setitem__(s, kern.gradients_X_diag(dL_dKdiag[s],X[s])) for s in slices_i] for kern, slices_i in zip(self.kerns, slices)] return target def update_gradients_diag(self, dL_dKdiag, X): - target = np.zeros(self.kern.size) - def collate_grads(dL, X): - self.kern.update_gradients_diag(dL,X) - target[:] += self.kern.gradient slices = index_to_slices(X[:,self.index_dim]) - [[collate_grads(dL_dKdiag[s], X[s,:]) for s in slices_i] for slices_i in slices] - self.kern.gradient = target + if self.single_kern: target = np.zeros(self.kern.size) + else: target = [np.zeros(kern.size) for kern, _ in zip(self.kerns, slices)] + def collate_grads(kern, i, dL, X): + kern.update_gradients_diag(dL,X) + if self.single_kern: target[:] += kern.gradient + else: target[i][:] += kern.gradient + [[collate_grads(kern, i, dL_dKdiag[s], X[s,:]) for s in slices_i] for i, (kern, slices_i) in enumerate(zip(self.kerns, slices))] + if self.single_kern: kern.gradient = target + else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(self.kerns, slices))] -class Hierarchical(Kern): +class Hierarchical(CombinationKernel): """ A kernel which can reopresent a simple hierarchical model. 
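The rewritten gradients_X above trades index_to_slices for boolean masks: rows sharing an output index only covary with each other, so each sub-kernel of patch 103's IndependentOutputs sees just its own block. A sketch of that block structure (values illustrative):

    import numpy as np

    index = np.array([0, 0, 1, 1, 1])          # last column of X
    K = np.zeros((index.size, index.size))
    for i in np.unique(index):
        m = index == i
        K[np.ix_(m, m)] = 1.0                  # stand-in for kern.K on that block
    # off-diagonal blocks stay zero: different outputs are independent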
@@ -115,7 +145,7 @@ class Hierarchical(Kern): The index of the functions is given by additional columns in the input X. """ - def __init__(self, kerns, name='hierarchy'): + def __init__(self, kern, name='hierarchy'): assert all([k.input_dim==kerns[0].input_dim for k in kerns]) super(Hierarchical, self).__init__(kerns[0].input_dim + len(kerns) - 1, name) self.kerns = kerns diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index b057f8ef..b45d9919 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -94,7 +94,7 @@ class Kern_check_dKdiag_dX(Kern_check_dK_dX): -def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verbose=False): +def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verbose=False, fixed_X_dims=None): """ This function runs on kernels to check the correctness of their implementation. It checks that the covariance function is positive definite @@ -109,11 +109,11 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb """ pass_checks = True - if X==None: + if X is None: X = np.random.randn(10, kern.input_dim) if output_ind is not None: X[:, output_ind] = np.random.randint(kern.output_dim, X.shape[0]) - if X2==None: + if X2 is None: X2 = np.random.randn(20, kern.input_dim) if output_ind is not None: X2[:, output_ind] = np.random.randint(kern.output_dim, X2.shape[0]) @@ -164,7 +164,10 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb if verbose: print("Checking gradients of K(X, X) wrt X.") try: - result = Kern_check_dK_dX(kern, X=X, X2=None).checkgrad(verbose=verbose) + testmodel = Kern_check_dK_dX(kern, X=X, X2=None) + if fixed_X_dims is not None: + testmodel.X[:,fixed_X_dims].fix() + result = testmodel.checkgrad(verbose=verbose) except NotImplementedError: result=True if verbose: @@ -173,14 +176,17 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb print("Check passed.") if not result: print("Gradient of K(X, X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:") - Kern_check_dK_dX(kern, X=X, X2=None).checkgrad(verbose=True) + testmodel.checkgrad(verbose=True) pass_checks = False return False if verbose: print("Checking gradients of K(X, X2) wrt X.") try: - result = Kern_check_dK_dX(kern, X=X, X2=X2).checkgrad(verbose=verbose) + testmodel = Kern_check_dK_dX(kern, X=X, X2=X2) + if fixed_X_dims is not None: + testmodel.X[:,fixed_X_dims].fix() + result = testmodel.checkgrad(verbose=verbose) except NotImplementedError: result=True if verbose: @@ -188,8 +194,8 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb if result and verbose: print("Check passed.") if not result: - print("Gradient of K(X, X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:") - Kern_check_dK_dX(kern, X=X, X2=X2).checkgrad(verbose=True) + print("Gradient of K(X, X2) wrt X failed for " + kern.name + " covariance function. 
Gradient values as follows:") + testmodel.checkgrad(verbose=True) pass_checks = False return False @@ -300,24 +306,50 @@ class KernelTestsMiscellaneous(unittest.TestCase): class KernelTestsNonContinuous(unittest.TestCase): def setUp(self): - N = 100 - N1 = 110 - self.D = 2 - D = self.D - self.X = np.random.randn(N,D) - self.X2 = np.random.randn(N1,D) - self.X_block = np.zeros((N+N1, D+D+1)) - self.X_block[0:N, 0:D] = self.X - self.X_block[N:N+N1, D:D+D] = self.X2 - self.X_block[0:N, -1] = 1 - self.X_block[N:N+1, -1] = 2 - self.X_block = self.X_block[self.X_block.argsort(0)[:, -1], :] - + N0 = 3 + N1 = 9 + N2 = 4 + N = N0+N1+N2 + self.D = 3 + self.X = np.random.randn(N, self.D+1) + indices = np.random.random_integers(0, 2, size=N) + self.X[indices==0, -1] = 0 + self.X[indices==1, -1] = 1 + self.X[indices==2, -1] = 2 + #self.X = self.X[self.X[:, -1].argsort(), :] + self.X2 = np.random.randn((N0+N1)*2, self.D+1) + self.X2[:(N0*2), -1] = 0 + self.X2[(N0*2):, -1] = 1 + def test_IndependentOutputs(self): k = GPy.kern.RBF(self.D) - kern = GPy.kern.IndependentOutputs(k, -1) - self.assertTrue(check_kernel_gradient_functions(kern, X=self.X_block, X2=self.X_block, verbose=verbose)) + kern = GPy.kern.IndependentOutputs(k, -1, 'ind_single') + self.assertTrue(check_kernel_gradient_functions(kern, X=self.X, X2=self.X2, verbose=verbose, fixed_X_dims=-1)) + k = [GPy.kern.RBF(1, active_dims=[1], name='rbf1'), GPy.kern.RBF(self.D, name='rbf012'), GPy.kern.RBF(2, active_dims=[0,2], name='rbf02')] + kern = GPy.kern.IndependentOutputs(k, -1, name='ind_split') + self.assertTrue(check_kernel_gradient_functions(kern, X=self.X, X2=self.X2, verbose=verbose, fixed_X_dims=-1)) if __name__ == "__main__": print "Running unit tests, please be (very) patient..." - unittest.main() + #unittest.main() + np.random.seed(0) + N0 = 3 + N1 = 9 + N2 = 4 + N = N0+N1+N2 + D = 3 + X = np.random.randn(N, D+1) + indices = np.random.random_integers(0, 2, size=N) + X[indices==0, -1] = 0 + X[indices==1, -1] = 1 + X[indices==2, -1] = 2 + #X = X[X[:, -1].argsort(), :] + X2 = np.random.randn((N0+N1)*2, D+1) + X2[:(N0*2), -1] = 0 + X2[(N0*2):, -1] = 1 + k = [GPy.kern.RBF(1, active_dims=[1], name='rbf1'), GPy.kern.RBF(D, name='rbf012'), GPy.kern.RBF(2, active_dims=[0,2], name='rbf02')] + kern = GPy.kern.IndependentOutputs(k, -1, name='ind_split') + assert(check_kernel_gradient_functions(kern, X=X, X2=X2, verbose=verbose, fixed_X_dims=-1)) + k = GPy.kern.RBF(D) + kern = GPy.kern.IndependentOutputs(k, -1, 'ind_single') + assert(check_kernel_gradient_functions(kern, X=X, X2=X2, verbose=verbose, fixed_X_dims=-1)) From caf1dc2609259d48af32d46009d09e8906961427 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Mon, 17 Mar 2014 16:38:04 +0000 Subject: [PATCH 104/116] dL_dthetaL in missing data vardtc --- GPy/inference/latent_function_inference/var_dtc.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index 7a0c14e8..9b6e26c0 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -65,7 +65,7 @@ class VarDTC(object): _, output_dim = Y.shape #see whether we've got a different noise variance for each datum - beta = 1./np.fmax(likelihood.variance, 1e-6) + beta = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6) # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency! 
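The gaussian_variance(Y_metadata) call above is what lets patch 104's inference run both homo- and heteroscedastically: a plain Gaussian likelihood returns a scalar variance, a mixed-noise likelihood one variance per row, and both broadcast through the same precision computation. A sketch:

    import numpy as np

    scalar_variance = np.array([0.1])                 # homoscedastic case
    per_row_variance = np.array([0.1, 2.0, 1e-9])     # heteroscedastic case
    for v in (scalar_variance, per_row_variance):
        beta = 1. / np.fmax(v, 1e-6)                  # floor keeps the precision finite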
#self.YYTfactor = self.get_YYTfactor(Y) #VVT_factor = self.get_VVTfactor(self.YYTfactor, beta) @@ -221,7 +221,7 @@ class VarDTCMissingData(object): psi2_all = None Ys, traces = self._Y(Y) - beta_all = 1./np.fmax(likelihood.variance, 1e-6) + beta_all = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6) het_noise = beta_all.size != 1 import itertools @@ -328,18 +328,20 @@ class VarDTCMissingData(object): diag.add(Bi, 1) woodbury_inv_all[:, :, ind] = backsub_both_sides(Lm, Bi)[:,:,None] + dL_dthetaL = likelihood.exact_inference_gradients(dL_dR) + # gradients: if uncertain_inputs: grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dpsi0':dL_dpsi0_all, 'dL_dpsi1':dL_dpsi1_all, 'dL_dpsi2':dL_dpsi2_all, - 'dL_dR':dL_dR} + 'dL_dthetaL':dL_dthetaL} else: grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dKdiag':dL_dpsi0_all, 'dL_dKnm':dL_dpsi1_all, - 'dL_dR':dL_dR} + 'dL_dthetaL':dL_dthetaL} #get sufficient things for posterior prediction #TODO: do we really want to do this in the loop? From 2ce3a93b3f38be176815d74ea47c2cb9bf128b33 Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 17 Mar 2014 16:55:21 +0000 Subject: [PATCH 105/116] pickling working for array-likes, but observers not yet connected back --- GPy/core/parameterization/array_core.py | 12 ++++++++-- GPy/core/parameterization/param.py | 4 +++- GPy/core/parameterization/parameter_core.py | 7 +++--- GPy/testing/observable_tests.py | 26 ++++++++++----------- GPy/testing/parameterized_tests.py | 7 +++++- 5 files changed, 35 insertions(+), 21 deletions(-) diff --git a/GPy/core/parameterization/array_core.py b/GPy/core/parameterization/array_core.py index e3a5b137..6920e894 100644 --- a/GPy/core/parameterization/array_core.py +++ b/GPy/core/parameterization/array_core.py @@ -1,7 +1,7 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -__updated__ = '2013-12-16' +__updated__ = '2014-03-17' import numpy as np from parameter_core import Observable @@ -18,7 +18,7 @@ class ObservableArray(np.ndarray, Observable): if not isinstance(input_array, ObservableArray): obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['W', 'C'])).view(cls) else: obj = input_array - cls.__name__ = "ObservableArray\n " + cls.__name__ = "ObsAr" # because of fixed printing of `array` in np printing super(ObservableArray, obj).__init__(*a, **kw) return obj @@ -30,6 +30,14 @@ class ObservableArray(np.ndarray, Observable): def __array_wrap__(self, out_arr, context=None): return out_arr.view(np.ndarray) + def __reduce__(self): + func, args, state = np.ndarray.__reduce__(self) + return func, args, (state, Observable._getstate(self)) + + def __setstate__(self, state): + np.ndarray.__setstate__(self, state[0]) + Observable._setstate(self, state[1]) + def _s_not_empty(self, s): # this checks whether there is something picked by this slice. 
return True diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index 2ede8436..ed394806 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -269,6 +269,8 @@ class Param(OptimizationHandlable, ObservableArray): @property def _ties_str(self): return [''] + def _ties_for(self, ravi): + return [['N/A']]*ravi.size def __repr__(self, *args, **kwargs): name = "\033[1m{x:s}\033[0;0m:\n".format( x=self.hierarchy_name()) @@ -312,7 +314,7 @@ class Param(OptimizationHandlable, ObservableArray): ravi = self._raveled_index(filter_) if constr_matrix is None: constr_matrix = self.constraints.properties_for(ravi) if prirs is None: prirs = self.priors.properties_for(ravi) - if ties is None: ties = [['N/A']]*self.size + if ties is None: ties = self._ties_for(ravi) ties = [' '.join(map(lambda x: x, t)) for t in ties] if lc is None: lc = self._max_len_names(constr_matrix, __constraints_name__) if lx is None: lx = self._max_len_values() diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index f58143bd..0aab890c 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -16,7 +16,7 @@ Observable Pattern for patameterization from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED import numpy as np -__updated__ = '2014-03-14' +__updated__ = '2014-03-17' class HierarchyError(Exception): """ @@ -56,7 +56,7 @@ class InterfacePickleFunctions(object): """ raise NotImplementedError, "To be able to use pickling you need to implement this method" -class Pickleable(object): +class Pickleable(InterfacePickleFunctions): """ Make an object pickleable (See python doc 'pickling'). @@ -95,7 +95,7 @@ class Pickleable(object): def _has_get_set_state(self): return '_getstate' in vars(self.__class__) and '_setstate' in vars(self.__class__) -class Observable(InterfacePickleFunctions): +class Observable(Pickleable): """ Observable pattern for parameterization. 
@@ -155,6 +155,7 @@ class Observable(InterfacePickleFunctions): def _getstate(self): return [self._observer_callables_] + def _setstate(self, state): self._observer_callables_ = state.pop() diff --git a/GPy/testing/observable_tests.py b/GPy/testing/observable_tests.py index ebda1630..f8be4a48 100644 --- a/GPy/testing/observable_tests.py +++ b/GPy/testing/observable_tests.py @@ -8,7 +8,7 @@ from GPy.core.parameterization.parameterized import Parameterized from GPy.core.parameterization.param import Param import numpy -# One trigger in init +# One trigger in init _trigger_start = -1 class ParamTestParent(Parameterized): @@ -21,11 +21,9 @@ class ParameterizedTest(Parameterized): params_changed_count = _trigger_start def parameters_changed(self): self.params_changed_count += 1 - def _set_params(self, params, trigger_parent=True): - Parameterized._set_params(self, params, trigger_parent=trigger_parent) class Test(unittest.TestCase): - + def setUp(self): self.parent = ParamTestParent('test parent') self.par = ParameterizedTest('test model') @@ -41,12 +39,12 @@ class Test(unittest.TestCase): self.parent.add_parameter(self.par) self.parent.add_parameter(self.par2) - + self._observer_triggered = None self._trigger_count = 0 self._first = None self._second = None - + def _trigger(self, which): self._observer_triggered = float(which) self._trigger_count += 1 @@ -54,18 +52,18 @@ class Test(unittest.TestCase): self._second = self._trigger else: self._first = self._trigger - + def _trigger_priority(self, which): if self._first is not None: self._second = self._trigger_priority else: self._first = self._trigger_priority - + def test_observable(self): self.par.add_observer(self, self._trigger, -1) self.assertEqual(self.par.params_changed_count, 0, 'no params changed yet') self.assertEqual(self.par.params_changed_count, self.parent.parent_changed_count, 'parent should be triggered as often as param') - + self.p[0,1] = 3 # trigger observers self.assertEqual(self._observer_triggered, 3, 'observer should have triggered') self.assertEqual(self._trigger_count, 1, 'observer should have triggered once') @@ -78,14 +76,14 @@ class Test(unittest.TestCase): self.assertEqual(self._trigger_count, 1, 'observer should have triggered once') self.assertEqual(self.par.params_changed_count, 2, 'params changed second') self.assertEqual(self.par.params_changed_count, self.parent.parent_changed_count, 'parent should be triggered as often as param') - + self.par.add_observer(self, self._trigger, -1) self.p[2,1] = 4 self.assertEqual(self._observer_triggered, 4, 'observer should have triggered') self.assertEqual(self._trigger_count, 2, 'observer should have triggered once') self.assertEqual(self.par.params_changed_count, 3, 'params changed second') self.assertEqual(self.par.params_changed_count, self.parent.parent_changed_count, 'parent should be triggered as often as param') - + self.par.remove_observer(self, self._trigger) self.p[0,1] = 3 self.assertEqual(self._observer_triggered, 4, 'observer should not have triggered') @@ -99,7 +97,7 @@ class Test(unittest.TestCase): self.par._trigger_params_changed() self.assertEqual(self.par.params_changed_count, 1, 'now params changed') self.assertEqual(self.parent.parent_changed_count, self.par.params_changed_count) - + self.par._param_array_[:] = 2 self.par._trigger_params_changed() self.assertEqual(self.par.params_changed_count, 2, 'now params changed') @@ -125,13 +123,13 @@ class Test(unittest.TestCase): self.par.remove_observer(self) self._first = self._second = None - + 
self.par.add_observer(self, self._trigger, 1) self.par.add_observer(self, self._trigger_priority, 0) self.par.notify_observers(0) self.assertEqual(self._first, self._trigger, 'priority should be second') self.assertEqual(self._second, self._trigger_priority, 'priority should be second') - + if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index 5b718cbd..754e95db 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -108,7 +108,7 @@ class ParameterizedTest(unittest.TestCase): self.assertEqual(self.param.constraints._offset, 3) def test_fixing_randomize(self): - self.white.fix(warning=False) + self.white.fix(warning=True) val = float(self.test1.white.variance) self.test1.randomize() self.assertEqual(val, self.white.variance) @@ -119,6 +119,11 @@ class ParameterizedTest(unittest.TestCase): self.testmodel.randomize() self.assertEqual(val, self.testmodel.kern.lengthscale) + def test_printing(self): + print self.test1 + print self.param + print self.test1[''] + if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.test_add_parameter'] unittest.main() \ No newline at end of file From 64f44cf1796eb8ba4c0e794ad4d2183e663ead4e Mon Sep 17 00:00:00 2001 From: Max Zwiessele Date: Mon, 17 Mar 2014 17:10:06 +0000 Subject: [PATCH 106/116] ObservableArray -> ObsAr, because of pickling and ndarray printing --- GPy/core/gp.py | 10 +++++----- GPy/core/parameterization/__init__.py | 2 +- GPy/core/parameterization/array_core.py | 14 +++++++------- GPy/core/parameterization/param.py | 4 ++-- GPy/likelihoods/gaussian.py | 3 +++ GPy/models/gp_regression.py | 4 ++-- GPy/testing/parameterized_tests.py | 8 ++++---- 7 files changed, 24 insertions(+), 21 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 70b7d695..38019fa7 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -7,7 +7,7 @@ import warnings from .. import kern from ..util.linalg import dtrtrs from model import Model -from parameterization import ObservableArray +from parameterization import ObsAr from .. import likelihoods from ..likelihoods.gaussian import Gaussian from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation @@ -31,19 +31,19 @@ class GP(Model): super(GP, self).__init__(name) assert X.ndim == 2 - if isinstance(X, (ObservableArray, VariationalPosterior)): + if isinstance(X, (ObsAr, VariationalPosterior)): self.X = X - else: self.X = ObservableArray(X) + else: self.X = ObsAr(X) self.num_data, self.input_dim = self.X.shape assert Y.ndim == 2 - self.Y = ObservableArray(Y) + self.Y = ObsAr(Y) assert Y.shape[0] == self.num_data _, self.output_dim = self.Y.shape if Y_metadata is None: - Y_metadata = {} + self.Y_metadata = {} else: self.Y_metadata = Y_metadata diff --git a/GPy/core/parameterization/__init__.py b/GPy/core/parameterization/__init__.py index ccbac39d..8e9aa094 100644 --- a/GPy/core/parameterization/__init__.py +++ b/GPy/core/parameterization/__init__.py @@ -1,5 +1,5 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from param import Param, ObservableArray +from param import Param, ObsAr from parameterized import Parameterized diff --git a/GPy/core/parameterization/array_core.py b/GPy/core/parameterization/array_core.py index 6920e894..a120f004 100644 --- a/GPy/core/parameterization/array_core.py +++ b/GPy/core/parameterization/array_core.py @@ -6,20 +6,20 @@ __updated__ = '2014-03-17' import numpy as np from parameter_core import Observable -class ObservableArray(np.ndarray, Observable): +class ObsAr(np.ndarray, Observable): """ An ndarray which reports changes to its observers. The observers can add themselves with a callable, which will be called every time this array changes. The callable takes exactly one argument, which is this array itself. """ - __array_priority__ = -1 # Never give back ObservableArray + __array_priority__ = -1 # Never give back ObsAr def __new__(cls, input_array, *a, **kw): - if not isinstance(input_array, ObservableArray): + if not isinstance(input_array, ObsAr): obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['W', 'C'])).view(cls) else: obj = input_array - cls.__name__ = "ObsAr" # because of fixed printing of `array` in np printing - super(ObservableArray, obj).__init__(*a, **kw) + #cls.__name__ = "ObsAr" # because of fixed printing of `array` in np printing + super(ObsAr, obj).__init__(*a, **kw) return obj def __array_finalize__(self, obj): @@ -54,7 +54,7 @@ class ObservableArray(np.ndarray, Observable): def __setitem__(self, s, val): if self._s_not_empty(s): - super(ObservableArray, self).__setitem__(s, val) + super(ObsAr, self).__setitem__(s, val) self.notify_observers(self[s]) def __getslice__(self, start, stop): @@ -64,7 +64,7 @@ class ObservableArray(np.ndarray, Observable): return self.__setitem__(slice(start, stop), val) def __copy__(self, *args): - return ObservableArray(self.view(np.ndarray).copy()) + return ObsAr(self.view(np.ndarray).copy()) def copy(self, *args): return self.__copy__(*args) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index ed394806..324593f9 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -4,7 +4,7 @@ import itertools import numpy from parameter_core import OptimizationHandlable, adjust_name_for_printing -from array_core import ObservableArray +from array_core import ObsAr ###### printing __constraints_name__ = "Constraint" @@ -15,7 +15,7 @@ __precision__ = numpy.get_printoptions()['precision'] # numpy printing precision __print_threshold__ = 5 ###### -class Param(OptimizationHandlable, ObservableArray): +class Param(OptimizationHandlable, ObsAr): """ Parameter object for GPy models. 
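The __reduce__/__setstate__ pair added to the observable array in patch 105 follows the standard recipe for pickling ndarray subclasses: let ndarray serialise the buffer, and piggyback the subclass state on the end of its state tuple. A self-contained sketch of the same recipe (the 'tag' attribute is a stand-in for the observer state):

    import pickle
    import numpy as np

    class TaggedArray(np.ndarray):
        def __new__(cls, arr, tag=None):
            obj = np.asarray(arr).view(cls)
            obj.tag = tag
            return obj
        def __array_finalize__(self, obj):
            self.tag = getattr(obj, 'tag', None)
        def __reduce__(self):
            func, args, state = np.ndarray.__reduce__(self)
            return func, args, (state, self.tag)      # append subclass state
        def __setstate__(self, state):
            nd_state, tag = state
            np.ndarray.__setstate__(self, nd_state)   # restore the buffer first
            self.tag = tag

    a = pickle.loads(pickle.dumps(TaggedArray(np.arange(3.), tag='obs')))
    assert a.tag == 'obs'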
diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index 101aac4b..4a6c5735 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -50,6 +50,9 @@ class Gaussian(Likelihood):
         if isinstance(gp_link, link_functions.Identity):
             self.log_concave = True
 
+    def gaussian_variance(self):
+        return self.variance
+
     def covariance_matrix(self, Y, Y_metadata=None):
         return np.eye(Y.shape[0]) * self.variance
 
diff --git a/GPy/models/gp_regression.py b/GPy/models/gp_regression.py
index 5e83db09..86e64a54 100644
--- a/GPy/models/gp_regression.py
+++ b/GPy/models/gp_regression.py
@@ -20,14 +20,14 @@ class GPRegression(GP):
 
     """
 
-    def __init__(self, X, Y, kernel=None):
+    def __init__(self, X, Y, kernel=None, Y_metadata=None):
 
         if kernel is None:
             kernel = kern.RBF(X.shape[1])
 
         likelihood = likelihoods.Gaussian()
 
-        super(GPRegression, self).__init__(X, Y, kernel, likelihood, name='GP regression')
+        super(GPRegression, self).__init__(X, Y, kernel, likelihood, name='GP regression', Y_metadata=Y_metadata)
 
     def _getstate(self):
         return GP._getstate(self)
diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py
index 754e95db..81c2dfdd 100644
--- a/GPy/testing/parameterized_tests.py
+++ b/GPy/testing/parameterized_tests.py
@@ -7,16 +7,16 @@ import unittest
 import GPy
 import numpy as np
 from GPy.core.parameterization.parameter_core import HierarchyError
-from GPy.core.parameterization.array_core import ObservableArray
+from GPy.core.parameterization.array_core import ObsAr
 
 class ArrayCoreTest(unittest.TestCase):
     def setUp(self):
         self.X = np.random.normal(1,1, size=(100,10))
-        self.obsX = ObservableArray(self.X)
+        self.obsX = ObsAr(self.X)
 
     def test_init(self):
-        X = ObservableArray(self.X)
-        X2 = ObservableArray(X)
+        X = ObsAr(self.X)
+        X2 = ObsAr(X)
         self.assertIs(X, X2, "no new Observable array when an Observable is given")
 
     def test_slice(self):

From fe82678d80a44d6195ca11a2b04ad464f58a62a0 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Mon, 17 Mar 2014 18:57:34 +0000
Subject: [PATCH 107/116] Changes to allow heteroscedastic inference

---
 .../latent_function_inference/var_dtc.py      | 37 ++++++++++---------
 1 file changed, 20 insertions(+), 17 deletions(-)

diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index 7a0c14e8..f948ff38 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 from posterior import Posterior
-from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
+from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
 from ...util import diag
 from ...core.parameterization.variational import VariationalPosterior
 import numpy as np
@@ -48,7 +48,7 @@ class VarDTC(object):
     def get_VVTfactor(self, Y, prec):
         return Y * prec # TODO: cache this, and make it efficient
 
-    def inference(self, kern, X, Z, likelihood, Y):
+    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
         if isinstance(X, VariationalPosterior):
             uncertain_inputs = True
             psi0 = kern.psi0(Z, X)
@@ -65,7 +65,9 @@ class VarDTC(object):
         _, output_dim = Y.shape
 
         #see whether we've got a different noise variance for each datum
-        beta = 1./np.fmax(likelihood.variance, 1e-6)
+        #beta = 1./np.fmax(likelihood.variance, 1e-6)
+        beta = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
+
         # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
         #self.YYTfactor = self.get_YYTfactor(Y)
         #VVT_factor = self.get_VVTfactor(self.YYTfactor, beta)
@@ -74,7 +76,7 @@ class VarDTC(object):
         trYYT = self.get_trYYT(Y)
 
         # do the inference:
-        het_noise = beta.size < 1
+        het_noise = beta.size > 1
 
         num_inducing = Z.shape[0]
         num_data = Y.shape[0]
         # kernel computations, using BGPLVM notation
@@ -134,16 +136,16 @@
 
         # log marginal likelihood
         log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
-                                                        psi0, A, LB, trYYT, data_fit)
+                                                        psi0, A, LB, trYYT, data_fit, Y)
 
         #put the gradients in the right places
         dL_dR = _compute_dL_dR(likelihood,
             het_noise, uncertain_inputs, LB,
             _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
             psi0, psi1, beta,
-            data_fit, num_data, output_dim, trYYT)
+            data_fit, num_data, output_dim, trYYT, Y)
 
-        dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
+        dL_dthetaL = likelihood.exact_inference_gradients(dL_dR,Y_metadata)
 
         if uncertain_inputs:
             grad_dict = {'dL_dKmm': dL_dKmm,
@@ -385,7 +387,7 @@ def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, C
     return dL_dpsi0, dL_dpsi1, dL_dpsi2
 
 
-def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT):
+def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT, Y):
     # the partial derivative vector for the likelihood
     if likelihood.size == 0:
         # save computation here.
@@ -394,19 +396,20 @@
         if uncertain_inputs:
             raise NotImplementedError, "heteroscedastic derivatives with uncertain inputs not implemented"
         else:
-            from ...util.linalg import chol_inv
-            LBi = chol_inv(LB)
+            #from ...util.linalg import chol_inv
+            #LBi = chol_inv(LB)
+            LBi, _ = dtrtrs(LB,np.eye(LB.shape[0]))
+
             Lmi_psi1, nil = dtrtrs(Lm, psi1.T, lower=1, trans=0)
             _LBi_Lmi_psi1, _ = dtrtrs(LB, Lmi_psi1, lower=1, trans=0)
 
-            dL_dR = -0.5 * beta + 0.5 * likelihood.V**2
+            dL_dR = -0.5 * beta + 0.5 * (beta*Y)**2
             dL_dR += 0.5 * output_dim * (psi0 - np.sum(Lmi_psi1**2,0))[:,None] * beta**2
 
             dL_dR += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*beta**2
 
-            dL_dR += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * likelihood.Y * beta**2
+            dL_dR += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * Y * beta**2
 
             dL_dR += 0.5*np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * beta**2
-
     else:
         # likelihood is not heteroscedastic
         dL_dR = -0.5 * num_data * output_dim * beta + 0.5 * trYYT * beta ** 2
@@ -414,11 +417,11 @@
         dL_dR += beta * (0.5 * np.sum(A * DBi_plus_BiPBi) - data_fit)
     return dL_dR
 
-def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit):
-#compute log marginal likelihood
+def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit,Y):
+    #compute log marginal likelihood
     if het_noise:
-        lik_1 = -0.5 * num_data * output_dim * np.log(2. * np.pi) + 0.5 * np.sum(np.log(beta)) - 0.5 * np.sum(likelihood.V * likelihood.Y)
-        lik_2 = -0.5 * output_dim * (np.sum(beta * psi0) - np.trace(A))
+        lik_1 = -0.5 * num_data * output_dim * np.log(2. 
* np.pi) + 0.5 * np.sum(np.log(beta)) - 0.5 * np.sum(beta * Y**2) + lik_2 = -0.5 * output_dim * (np.sum(beta.flatten() * psi0) - np.trace(A)) else: lik_1 = -0.5 * num_data * output_dim * (np.log(2. * np.pi) - np.log(beta)) - 0.5 * beta * trYYT lik_2 = -0.5 * output_dim * (np.sum(beta * psi0) - np.trace(A)) From 7fdecf5e317a40461d1f5e31f3a9d912d74972c4 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 18:57:58 +0000 Subject: [PATCH 108/116] bug fixed --- GPy/core/gp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 70b7d695..d7f07a47 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -43,7 +43,7 @@ class GP(Model): _, self.output_dim = self.Y.shape if Y_metadata is None: - Y_metadata = {} + self.Y_metadata = {} else: self.Y_metadata = Y_metadata From 15901a48d43566eef9995eeeed7ee9df6eb3c146 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 18:58:31 +0000 Subject: [PATCH 109/116] Changes to allow compatibility with mixed noise likelihoods --- GPy/core/sparse_gp.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index d137ceff..a0b09564 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -31,7 +31,7 @@ class SparseGP(GP): """ - def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None, name='sparse gp'): + def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None, name='sparse gp', Y_metadata=None): #pick a sensible inference method if inference_method is None: @@ -45,7 +45,7 @@ class SparseGP(GP): self.Z = Param('inducing inputs', Z) self.num_inducing = Z.shape[0] - GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name) + GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name, Y_metadata=Y_metadata) self.add_parameter(self.Z, index=0) @@ -53,7 +53,7 @@ class SparseGP(GP): return isinstance(self.X, VariationalPosterior) def parameters_changed(self): - self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y) + self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y, self.Y_metadata) self.likelihood.update_gradients(self.grad_dict['dL_dthetaL']) if isinstance(self.X, VariationalPosterior): #gradients wrt kernel @@ -75,7 +75,6 @@ class SparseGP(GP): target += self.kern.gradient self.kern.update_gradients_full(self.grad_dict['dL_dKmm'], self.Z, None) self.kern.gradient += target - #gradients wrt Z self.Z.gradient[:,self.kern.active_dims] = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z) self.Z.gradient[:,self.kern.active_dims] += self.kern.gradients_X(self.grad_dict['dL_dKnm'].T, self.Z, self.X) From bea74ac5231e47cce34170273771fd61a39e6b73 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 18:59:37 +0000 Subject: [PATCH 110/116] new function added --- GPy/likelihoods/gaussian.py | 6 ++++++ GPy/likelihoods/mixed_noise.py | 30 +++++++++++++++++++----------- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py index 101aac4b..0c73e485 100644 --- a/GPy/likelihoods/gaussian.py +++ b/GPy/likelihoods/gaussian.py @@ -50,6 +50,12 @@ class Gaussian(Likelihood): if isinstance(gp_link, link_functions.Identity): self.log_concave = True + def betaY(self,Y,Y_metadata=None): + return 
Y/self.gaussian_variance(Y_metadata) + + def gaussian_variance(self, Y_metadata=None): + return self.variance + def covariance_matrix(self, Y, Y_metadata=None): return np.eye(Y.shape[0]) * self.variance diff --git a/GPy/likelihoods/mixed_noise.py b/GPy/likelihoods/mixed_noise.py index bfcb5916..b4960f3a 100644 --- a/GPy/likelihoods/mixed_noise.py +++ b/GPy/likelihoods/mixed_noise.py @@ -18,6 +18,17 @@ class MixedNoise(Likelihood): self.likelihoods_list = likelihoods_list self.log_concave = False + def gaussian_variance(self, Y_metadata): + assert all([isinstance(l, Gaussian) for l in self.likelihoods_list]) + ind = Y_metadata['output_index'].flatten() + variance = np.zeros(ind.size) + for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))): + variance[ind==j] = lik.variance + return variance[:,None] + + def betaY(self,Y,Y_metadata): + return Y/self.gaussian_variance(Y_metadata=Y_metadata) + def update_gradients(self, gradients): self.gradient = gradients @@ -32,13 +43,9 @@ class MixedNoise(Likelihood): _variance = np.array([self.likelihoods_list[j].variance for j in ind ]) if full_cov: var += np.eye(var.shape[0])*_variance - #d = 2*np.sqrt(np.diag(var)) - #low, up = mu - d, mu + d else: var += _variance - #d = 2*np.sqrt(var) - #low, up = mu - d, mu + d - return mu, var#, low, up + return mu, var else: raise NotImplementedError @@ -51,12 +58,13 @@ class MixedNoise(Likelihood): def covariance_matrix(self, Y, Y_metadata): - assert all([isinstance(l, Gaussian) for l in self.likelihoods_list]) - ind = Y_metadata['output_index'].flatten() - variance = np.zeros(Y.shape[0]) - for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))): - variance[ind==j] = lik.variance - return np.diag(variance) + #assert all([isinstance(l, Gaussian) for l in self.likelihoods_list]) + #ind = Y_metadata['output_index'].flatten() + #variance = np.zeros(Y.shape[0]) + #for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))): + # variance[ind==j] = lik.variance + #return np.diag(variance) + return np.diag(self.gaussian_variance(Y_metadata).flatten()) def samples(self, gp, Y_metadata): From 385ce7d344ed00a79ca79b2b8ba792bb4256cc68 Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 19:00:16 +0000 Subject: [PATCH 111/116] Changes to allow mixed noise likelihoods --- GPy/plotting/matplot_dep/models_plots.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index ae79569b..cbb213b1 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -7,6 +7,7 @@ import Tango from base_plots import gpplot, x_frame1D, x_frame2D from ...util.misc import param_to_array from ...models.gp_coregionalized_regression import GPCoregionalizedRegression +from ...models.sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression def plot_fit(model, plot_limits=None, which_data_rows='all', @@ -86,7 +87,10 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', lower = m - 2*np.sqrt(v) upper = m + 2*np.sqrt(v) else: - meta = {'output_index': Xgrid[:,-1:].astype(np.int)} if isinstance(model,GPCoregionalizedRegression) else None + if isinstance(model,GPCoregionalizedRegression) or isinstance(model,SparseGPCoregionalizedRegression): + meta = {'output_index': Xgrid[:,-1:].astype(np.int)} + else: + meta = None m, v = model.predict(Xgrid, full_cov=False, Y_metadata=meta) lower, upper = model.predict_quantiles(Xgrid, 
Y_metadata=meta) From d506604c58e6277044e083e2a0f14392ebfc02db Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 19:00:35 +0000 Subject: [PATCH 112/116] minor changes --- GPy/examples/coreg_example.py | 39 +++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/GPy/examples/coreg_example.py b/GPy/examples/coreg_example.py index f0288f35..8f4cfcc6 100644 --- a/GPy/examples/coreg_example.py +++ b/GPy/examples/coreg_example.py @@ -2,9 +2,10 @@ import numpy as np import pylab as pb import GPy pb.ion() +pb.close('all') -X1 = 100 * np.random.rand(3)[:,None] -X2 = 100 * np.random.rand(4)[:,None] +X1 = np.arange(3)[:,None] +X2 = np.arange(4)[:,None] I1 = np.zeros_like(X1) I2 = np.ones_like(X2) @@ -13,27 +14,39 @@ _I = np.vstack([ I1, I2 ]) X = np.hstack([ _X, _I ]) +Y1 = np.sin(X1/8.) +Y2 = np.cos(X2/8.) + Bias = GPy.kern.Bias(1,active_dims=[0]) Coreg = GPy.kern.Coregionalize(1,2,active_dims=[1]) K = Bias.prod(Coreg,name='X') -K.coregion.W = 0 -print K.coregion.W +#K.coregion.W = 0 +#print K.coregion.W -print Bias.K(_X,_X) -print K.K(X,X) +#print Bias.K(_X,_X) +#print K.K(X,X) -pb.matshow(K.K(X,X)) +#pb.matshow(K.K(X,X)) -stop Mlist = [GPy.kern.Matern32(1,lengthscale=20.,name="Mat")] -kern = GPy.util.multioutput.LCM(input_dim=1,num_outputs=12,kernels_list=Mlist,name='H') - - -m = GPy.models.GPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2], kernel=kern) -m.optimize() +kern = GPy.util.multioutput.LCM(input_dim=1,num_outputs=2,kernels_list=Mlist,name='H') +kern.B.W = 0 +kern.B.kappa = 1. +#kern.B.W.fix() +#kern.B.kappa.fix() +#m = GPy.models.GPCoregionalizedRegression(X_list=[X1,X2], Y_list=[Y1,Y2], kernel=kern) +m = GPy.models.SparseGPCoregionalizedRegression(X_list=[X1], Y_list=[Y1], kernel=kern) +#m.optimize() +m.checkgrad(verbose=1) +fig = pb.figure() +ax0 = fig.add_subplot(211) +ax1 = fig.add_subplot(212) +slices = GPy.util.multioutput.get_slices([Y1,Y2]) +m.plot(fixed_inputs=[(1,0)],which_data_rows=slices[0],ax=ax0) +#m.plot(fixed_inputs=[(1,1)],which_data_rows=slices[1],ax=ax1) From 9cda9f991489332b51dc731bd2019deb24e6659c Mon Sep 17 00:00:00 2001 From: Ricardo Date: Mon, 17 Mar 2014 19:01:16 +0000 Subject: [PATCH 113/116] New model SparseGPCoregionalizedRegression --- GPy/models/__init__.py | 1 + .../sparse_gp_coregionalized_regression.py | 66 +++++++++++++++++++ 2 files changed, 67 insertions(+) create mode 100644 GPy/models/sparse_gp_coregionalized_regression.py diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py index 34e5a17e..d0988c9e 100644 --- a/GPy/models/__init__.py +++ b/GPy/models/__init__.py @@ -15,4 +15,5 @@ from mrd import MRD from gradient_checker import GradientChecker from ss_gplvm import SSGPLVM from gp_coregionalized_regression import GPCoregionalizedRegression +from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression #.py file not included!!! #from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression diff --git a/GPy/models/sparse_gp_coregionalized_regression.py b/GPy/models/sparse_gp_coregionalized_regression.py new file mode 100644 index 00000000..a97696d2 --- /dev/null +++ b/GPy/models/sparse_gp_coregionalized_regression.py @@ -0,0 +1,66 @@ +# Copyright (c) 2012 - 2014 the GPy Austhors (see AUTHORS.txt) +# Licensed under the BSD 3-clause license (see LICENSE.txt) + +import numpy as np +from ..core import SparseGP +from ..inference.latent_function_inference import VarDTC +from .. import likelihoods +from .. import kern +from .. 
import util
+
+class SparseGPCoregionalizedRegression(SparseGP):
+    """
+    Sparse Gaussian Process model for heteroscedastic multioutput regression
+
+    This is a thin wrapper around the SparseGP class, with a set of sensible defaults
+
+    :param X_list: list of input observations corresponding to each output
+    :type X_list: list of numpy arrays
+    :param Y_list: list of observed values related to the different noise models
+    :type Y_list: list of numpy arrays
+    :param Z_list: list of inducing inputs (optional)
+    :type Z_list: empty list | list of numpy arrays
+    :param kernel: a GPy kernel, defaults to RBF ** Coregionalized
+    :type kernel: None | GPy.kernel defaults
+    :param likelihoods_list: a list of likelihoods, defaults to a list of Gaussian likelihoods
+    :type likelihoods_list: None | a list of GPy.likelihoods
+    :param num_inducing: number of inducing inputs, defaults to 10 per output (ignored if Z_list is not empty)
+    :type num_inducing: integer | list of integers
+
+    :param name: model name
+    :type name: string
+    :param W_rank: number of tuples of the coregionalization parameters 'W' (see coregionalize kernel documentation)
+    :type W_rank: integer
+    :param kernel_name: name of the kernel
+    :type kernel_name: string
+    """
+
+    def __init__(self, X_list, Y_list, Z_list=[], kernel=None, likelihoods_list=None, num_inducing=10, X_variance=None, name='SGPCR',W_rank=1,kernel_name='X'):
+
+        #Input and Output
+        X,Y,self.output_index = util.multioutput.build_XY(X_list,Y_list)
+        Ny = len(Y_list)
+
+        #Kernel
+        if kernel is None:
+            kernel = util.multioutput.ICM(input_dim=X.shape[1]-1, num_outputs=Ny, kernel=kern.RBF(X.shape[1]-1), W_rank=W_rank, name=kernel_name)
+
+        #Likelihood
+        likelihood = util.multioutput.build_likelihood(Y_list,self.output_index,likelihoods_list)
+
+        #Inducing inputs list
+        if len(Z_list):
+            assert len(Z_list) == Ny, 'Number of outputs does not match length of inducing inputs list.'
+        else:
+            if isinstance(num_inducing,np.int):
+                num_inducing = [num_inducing] * Ny
+            num_inducing = np.asarray(num_inducing)
+            assert num_inducing.size == Ny, 'Number of outputs does not match length of inducing inputs list.'
+            for ni,Xi in zip(num_inducing,X_list):
+                i = np.random.permutation(Xi.shape[0])[:ni]
+                Z_list.append(Xi[i].copy())
+
+        Z, _, Iz = util.multioutput.build_XY(Z_list)
+
+        super(SparseGPCoregionalizedRegression, self).__init__(X, Y, Z, kernel, likelihood, inference_method=VarDTC(), name=name, Y_metadata={'output_index':self.output_index})
+        self['.*inducing'][:,-1].fix()

From 9680a139d43ac208063a309afc9bae36b4a46978 Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Tue, 18 Mar 2014 12:28:46 +0000
Subject: [PATCH 114/116] changed the way the Gaussian likelihood interfaces, to enable mixed_noise things

---
 GPy/core/gp.py                                |  6 ++--
 .../latent_function_inference/dtc.py          | 32 ++++++++-----------
 .../exact_gaussian_inference.py               |  5 ++-
 .../latent_function_inference/var_dtc.py      |  4 +--
 GPy/likelihoods/gaussian.py                   |  4 +--
 5 files changed, 23 insertions(+), 28 deletions(-)

diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 70b7d695..e052ff35 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -42,10 +42,8 @@ class GP(Model):
         assert Y.shape[0] == self.num_data
         _, self.output_dim = self.Y.shape
 
-        if Y_metadata is None:
-            self.Y_metadata = {}
-        else:
-            self.Y_metadata = Y_metadata
+        #TODO: check that the type of this is okay
+        self.Y_metadata = Y_metadata
 
         assert isinstance(kernel, kern.Kern)
         #assert self.input_dim == kernel.input_dim
diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py
index 5ebc5e53..89140ce2 100644
--- a/GPy/inference/latent_function_inference/dtc.py
+++ b/GPy/inference/latent_function_inference/dtc.py
@@ -91,12 +91,8 @@ class vDTC(object):
     def __init__(self):
         self.const_jitter = 1e-6
 
-    def inference(self, kern, X, X_variance, Z, likelihood, Y):
-        assert X_variance is None, "cannot use X_variance with DTC. Try varDTC."
-
-        #TODO: MAX! fix this!
-        from ...util.misc import param_to_array
-        Y = param_to_array(Y)
+    def inference(self, kern, X, Z, likelihood, Y):
+        #assert X_variance is None, "cannot use X_variance with DTC. Try varDTC."
 
         num_inducing, _ = Z.shape
         num_data, output_dim = Y.shape
@@ -109,15 +105,14 @@ class vDTC(object):
         Kmm = kern.K(Z)
         Knn = kern.Kdiag(X)
         Knm = kern.K(X, Z)
-        U = Knm
-        Uy = np.dot(U.T,Y)
+        KnmY = np.dot(Knm.T,Y)
 
-        #factor Kmm
+        #factor Kmm 
         Kmmi, L, Li, _ = pdinv(Kmm)
 
         # Compute A
-        LiUTbeta = np.dot(Li, U.T)*np.sqrt(beta)
-        A_ = tdot(LiUTbeta)
+        LiKmnbeta = np.dot(Li, Knm.T)*np.sqrt(beta)
+        A_ = tdot(LiKmnbeta)
         trace_term = -0.5*(np.sum(Knn)*beta - np.trace(A_))
         A = A_ + np.eye(num_inducing)
 
@@ -125,7 +120,7 @@ class vDTC(object):
         LA = jitchol(A)
 
         # back-substitute to get b, P, v
-        tmp, _ = dtrtrs(L, Uy, lower=1)
+        tmp, _ = dtrtrs(L, KnmY, lower=1)
         b, _ = dtrtrs(LA, tmp*beta, lower=1)
         tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
         v, _ = dtrtrs(L, tmp, lower=1, trans=1)
@@ -145,19 +140,18 @@ class vDTC(object):
         LAL = Li.T.dot(A).dot(Li)
         dL_dK = Kmmi - 0.5*(vvT_P + LAL)
 
-        # Compute dL_dU
+        # Compute dL_dKnm
         vY = np.dot(v.reshape(-1,1),Y.T)
-        #dL_dU = vY - np.dot(vvT_P, U.T)
-        dL_dU = vY - np.dot(vvT_P - Kmmi, U.T)
-        dL_dU *= beta
+        dL_dKmn = vY - np.dot(vvT_P - Kmmi, Knm.T)
+        dL_dKmn *= beta
 
         #compute dL_dR
-        Uv = np.dot(U, v)
-        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) )*beta**2
+        Knmv = np.dot(Knm, v)
+        dL_dR = 0.5*(np.sum(Knm*np.dot(Knm,P), 1) - 1./beta + np.sum(np.square(Y), 1) - 2.*np.sum(Knmv*Y, 1) + np.sum(np.square(Knmv), 1) )*beta**2
         dL_dR -=beta*trace_term/num_data
         dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
 
-        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*beta, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}
+        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*beta, 'dL_dKnm':dL_dKmn.T, 'dL_dthetaL':dL_dthetaL}
 
         #construct a posterior object
         post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
index e76575c6..ca1b92c6 100644
--- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py
+++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
@@ -3,6 +3,7 @@
 
 from posterior import Posterior
 from ...util.linalg import pdinv, dpotrs, tdot
+from ...util import diag
 import numpy as np
 log_2_pi = np.log(2*np.pi)
 
@@ -41,7 +42,9 @@ class ExactGaussianInference(object):
 
         K = kern.K(X)
 
-        Wi, LW, LWi, W_logdet = pdinv(K + likelihood.covariance_matrix(Y, Y_metadata))
+        Ky = K.copy()
+        diag.add(Ky, likelihood.gaussian_variance(Y, Y_metadata))
+        Wi, LW, LWi, W_logdet = pdinv(Ky)
 
         alpha, _ = dpotrs(LW, YYT_factor, lower=1)
 
diff --git 
a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index 9b6e26c0..97d54624 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -48,7 +48,7 @@ class VarDTC(object):
     def get_VVTfactor(self, Y, prec):
         return Y * prec # TODO: cache this, and make it efficient
 
-    def inference(self, kern, X, Z, likelihood, Y):
+    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
         if isinstance(X, VariationalPosterior):
             uncertain_inputs = True
             psi0 = kern.psi0(Z, X)
@@ -65,7 +65,7 @@ class VarDTC(object):
         _, output_dim = Y.shape
 
         #see whether we've got a different noise variance for each datum
-        beta = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
+        beta = 1./np.fmax(likelihood.gaussian_variance(Y, Y_metadata), 1e-6)
         # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
         #self.YYTfactor = self.get_YYTfactor(Y)
         #VVT_factor = self.get_VVTfactor(self.YYTfactor, beta)
diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index 101aac4b..79d62bb7 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -50,8 +50,8 @@ class Gaussian(Likelihood):
         if isinstance(gp_link, link_functions.Identity):
             self.log_concave = True
 
-    def covariance_matrix(self, Y, Y_metadata=None):
-        return np.eye(Y.shape[0]) * self.variance
+    def gaussian_variance(self, Y, Y_metadata=None):
+        return self.variance
 
     def update_gradients(self, grad):
         self.variance.gradient = grad

From 5acb66cf78f082dab3c96b58c426d1f01930204c Mon Sep 17 00:00:00 2001
From: Zhenwen Dai
Date: Tue, 18 Mar 2014 12:35:28 +0000
Subject: [PATCH 115/116] bug fix w.r.t. var_dtc.py

---
 GPy/core/parameterization/variational.py      | 21 +++++++++++++++++++
 .../latent_function_inference/var_dtc.py      |  8 +++----
 2 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py
index 01706922..5b3c4bca 100644
--- a/GPy/core/parameterization/variational.py
+++ b/GPy/core/parameterization/variational.py
@@ -126,6 +126,27 @@ class SpikeAndSlabPosterior(VariationalPosterior):
         super(SpikeAndSlabPosterior, self).__init__(means, variances, name)
         self.gamma = Param("binary_prob",binary_prob, Logistic(1e-10,1.-1e-10))
         self.add_parameter(self.gamma)
+
+    def __getitem__(self, s):
+        if isinstance(s, (int, slice, tuple, list, np.ndarray)):
+            import copy
+            n = self.__new__(self.__class__, self.name)
+            dc = self.__dict__.copy()
+            dc['mean'] = self.mean[s]
+            dc['variance'] = self.variance[s]
+            dc['binary_prob'] = self.binary_prob[s]
+            dc['_parameters_'] = copy.copy(self._parameters_)
+            n.__dict__.update(dc)
+            n._parameters_[dc['mean']._parent_index_] = dc['mean']
+            n._parameters_[dc['variance']._parent_index_] = dc['variance']
+            n._parameters_[dc['binary_prob']._parent_index_] = dc['binary_prob']
+            n.ndim = n.mean.ndim
+            n.shape = n.mean.shape
+            n.num_data = n.mean.shape[0]
+            n.input_dim = n.mean.shape[1] if n.ndim != 1 else 1
+            return n
+        else:
+            return super(SpikeAndSlabPosterior, self).__getitem__(s)
 
     def plot(self, *args):
         """
diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index 82f6c2b9..e2aa95f5 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -134,7 +134,7 @@ class VarDTC(object):
 
         # log marginal likelihood
         log_marginal = _compute_log_marginal_likelihood(likelihood, 
num_data, output_dim, beta, het_noise,
-                                                        psi0, A, LB, trYYT, data_fit, Y)
+                                                        psi0, A, LB, trYYT, data_fit, VVT_factor)
 
         #put the gradients in the right places
         dL_dR = _compute_dL_dR(likelihood,
@@ -208,7 +208,7 @@ class VarDTCMissingData(object):
             self._subarray_indices = [[slice(None),slice(None)]]
         return [Y], [(Y**2).sum()]
 
-    def inference(self, kern, X, Z, likelihood, Y):
+    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
         if isinstance(X, VariationalPosterior):
             uncertain_inputs = True
             psi0_all = kern.psi0(Z, X)
@@ -305,7 +305,7 @@ class VarDTCMissingData(object):
             # log marginal likelihood
             log_marginal += _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
-                                                             psi0, A, LB, trYYT, data_fit)
+                                                             psi0, A, LB, trYYT, data_fit,VVT_factor)
 
             #put the gradients in the right places
             dL_dR += _compute_dL_dR(likelihood,
@@ -420,7 +420,7 @@ def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf,
 def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT, data_fit,Y):
     #compute log marginal likelihood
     if het_noise:
-        lik_1 = -0.5 * num_data * output_dim * np.log(2. * np.pi) + 0.5 * np.sum(np.log(beta)) - 0.5 * np.sum(beta * Y**2)
+        lik_1 = -0.5 * num_data * output_dim * np.log(2. * np.pi) + 0.5 * np.sum(np.log(beta)) - 0.5 * np.sum(beta * np.square(Y).sum(axis=-1))
         lik_2 = -0.5 * output_dim * (np.sum(beta.flatten() * psi0) - np.trace(A))
     else:
         lik_1 = -0.5 * num_data * output_dim * (np.log(2. * np.pi) - np.log(beta)) - 0.5 * beta * trYYT

From 24b43c490caa1d22703959e537ada28edb74cae2 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Tue, 18 Mar 2014 16:30:46 +0000
Subject: [PATCH 116/116] fixes now hierarchical, maybe need to be restructured as lookup from constraints

---
 GPy/core/parameterization/param.py          |  2 +-
 GPy/core/parameterization/parameter_core.py | 47 ++++++++++++++-------
 GPy/testing/parameterized_tests.py          | 46 +++++++++++++-------
 3 files changed, 62 insertions(+), 33 deletions(-)

diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py
index 324593f9..b73e7dfa 100644
--- a/GPy/core/parameterization/param.py
+++ b/GPy/core/parameterization/param.py
@@ -226,7 +226,7 @@ class Param(OptimizationHandlable, ObsAr):
     # Constrainable
     #===========================================================================
     def _ensure_fixes(self):
-        self._fixes_ = numpy.ones(self._realsize_, dtype=bool)
+        if not self._has_fixes(): self._fixes_ = numpy.ones(self._realsize_, dtype=bool)
 
     #===========================================================================
     # Convenience
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index 0aab890c..d4779127 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -16,7 +16,7 @@ Observable Pattern for parameterization
 from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED
 import numpy as np
 
-__updated__ = '2014-03-17'
+__updated__ = '2014-03-18'
 
 class HierarchyError(Exception):
     """
@@ -377,7 +377,7 @@ class Constrainable(Nameable, Indexable):
         # Ensure that the fixes array is set:
         # Parameterized: ones(self.size)
         # Param: ones(self._realsize_)
-        self._fixes_ = np.ones(self.size, dtype=bool)
+        if not self._has_fixes(): self._fixes_ = np.ones(self.size, dtype=bool)
 
     def _set_fixed(self, index):
         self._ensure_fixes()
@@ -398,7 +398,7 @@ class Constrainable(Nameable, Indexable):
self._fixes_ = None def _has_fixes(self): - return hasattr(self, "_fixes_") and self._fixes_ is not None + return hasattr(self, "_fixes_") and self._fixes_ is not None and self._fixes_.size == self.size #=========================================================================== # Prior Operations @@ -576,14 +576,22 @@ class OptimizationHandlable(Constrainable): # transformed parameters (apply transformation rules) p = self._param_array_.copy() [np.put(p, ind, c.finv(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] - if self._has_fixes(): + if self.has_parent() and self.constraints[__fixed__].size != 0: + fixes = np.ones(self.size).astype(bool) + fixes[self.constraints[__fixed__]] = FIXED + return p[fixes] + elif self._has_fixes(): return p[self._fixes_] return p def _set_params_transformed(self, p): if p is self._param_array_: p = p.copy() - if self._has_fixes(): self._param_array_[self._fixes_] = p + if self.has_parent() and self.constraints[__fixed__].size != 0: + fixes = np.ones(self.size).astype(bool) + fixes[self.constraints[__fixed__]] = FIXED + self._param_array_[fixes] = p + elif self._has_fixes(): self._param_array_[self._fixes_] = p else: self._param_array_[:] = p self.untransform() self._trigger_params_changed() @@ -770,11 +778,11 @@ class Parameterizable(OptimizationHandlable): Add all parameters to this param class, you can insert parameters at any given index using the :func:`list.insert` syntax """ - # if param.has_parent(): - # raise AttributeError, "parameter {} already in another model, create new object (or copy) for adding".format(param._short()) if param in self._parameters_ and index is not None: self.remove_parameter(param) self.add_parameter(param, index) + elif param.has_parent(): + raise HierarchyError, "parameter {} already in another model ({}), create new object (or copy) for adding".format(param._short(), param._highest_parent_._short()) elif param not in self._parameters_: if param.has_parent(): parent = param._parent_ @@ -798,13 +806,19 @@ class Parameterizable(OptimizationHandlable): param.add_observer(self, self._pass_through_notify_observers, -np.inf) - self.size += param.size + parent = self + while parent is not None: + parent.size += param.size + parent = parent._parent_ + + self._connect_parameters() + + self._highest_parent_._connect_parameters(ignore_added_names=_ignore_added_names) + self._highest_parent_._notify_parent_change() + self._highest_parent_._connect_fixes() - self._connect_parameters(ignore_added_names=_ignore_added_names) - self._notify_parent_change() - self._connect_fixes() else: - raise RuntimeError, """Parameter exists already added and no copy made""" + raise HierarchyError, """Parameter exists already and no copy made""" def add_parameters(self, *parameters): @@ -830,17 +844,18 @@ class Parameterizable(OptimizationHandlable): param.remove_observer(self, self._pass_through_notify_observers) self.constraints.shift_left(start, param.size) - self._connect_fixes() self._connect_parameters() self._notify_parent_change() parent = self._parent_ while parent is not None: - parent._connect_fixes() - parent._connect_parameters() - parent._notify_parent_change() + parent.size -= param.size parent = parent._parent_ + self._highest_parent_._connect_parameters() + self._highest_parent_._connect_fixes() + self._highest_parent_._notify_parent_change() + def _connect_parameters(self, ignore_added_names=False): # connect parameterlist to this parameterized object # This just sets up the right connection for the params objects 
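
Note on the hunk above: add_parameter/remove_parameter now walk the parent chain to keep size in sync and always reconnect parameters and fixes at the highest parent, so fixing a leaf parameter is respected anywhere in the hierarchy. A rough sketch of the behaviour the updated tests below exercise (a sketch only; the RBF/White kernel constructors are assumed from the test fixtures, not part of this hunk):

    import GPy

    rbf, white = GPy.kern.RBF(1), GPy.kern.White(1)
    m = GPy.core.Parameterized("test model")
    m.kern = rbf + white
    m.add_parameter(m.kern)    # child sizes propagate up to m
    white.fix(warning=True)    # the fix is registered at the highest parent
    m.randomize()              # white.variance keeps its value, all other params change
    # m.add_parameter(white)   # would raise HierarchyError: already in another hierarchy
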
diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index 81c2dfdd..cd5127c8 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -34,9 +34,9 @@ class ParameterizedTest(unittest.TestCase): self.param = Param('param', np.random.rand(25,2), Logistic(0, 1)) self.test1 = GPy.core.Parameterized("test model") - self.test1.add_parameter(self.white) - self.test1.add_parameter(self.rbf, 0) - self.test1.add_parameter(self.param) + self.test1.kern = self.rbf+self.white + self.test1.add_parameter(self.test1.kern) + self.test1.add_parameter(self.param, 0) x = np.linspace(-2,6,4)[:,None] y = np.sin(x) @@ -45,22 +45,24 @@ class ParameterizedTest(unittest.TestCase): def test_add_parameter(self): self.assertEquals(self.rbf._parent_index_, 0) self.assertEquals(self.white._parent_index_, 1) + self.assertEquals(self.param._parent_index_, 0) pass def test_fixes(self): self.white.fix(warning=False) - self.test1.remove_parameter(self.test1.param) + self.test1.remove_parameter(self.param) self.assertTrue(self.test1._has_fixes()) from GPy.core.parameterization.transformations import FIXED, UNFIXED self.assertListEqual(self.test1._fixes_.tolist(),[UNFIXED,UNFIXED,FIXED]) - - self.test1.add_parameter(self.white, 0) + self.test1.kern.add_parameter(self.white, 0) self.assertListEqual(self.test1._fixes_.tolist(),[FIXED,UNFIXED,UNFIXED]) + self.test1.kern.rbf.fix() + self.assertListEqual(self.test1._fixes_.tolist(),[FIXED]*3) def test_remove_parameter(self): from GPy.core.parameterization.transformations import FIXED, UNFIXED, __fixed__, Logexp self.white.fix() - self.test1.remove_parameter(self.white) + self.test1.kern.remove_parameter(self.white) self.assertIs(self.test1._fixes_,None) self.assertListEqual(self.white._fixes_.tolist(), [FIXED]) @@ -81,7 +83,12 @@ class ParameterizedTest(unittest.TestCase): self.assertListEqual(self.white._fixes_.tolist(), [FIXED]) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) - self.assertListEqual(self.test1.constraints[Logexp()].tolist(), [0,1]) + self.assertListEqual(self.test1.constraints[Logexp()].tolist(), range(self.param.size, self.param.size+self.rbf.size)) + + def test_remove_parameter_param_array_grad_array(self): + val = self.test1.kern._param_array_.copy() + self.test1.kern.remove_parameter(self.white) + self.assertListEqual(self.test1.kern._param_array_.tolist(), val[:2].tolist()) def test_add_parameter_already_in_hirarchy(self): self.assertRaises(HierarchyError, self.test1.add_parameter, self.white._parameters_[0]) @@ -91,28 +98,35 @@ class ParameterizedTest(unittest.TestCase): self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) self.assertListEqual(self.rbf.constraints.indices()[0].tolist(), range(2)) from GPy.core.parameterization.transformations import Logexp - kern = self.rbf+self.white + kern = self.test1.kern + self.test1.remove_parameter(kern) self.assertListEqual(kern.constraints[Logexp()].tolist(), range(3)) def test_constraints(self): self.rbf.constrain(GPy.transformations.Square(), False) - self.assertListEqual(self.test1.constraints[GPy.transformations.Square()].tolist(), range(2)) - self.assertListEqual(self.test1.constraints[GPy.transformations.Logexp()].tolist(), [2]) + self.assertListEqual(self.test1.constraints[GPy.transformations.Square()].tolist(), range(self.param.size, self.param.size+self.rbf.size)) + 
self.assertListEqual(self.test1.constraints[GPy.transformations.Logexp()].tolist(), [self.param.size+self.rbf.size]) - self.test1.remove_parameter(self.rbf) + self.test1.kern.remove_parameter(self.rbf) self.assertListEqual(self.test1.constraints[GPy.transformations.Square()].tolist(), []) def test_constraints_views(self): - self.assertEqual(self.white.constraints._offset, 2) - self.assertEqual(self.rbf.constraints._offset, 0) - self.assertEqual(self.param.constraints._offset, 3) + self.assertEqual(self.white.constraints._offset, self.param.size+self.rbf.size) + self.assertEqual(self.rbf.constraints._offset, self.param.size) + self.assertEqual(self.param.constraints._offset, 0) def test_fixing_randomize(self): self.white.fix(warning=True) - val = float(self.test1.white.variance) + val = float(self.white.variance) self.test1.randomize() self.assertEqual(val, self.white.variance) + def test_fixing_randomize_parameter_handling(self): + self.rbf.fix(warning=True) + val = float(self.rbf.variance) + self.test1.kern.randomize() + self.assertEqual(val, self.rbf.variance) + def test_fixing_optimize(self): self.testmodel.kern.lengthscale.fix() val = float(self.testmodel.kern.lengthscale)