diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 5be3e944..5b41f6d0 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -216,15 +216,16 @@ class GP(Model):
         """
-        return Model._getstate(self) + [self.X,
-                self.num_data,
-                self.input_dim,
-                self.kern,
-                self.likelihood,
-                self.output_dim,
-                ]
+        return []#Model._getstate(self) + [self.X,
+#                self.num_data,
+#                self.input_dim,
+#                self.kern,
+#                self.likelihood,
+#                self.output_dim,
+#                ]
 
     def _setstate(self, state):
+        return
         self.output_dim = state.pop()
         self.likelihood = state.pop()
         self.kern = state.pop()
diff --git a/GPy/core/parameterization/lists_and_dicts.py b/GPy/core/parameterization/lists_and_dicts.py
index ca0589c9..31235952 100644
--- a/GPy/core/parameterization/lists_and_dicts.py
+++ b/GPy/core/parameterization/lists_and_dicts.py
@@ -28,4 +28,11 @@ class ArrayList(list):
                 return True
         return False
 
+    def index(self, item):
+        index = 0
+        for el in self:
+            if el is item:
+                return index
+            index += 1
+        raise ValueError, "{} is not in list".format(item)
     pass
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index 1cdeee0b..b804a61a 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -902,15 +902,19 @@ class Parameterizable(OptimizationHandlable):
     #===========================================================================
     def copy(self):
         """Returns a (deep) copy of the current model"""
-        raise NotImplementedError, "Copy is not yet implemented, TODO: Observable hierarchy"
+        #raise NotImplementedError, "Copy is not yet implemented, TODO: Observable hierarchy"
         import copy
         from .index_operations import ParameterIndexOperations, ParameterIndexOperationsView
         from .lists_and_dicts import ArrayList
+
+        param_mapping = [[] for _ in range(self.num_params)]
         dc = dict()
         for k, v in self.__dict__.iteritems():
             if k not in ['_parent_', '_parameters_', '_parent_index_', '_observer_callables_'] + self.parameter_names(recursive=False):
-                if isinstance(v, (Constrainable, ParameterIndexOperations, ParameterIndexOperationsView)):
+                if v in self._parameters_:
+                    param_mapping[self._parameters_.index(v)] += [k]
+                elif isinstance(v, (Constrainable, ParameterIndexOperations, ParameterIndexOperationsView)):
                     dc[k] = v.copy()
                 else:
                     dc[k] = copy.deepcopy(v)
@@ -928,9 +932,10 @@ class Parameterizable(OptimizationHandlable):
 
         s = self.__new__(self.__class__)
         s.__dict__ = dc
-        for p in params:
+        for p, mlist in zip(params, param_mapping):
             s.add_parameter(p, _ignore_added_names=True)
-
+            for m in mlist:
+                setattr(s, m, p)
         return s
 
     #===========================================================================
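[Review note] The identity-based ArrayList.index above matters because
list.index compares elements with ==, and GPy's Param objects behave like
numpy arrays under ==, so `el == item` yields an element-wise array whose
truth value is ambiguous. copy() then uses this index to remember which
attribute names (e.g. self.W) pointed at which parameter, and re-binds them
on the copy after add_parameter. A minimal, self-contained sketch of the
failure mode and the fix, using bare numpy arrays as stand-ins for Param
objects:

    import numpy as np

    class ArrayList(list):
        def index(self, item):
            # compare by identity, not by ==, so array-valued
            # elements can be located without ambiguity
            for i, el in enumerate(self):
                if el is item:
                    return i
            raise ValueError("{} is not in list".format(item))

    a, b = np.ones(3), np.zeros(3)
    assert ArrayList([a, b]).index(b) == 1
    # [a, b].index(b) raises "The truth value of an array with more than
    # one element is ambiguous", because list.index falls back to a == b.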
""" - return [ - self._fixes_, - self.priors, - self.constraints, - self._parameters_, - self._name, - self._added_names_, - ] + return [] def _setstate(self, state): - self._added_names_ = state.pop() - self._name = state.pop() - self._parameters_ = state.pop() - self.constraints = state.pop() - self.priors = state.pop() - self._fixes_ = state.pop() - self._connect_parameters() self.parameters_changed() #=========================================================================== # Override copy to handle programmatically added observers #=========================================================================== def copy(self): - c = super(Pickleable, self).copy() + c = super(Parameterized, self).copy() c.add_observer(c, c._parameters_changed_notification, -100) return c diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py index 0e265a64..55b69bd7 100644 --- a/GPy/kern/__init__.py +++ b/GPy/kern/__init__.py @@ -1,6 +1,6 @@ from _src.kern import Kern from _src.rbf import RBF -from _src.linear import Linear +from _src.linear import Linear, LinearFull from _src.static import Bias, White from _src.brownian import Brownian from _src.sympykern import Sympykern diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 31fa8690..9d8d3f7b 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -11,7 +11,7 @@ from ...util.caching import Cache_this class Kern(Parameterized): #=========================================================================== - # This adds input slice support. The rather ugly code for slicing can be + # This adds input slice support. The rather ugly code for slicing can be # found in kernel_slice_operations __metaclass__ = KernCallsViaSlicerMeta #=========================================================================== diff --git a/GPy/kern/_src/kernel_slice_operations.py b/GPy/kern/_src/kernel_slice_operations.py index 7fa98763..6620f28c 100644 --- a/GPy/kern/_src/kernel_slice_operations.py +++ b/GPy/kern/_src/kernel_slice_operations.py @@ -5,30 +5,27 @@ Created on 11 Mar 2014 ''' from ...core.parameterization.parameterized import ParametersChangedMeta import numpy as np -import functools +def put_clean(dct, name, *args, **kw): + if name in dct: + dct['_clean_{}'.format(name)] = dct[name] + dct[name] = _slice_wrapper(None, dct[name], *args, **kw) + class KernCallsViaSlicerMeta(ParametersChangedMeta): - def __call__(self, *args, **kw): - instance = super(ParametersChangedMeta, self).__call__(*args, **kw) - instance.K = _Slice_wrapper(instance, instance.K) - instance.Kdiag = _Slice_wrapper_diag(instance, instance.Kdiag) - - instance.update_gradients_full = _Slice_wrapper_derivative(instance, instance.update_gradients_full) - instance.update_gradients_diag = _Slice_wrapper_diag_derivative(instance, instance.update_gradients_diag) - - instance.gradients_X = _Slice_wrapper_grad_X(instance, instance.gradients_X) - instance.gradients_X_diag = _Slice_wrapper_grad_X_diag(instance, instance.gradients_X_diag) - - instance.psi0 = _Slice_wrapper(instance, instance.psi0) - instance.psi1 = _Slice_wrapper(instance, instance.psi1) - instance.psi2 = _Slice_wrapper(instance, instance.psi2) - - instance.update_gradients_expectations = _Slice_wrapper_psi_stat_derivative_no_ret(instance, instance.update_gradients_expectations) - instance.gradients_Z_expectations = _Slice_wrapper_psi_stat_derivative_Z(instance, instance.gradients_Z_expectations) - instance.gradients_qX_expectations = _Slice_wrapper_psi_stat_derivative(instance, instance.gradients_qX_expectations) - 
diff --git a/GPy/kern/_src/linear.py b/GPy/kern/_src/linear.py
index 609903aa..f9dacf02 100644
--- a/GPy/kern/_src/linear.py
+++ b/GPy/kern/_src/linear.py
@@ -313,3 +313,47 @@ class Linear(Kern):
 
     def input_sensitivity(self):
         return np.ones(self.input_dim) * self.variances
+
+class LinearFull(Kern):
+    def __init__(self, input_dim, rank, W=None, kappa=None, active_dims=None, name='linear_full'):
+        super(LinearFull, self).__init__(input_dim, active_dims, name)
+        if W is None:
+            W = np.ones((input_dim, rank))
+        if kappa is None:
+            kappa = np.ones(input_dim)
+        assert W.shape == (input_dim, rank)
+        assert kappa.shape == (input_dim,)
+
+        self.W = Param('W', W)
+        self.kappa = Param('kappa', kappa, Logexp())
+        self.add_parameters(self.W, self.kappa)
+
+    def K(self, X, X2=None):
+        P = np.dot(self.W, self.W.T) + np.diag(self.kappa)
+        return np.einsum('ij,jk,lk->il', X, P, X if X2 is None else X2)
+
+    def update_gradients_full(self, dL_dK, X, X2=None):
+        self.kappa.gradient = np.einsum('ij,ik,kj->j', X, dL_dK, X if X2 is None else X2)
+        self.W.gradient = np.einsum('ij,kl,ik,lm->jm', X, X if X2 is None else X2, dL_dK, self.W)
+        self.W.gradient += np.einsum('ij,kl,ik,jm->lm', X, X if X2 is None else X2, dL_dK, self.W)
+
+    def Kdiag(self, X):
+        P = np.dot(self.W, self.W.T) + np.diag(self.kappa)
+        return np.einsum('ij,jk,ik->i', X, P, X)
+
+    def update_gradients_diag(self, dL_dKdiag, X):
+        self.kappa.gradient = np.einsum('ij,i->j', np.square(X), dL_dKdiag)
+        self.W.gradient = 2.*np.einsum('ij,ik,jl,i->kl', X, X, self.W, dL_dKdiag)
+
+    def gradients_X(self, dL_dK, X, X2=None):
+        P = np.dot(self.W, self.W.T) + np.diag(self.kappa)
+        if X2 is None:
+            return 2.*np.einsum('ij,jk,kl->il', dL_dK, X, P)
+        else:
+            return np.einsum('ij,jk,kl->il', dL_dK, X2, P)
+
+    def gradients_X_diag(self, dL_dKdiag, X):
+        P = np.dot(self.W, self.W.T) + np.diag(self.kappa)
+        return 2.*np.einsum('jk,i,ij->ik', P, dL_dKdiag, X)
+
+
diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py
index c2877d06..0f19dbd1 100644
--- a/GPy/kern/_src/rbf.py
+++ b/GPy/kern/_src/rbf.py
@@ -64,7 +64,7 @@ class RBF(Stationary):
         if self.ARD:
             self.lengthscale.gradient = (dL_dpsi1[:,:,None]*_dpsi1_dlengthscale).reshape(-1,self.input_dim).sum(axis=0)
         else:
-            self.lengthscale.gradient = (dL_dpsi1[:,:,None]*_dpsi1_dlengthscale).sum() 
+            self.lengthscale.gradient = (dL_dpsi1[:,:,None]*_dpsi1_dlengthscale).sum()
 
         #from psi2
         self.variance.gradient += (dL_dpsi2 * _dpsi2_dvariance).sum()
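[Review note] LinearFull implements k(x, x') = x^T P x' with
P = W W^T + diag(kappa), a general positive semi-definite linear kernel;
the existing Linear kernel is the special case where P is diagonal. A quick
numeric check (assuming only numpy) that the einsum expressions used for K
and Kdiag match the naive matrix formulation:

    import numpy as np

    N, D, rank = 5, 3, 2
    rs = np.random.RandomState(0)
    X, W, kappa = rs.randn(N, D), rs.randn(D, rank), rs.rand(D)

    P = W.dot(W.T) + np.diag(kappa)          # PSD by construction
    K_naive = X.dot(P).dot(X.T)
    assert np.allclose(np.einsum('ij,jk,lk->il', X, P, X), K_naive)
    assert np.allclose(np.einsum('ij,jk,ik->i', X, P, X),
                       np.diag(K_naive))     # the Kdiag expression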
diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py
index 36d55645..944a054f 100644
--- a/GPy/testing/kernel_tests.py
+++ b/GPy/testing/kernel_tests.py
@@ -288,6 +288,11 @@ class KernelGradientTestsContinuous(unittest.TestCase):
         k.randomize()
         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
 
+    def test_LinearFull(self):
+        k = GPy.kern.LinearFull(self.D, self.D-1)
+        k.randomize()
+        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+
 #TODO: turn off grad checkingwrt X for indexed kernels like coregionalize
 # class KernelGradientTestsContinuous1D(unittest.TestCase):
 #     def setUp(self):
diff --git a/GPy/util/caching.py b/GPy/util/caching.py
index 282c9f8c..fcb0b726 100644
--- a/GPy/util/caching.py
+++ b/GPy/util/caching.py
@@ -48,7 +48,7 @@ class Cacher(object):
                 if k in kw and kw[k] is not None:
                     return self.operation(*args, **kw)
         # TODO: WARNING !!! Cache OFFSWITCH !!! WARNING
-        #return self.operation(*args)
+        # return self.operation(*args, **kw)
         #if the result is cached, return the cached computation
         state = [all(a is b for a, b in itertools.izip_longest(args, cached_i)) for cached_i in self.cached_inputs]
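[Review note] The caching.py change only brings the commented-out
"off-switch" line in sync with the real call signature, but the context
line below it shows the interesting bit: cache hits are detected by
identity (`a is b` over izip_longest), not by equality, so a hit requires
passing the very same array objects again. A stripped-down sketch of that
idea (hypothetical TinyCacher, not GPy's actual Cacher, which also handles
keyword arguments and cache invalidation):

    import itertools
    import numpy as np

    class TinyCacher(object):
        def __init__(self, operation):
            self.operation = operation
            self.cached_inputs, self.cached_outputs = [], []
        def __call__(self, *args):
            for inputs, output in zip(self.cached_inputs, self.cached_outputs):
                # identity check: cheap, and avoids ambiguous
                # array == array comparisons
                if all(a is b for a, b in itertools.izip_longest(args, inputs)):
                    return output
            out = self.operation(*args)
            self.cached_inputs.append(args)
            self.cached_outputs.append(out)
            return out

    K = TinyCacher(lambda X: X.dot(X.T))
    X = np.ones((3, 2))
    assert K(X) is K(X)             # same object twice: cache hit
    assert K(X.copy()) is not K(X)  # equal but distinct: recomputed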