From 29921e1c69afc9b3b195f5984196cb11ab286be3 Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Fri, 16 Oct 2015 15:06:10 +0100
Subject: [PATCH] Revert "[pickling] _src -> src"

This reverts commit 4cd16a86b48b03d4a6edd56a969242296ab66f4d.
---
 GPy/__init__.py                            | 36 +++++++++++++-----------------------
 GPy/core/gp.py                             |  6 +-----
 GPy/core/model.py                          |  4 ----
 GPy/core/parameterization/param.py         |  2 +-
 GPy/core/parameterization/updateable.py    |  9 ---------
 GPy/inference/optimization/optimization.py | 12 ++++++------
 GPy/kern/__init__.py                       |  4 +++-
 GPy/kern/src/kern.py                       | 25 ++++++++-----------------
 GPy/kern/src/rbf.py                        |  5 ++---
 9 files changed, 34 insertions(+), 69 deletions(-)

diff --git a/GPy/__init__.py b/GPy/__init__.py
index d044b2c0..32f0c1c4 100644
--- a/GPy/__init__.py
+++ b/GPy/__init__.py
@@ -40,28 +40,18 @@ def load(file_or_path):
 
     :param file_name: path/to/file.pickle
     """
-    # This is the pickling pain when changing _src -> src
     try:
-        try:
-            import cPickle as pickle
-            if isinstance(file_or_path, basestring):
-                with open(file_or_path, 'rb') as f:
-                    m = pickle.load(f)
-            else:
-                m = pickle.load(file_or_path)
-        except:
-            import pickle
-            if isinstance(file_or_path, str):
-                with open(file_or_path, 'rb') as f:
-                    m = pickle.load(f)
-            else:
-                m = pickle.load(file_or_path)
-    except ImportError:
-        import sys
-        import inspect
-        sys.modules['GPy.kern._src'] = kern.src
-        for name, module in inspect.getmembers(kern.src):
-            if not name.startswith('_'):
-                sys.modules['GPy.kern._src.{}'.format(name)] = module
-        m = load(file_or_path)
+        import cPickle as pickle
+        if isinstance(file_or_path, basestring):
+            with open(file_or_path, 'rb') as f:
+                m = pickle.load(f)
+        else:
+            m = pickle.load(file_or_path)
+    except:
+        import pickle
+        if isinstance(file_or_path, str):
+            with open(file_or_path, 'rb') as f:
+                m = pickle.load(f)
+        else:
+            m = pickle.load(file_or_path)
     return m

diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 6d66cd68..c08e7906 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -121,10 +121,6 @@ class GP(Model):
     # W_{pp} := \texttt{Woodbury inv}
     # p := _predictive_variable
 
-    def __setstate__(self, state):
-        self.mean_function = None
-        super(GP, self).__setstate__(state)
-
     @property
     def _predictive_variable(self):
         return self.X
@@ -463,7 +459,7 @@ class GP(Model):
         m, v = self._raw_predict(X, full_cov=full_cov, **predict_kwargs)
         if self.normalizer is not None:
             m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)
-
+        
         def sim_one_dim(m, v):
             if not full_cov:
                 return np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T

diff --git a/GPy/core/model.py b/GPy/core/model.py
index ada0ef5f..c79c5465 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -30,10 +30,6 @@ class Model(Parameterized):
         self.obj_grads = None
         self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500)
 
-    def __setstate__(self, state):
-        self.obj_grads = None
-        super(Model, self).__setstate__(state)
-
     def log_likelihood(self):
         raise NotImplementedError("this needs to be implemented to use the model class")
     def _log_likelihood_gradients(self):

diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py
index 52bb28d1..8fdd744e 100644
--- a/GPy/core/parameterization/param.py
+++ b/GPy/core/parameterization/param.py
@@ -90,7 +90,7 @@ class Param(Parameterizable, ObsAr):
         self._original_ = getattr(obj, '_original_', None)
         self._name = getattr(obj, '_name', None)
         self._gradient_array_ = getattr(obj, '_gradient_array_', None)
-        self.__update_on = getattr(obj, '__update_on', None)
+        self._update_on = getattr(obj, '_update_on', None)
         self.constraints = getattr(obj, 'constraints', None)
         self.priors = getattr(obj, 'priors', None)
 

diff --git a/GPy/core/parameterization/updateable.py b/GPy/core/parameterization/updateable.py
index 22a57d41..07083ce0 100644
--- a/GPy/core/parameterization/updateable.py
+++ b/GPy/core/parameterization/updateable.py
@@ -34,15 +34,6 @@ class Updateable(Observable):
                 p.traverse(turn_updates)
         self.trigger_update()
 
-    @property
-    def _update_on(self):
-        if not hasattr(self, '__update_on'):
-            self.__update_on = True
-        return self.__update_on
-    @_update_on.setter
-    def _update_on(self, update):
-        self.__update_on = update
-
     def toggle_update(self):
         print("deprecated: toggle_update was renamed to update_toggle for easier access")
         self.update_toggle()

diff --git a/GPy/inference/optimization/optimization.py b/GPy/inference/optimization/optimization.py
index 7f80971c..1052e909 100644
--- a/GPy/inference/optimization/optimization.py
+++ b/GPy/inference/optimization/optimization.py
@@ -27,7 +27,7 @@ class Optimizer(object):
     :rtype: optimizer object.
 
     """
-    def __init__(self, x_init=None, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
+    def __init__(self, x_init, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
                  ftol=None, gtol=None, xtol=None, bfgs_factor=None):
         self.opt_name = None
         self.x_init = x_init
@@ -133,7 +133,7 @@
         #a more helpful error message is available in opt_result in the Error case
         if opt_result[2]['warnflag']==2:
             self.status = 'Error' + str(opt_result[2]['task'])
-
+        
 class opt_bfgs(Optimizer):
     def __init__(self, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
@@ -245,7 +245,7 @@ class opt_SCG(Optimizer):
         self.f_opt = self.trace[-1]
         self.funct_eval = opt_result[2]
         self.status = opt_result[3]
-
+        
 class Opt_Adadelta(Optimizer):
     def __init__(self, step_rate=0.1, decay=0.9, momentum=0, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
@@ -256,11 +256,11 @@
 
     def opt(self, f_fp=None, f=None, fp=None):
         assert not fp is None
-
+        
         import climin
-
+        
         opt = climin.adadelta.Adadelta(self.x_init, fp, step_rate=self.step_rate, decay=self.decay, momentum=self.momentum)
-
+        
         for info in opt:
             if info['n_iter']>=self.max_iters:
                 self.x_opt = opt.wrt

diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py
index 05540a8c..f8f7d016 100644
--- a/GPy/kern/__init__.py
+++ b/GPy/kern/__init__.py
@@ -1,8 +1,10 @@
 """
 Kernel module
 the kernels to sit in.
+.. automodule:: .src
+    :members:
+    :private-members:
 """
-from . import src
 from .src.kern import Kern
 from .src.add import Add
 from .src.prod import Prod

diff --git a/GPy/kern/src/kern.py b/GPy/kern/src/kern.py
index da4c17d5..4d535b60 100644
--- a/GPy/kern/src/kern.py
+++ b/GPy/kern/src/kern.py
@@ -54,22 +54,13 @@ class Kern(Parameterized):
         self.active_dims = active_dims
         self._all_dims_active = np.atleast_1d(active_dims).astype(int)
 
-        assert self._all_dims_active.size == self.input_dim, "input_dim={} does not match len(active_dim)={}, active_dim={}".format(self.input_dim, self._all_dims_active.size, self._all_dims_active)
+        assert self._all_dims_active.size == self.input_dim, "input_dim={} does not match len(active_dim)={}, _all_dims_active={}".format(self.input_dim, self._all_dims_active.size, self._all_dims_active)
 
         self._sliced_X = 0
         self.useGPU = self._support_GPU and useGPU
 
         from .psi_comp import PSICOMP_GH
-        self.psicomp = PSICOMP_GH()
-
-    @property
-    def _all_dims_active(self):
-        if not hasattr(self, '__all_dims_active'):
-            self.__all_dims_active = np.asanyarray(self.active_dims)
-        return self.__all_dims_active
-    @_all_dims_active.setter
-    def _all_dims_active(self, active_dims):
-        self.__all_dims_active = np.asanyarray(active_dims)
+        self.psicomp = PSICOMP_GH()
 
     @property
     def _effective_input_dim(self):
@@ -220,15 +211,15 @@
     def get_most_significant_input_dimensions(self, which_indices=None):
         """
         Determine which dimensions should be plotted
-
+        
         Returns the top three most signification input dimensions
-
+        
         if less then three dimensions, the non existing dimensions are
         labeled as None, so for a 1 dimensional input this returns
         (0, None, None).
-
-        :param which_indices: force the indices to be the given indices.
-        :type which_indices: int or tuple(int,int) or tuple(int,int,int)
+        
+        :param which_indices: force the indices to be the given indices. 
+        :type which_indices: int or tuple(int,int) or tuple(int,int,int) 
         """
         if which_indices is None:
             which_indices = np.argsort(self.input_sensitivity())[::-1][:3]
@@ -244,7 +235,7 @@
                 input_1, input_2 = which_indices, None
         except ValueError:
             # which_indices was a list or array like with only one int
-            input_1, input_2 = which_indices[0], None
+            input_1, input_2 = which_indices[0], None 
         return input_1, input_2, input_3
 

diff --git a/GPy/kern/src/rbf.py b/GPy/kern/src/rbf.py
index ce422816..3607bea9 100644
--- a/GPy/kern/src/rbf.py
+++ b/GPy/kern/src/rbf.py
@@ -47,13 +47,12 @@ class RBF(Stationary):
         return dc
 
     def __setstate__(self, state):
-        self.use_invLengthscale = False
         return super(RBF, self).__setstate__(state)
 
     def spectrum(self, omega):
         assert self.input_dim == 1 #TODO: higher dim spectra?
         return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale*2*omega**2/2)
-
+        
     def parameters_changed(self):
         if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
         super(RBF,self).parameters_changed()
@@ -86,7 +85,7 @@ class RBF(Stationary):
 
     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]
-
+        
     def update_gradients_diag(self, dL_dKdiag, X):
         super(RBF,self).update_gradients_diag(dL_dKdiag, X)
         if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)
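Background on the `except ImportError` branch that this revert removes from
GPy/__init__.py: pickle records classes by their fully qualified module path,
so models pickled while the kernels lived under GPy.kern._src can only be
unpickled if that dotted path still resolves. Registering the renamed module
under its old name in sys.modules makes the old path resolve again. A minimal,
self-contained sketch of the technique, assuming Python 3 (the mypkg_old /
mypkg_new / Thing names are illustrative, not GPy's actual layout):

    import pickle
    import sys
    import types

    # Stand-in for the *old* module path a class was pickled under.
    old = types.ModuleType('mypkg_old')

    class Thing(object):
        def __init__(self, x):
            self.x = x

    Thing.__module__ = 'mypkg_old'    # pretend Thing was defined there
    old.Thing = Thing
    sys.modules['mypkg_old'] = old

    blob = pickle.dumps(Thing(42))    # the stream records 'mypkg_old.Thing'

    # Simulate the rename: the old module path no longer exists.
    del sys.modules['mypkg_old']
    try:
        pickle.loads(blob)            # fails: 'mypkg_old' cannot be imported
    except ImportError:
        # Alias the new module under the old dotted path and retry -- the
        # same idea as mapping GPy.kern._src onto kern.src in the removed code.
        new = types.ModuleType('mypkg_new')
        new.Thing = Thing
        sys.modules['mypkg_new'] = new
        sys.modules['mypkg_old'] = new
        obj = pickle.loads(blob)
        assert obj.x == 42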
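The `__update_on` to `_update_on` change in param.py and updateable.py touches
a Python name-mangling pitfall: inside a class body, `self.__update_on` is
compiled to `self._ClassName__update_on`, while the string literal
'__update_on' passed to getattr/hasattr is never mangled, so the two spellings
do not find each other. This is why the property removed here could
re-initialise the flag on every read. A self-contained illustration (toy
classes, not GPy's actual hierarchy):

    class Updateable(object):
        def __init__(self):
            self.__update_on = True   # stored as _Updateable__update_on

    class Param(Updateable):
        def copy_flag(self, obj):
            # The string '__update_on' is NOT mangled, so this lookup never
            # finds the attribute and silently returns the default.
            return getattr(obj, '__update_on', None)

    src = Updateable()
    print(vars(src))                  # {'_Updateable__update_on': True}
    print(Param().copy_flag(src))     # None -- the flag is silently dropped

The single-underscore attribute restored by this revert is not mangled, so a
plain getattr copies it across correctly.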