From 850c10beaadaec12d18a140a342ee7622179a5ed Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Mon, 9 Nov 2015 10:09:07 +0000
Subject: [PATCH] [pickle] load errors bc of kernel changes, backwards
 compatibility issues fixed

---
 GPy/__init__.py                            | 23 +---------
 GPy/inference/optimization/optimization.py | 49 ++++++++++++----------
 GPy/kern/src/kern.py                       | 18 ++++----
 GPy/kern/src/rbf.py                        |  5 ++-
 4 files changed, 42 insertions(+), 53 deletions(-)

diff --git a/GPy/__init__.py b/GPy/__init__.py
index ea4fe4c5..fab9bc32 100644
--- a/GPy/__init__.py
+++ b/GPy/__init__.py
@@ -52,24 +52,5 @@ def load(file_or_path):
     for name, module in inspect.getmembers(kern.src): # @UndefinedVariable
         if not name.startswith('_'):
             sys.modules['GPy.kern._src.{}'.format(name)] = module
-    try:
-        import cPickle as pickle
-        if isinstance(file_or_path, basestring):
-            with open(file_or_path, 'rb') as f:
-                m = pickle.load(f)
-        else:
-            m = pickle.load(file_or_path)
-    except: # python3
-        import pickle # @Reimport
-        if isinstance(file_or_path, str):
-            with open(file_or_path, 'rb') as f:
-                #u = pickle._Unpickler(f) # @UndefinedVariable
-                #u.encoding = 'latin1'
-                #m = u.load()
-                m = pickle.load(f, encoding='latin1')#
-        else:
-            #u = pickle._Unpickler(file_or_path) # @UndefinedVariable
-            #u.encoding = 'latin1'
-            #m = u.load(protocol=2)
-            m = pickle.load(f, encoding='latin1')#
-    return m
+    import paramz
+    return paramz.load(file_or_path)
\ No newline at end of file
diff --git a/GPy/inference/optimization/optimization.py b/GPy/inference/optimization/optimization.py
index 1052e909..27b036c1 100644
--- a/GPy/inference/optimization/optimization.py
+++ b/GPy/inference/optimization/optimization.py
@@ -27,10 +27,10 @@ class Optimizer(object):
     :rtype: optimizer object.
     """
 
-    def __init__(self, x_init, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
+    def __init__(self, x_init=None, messages=False, max_f_eval=1e4, max_iters=1e3,
                  ftol=None, gtol=None, xtol=None, bfgs_factor=None):
         self.opt_name = None
-        self.x_init = x_init
+        #x_init = x_init
         # Turning messages off and using internal structure for print outs:
         self.messages = False #messages
         self.f_opt = None
@@ -45,7 +45,6 @@ class Optimizer(object):
         self.xtol = xtol
         self.gtol = gtol
         self.ftol = ftol
-        self.model = model
 
     def run(self, **kwargs):
         start = dt.datetime.now()
@@ -53,7 +52,7 @@ class Optimizer(object):
         end = dt.datetime.now()
         self.time = str(end - start)
 
-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         raise NotImplementedError("this needs to be implemented to use the optimizer class")
 
     def __str__(self):
@@ -64,12 +63,16 @@ class Optimizer(object):
         diagnostics += "Time elapsed: \t\t\t\t %s\n" % self.time
         return diagnostics
 
+    def __getstate__(self):
+        return []
+
+
 class opt_tnc(Optimizer):
     def __init__(self, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "TNC (Scipy implementation)"
 
-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         Run the TNC optimizer
 
@@ -87,7 +90,7 @@ class opt_tnc(Optimizer):
         if self.gtol is not None:
             opt_dict['pgtol'] = self.gtol
 
-        opt_result = optimize.fmin_tnc(f_fp, self.x_init, messages=self.messages,
+        opt_result = optimize.fmin_tnc(f_fp, x_init, messages=self.messages,
                                        maxfun=self.max_f_eval, **opt_dict)
         self.x_opt = opt_result[0]
         self.f_opt = f_fp(self.x_opt)[0]
@@ -99,7 +102,7 @@ class opt_lbfgsb(Optimizer):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "L-BFGS-B (Scipy implementation)"
 
-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         Run the optimizer
 
@@ -123,7 +126,7 @@ class opt_lbfgsb(Optimizer):
         if self.bfgs_factor is not None:
             opt_dict['factr'] = self.bfgs_factor
 
-        opt_result = optimize.fmin_l_bfgs_b(f_fp, self.x_init, iprint=iprint,
+        opt_result = optimize.fmin_l_bfgs_b(f_fp, x_init, iprint=iprint,
                                             maxfun=self.max_iters, **opt_dict)
         self.x_opt = opt_result[0]
         self.f_opt = f_fp(self.x_opt)[0]
@@ -133,13 +136,13 @@ class opt_lbfgsb(Optimizer):
         #a more helpful error message is available in opt_result in the Error case
         if opt_result[2]['warnflag']==2:
             self.status = 'Error' + str(opt_result[2]['task'])
-        
+
 class opt_bfgs(Optimizer):
     def __init__(self, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "BFGS (Scipy implementation)"
 
-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         Run the optimizer
 
@@ -154,7 +157,7 @@ class opt_bfgs(Optimizer):
         if self.gtol is not None:
             opt_dict['pgtol'] = self.gtol
 
-        opt_result = optimize.fmin_bfgs(f, self.x_init, fp, disp=self.messages,
+        opt_result = optimize.fmin_bfgs(f, x_init, fp, disp=self.messages,
                                         maxiter=self.max_iters, full_output=True, **opt_dict)
         self.x_opt = opt_result[0]
         self.f_opt = f_fp(self.x_opt)[0]
@@ -166,7 +169,7 @@ class opt_simplex(Optimizer):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "Nelder-Mead simplex routine (via Scipy)"
 
-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
        """
        The simplex optimizer does not require gradients.
        """
@@ -181,7 +184,7 @@ class opt_simplex(Optimizer):
        if self.gtol is not None:
            print("WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it")
 
-       opt_result = optimize.fmin(f, self.x_init, (), disp=self.messages,
+       opt_result = optimize.fmin(f, x_init, (), disp=self.messages,
                                   maxfun=self.max_f_eval, full_output=True, **opt_dict)
 
        self.x_opt = opt_result[0]
@@ -196,7 +199,7 @@ class opt_rasm(Optimizer):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "Rasmussen's Conjugate Gradient"
 
-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         Run Rasmussen's Conjugate Gradient optimizer
         """
@@ -213,7 +216,7 @@ class opt_rasm(Optimizer):
         if self.gtol is not None:
             print("WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it")
 
-        opt_result = rasm.minimize(self.x_init, f_fp, (), messages=self.messages,
+        opt_result = rasm.minimize(x_init, f_fp, (), messages=self.messages,
                                    maxnumfuneval=self.max_f_eval)
         self.x_opt = opt_result[0]
         self.f_opt = opt_result[1][-1]
@@ -230,11 +233,11 @@ class opt_SCG(Optimizer):
         self.opt_name = "Scaled Conjugate Gradients"
 
 
-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         assert not f is None
         assert not fp is None
 
-        opt_result = SCG(f, fp, self.x_init, display=self.messages,
+        opt_result = SCG(f, fp, x_init, display=self.messages,
                          maxiters=self.max_iters,
                          max_f_eval=self.max_f_eval,
                          xtol=self.xtol, ftol=self.ftol,
@@ -245,7 +248,7 @@ class opt_SCG(Optimizer):
         self.f_opt = self.trace[-1]
         self.funct_eval = opt_result[2]
         self.status = opt_result[3]
-        
+
 class Opt_Adadelta(Optimizer):
     def __init__(self, step_rate=0.1, decay=0.9, momentum=0, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
@@ -254,13 +257,13 @@ class Opt_Adadelta(Optimizer):
         self.step_rate = step_rate
         self.decay = decay
         self.momentum = momentum
-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
        assert not fp is None
-        
+
        import climin
-        
-        opt = climin.adadelta.Adadelta(self.x_init, fp, step_rate=self.step_rate, decay=self.decay, momentum=self.momentum)
-        
+
+        opt = climin.adadelta.Adadelta(x_init, fp, step_rate=self.step_rate, decay=self.decay, momentum=self.momentum)
+
        for info in opt:
            if info['n_iter']>=self.max_iters:
                self.x_opt = opt.wrt
diff --git a/GPy/kern/src/kern.py b/GPy/kern/src/kern.py
index 8e7ab4a4..ad41355f 100644
--- a/GPy/kern/src/kern.py
+++ b/GPy/kern/src/kern.py
@@ -58,7 +58,11 @@ class Kern(Parameterized):
         self.useGPU = self._support_GPU and useGPU
 
         from .psi_comp import PSICOMP_GH
-        self.psicomp = PSICOMP_GH() 
+        self.psicomp = PSICOMP_GH()
+
+    def __setstate__(self, state):
+        self._all_dims_active = range(0, max(state['active_dims'])+1)
+        super(Kern, self).__setstate__(state)
 
     @property
     def _effective_input_dim(self):
@@ -209,15 +213,15 @@ class Kern(Parameterized):
     def get_most_significant_input_dimensions(self, which_indices=None):
         """
         Determine which dimensions should be plotted
-        
+
         Returns the top three most signification input dimensions
-        
+
         if less then three dimensions, the non existing dimensions are
         labeled as None, so for a 1 dimensional input this returns
         (0, None, None).
-        
-        :param which_indices: force the indices to be the given indices. 
-        :type which_indices: int or tuple(int,int) or tuple(int,int,int) 
+
+        :param which_indices: force the indices to be the given indices.
+        :type which_indices: int or tuple(int,int) or tuple(int,int,int)
         """
         if which_indices is None:
             which_indices = np.argsort(self.input_sensitivity())[::-1][:3]
@@ -233,7 +237,7 @@ class Kern(Parameterized):
                 input_1, input_2 = which_indices, None
             except ValueError:
                 # which_indices was a list or array like with only one int
-                input_1, input_2 = which_indices[0], None 
+                input_1, input_2 = which_indices[0], None
         return input_1, input_2, input_3
 
 
diff --git a/GPy/kern/src/rbf.py b/GPy/kern/src/rbf.py
index ff3ee277..ff86561d 100644
--- a/GPy/kern/src/rbf.py
+++ b/GPy/kern/src/rbf.py
@@ -47,12 +47,13 @@ class RBF(Stationary):
         return dc
 
     def __setstate__(self, state):
+        self.use_invLengthscale = False
         return super(RBF, self).__setstate__(state)
 
     def spectrum(self, omega):
         assert self.input_dim == 1 #TODO: higher dim spectra?
         return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale*2*omega**2/2)
-    
+
     def parameters_changed(self):
         if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
         super(RBF,self).parameters_changed()
@@ -85,7 +86,7 @@ class RBF(Stationary):
 
     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]
-    
+
     def update_gradients_diag(self, dL_dKdiag, X):
         super(RBF,self).update_gradients_diag(dL_dKdiag, X)
         if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)
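
Note: GPy.load is now a thin wrapper around paramz. A minimal round-trip
sketch of the new load path (the file name is illustrative, and it assumes
models save via paramz's Pickleable.pickle()):

    import numpy as np
    import GPy

    X = np.random.randn(20, 1)
    Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
    m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))

    m.pickle('model.pkl')       # serialize through paramz's Pickleable
    m2 = GPy.load('model.pkl')  # GPy.load now delegates to paramz.load
    print(m2)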
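
Note: the optimization.py hunks make optimizers stateless with respect to the
starting point: x_init is passed to opt() per call rather than stored on the
instance, and __getstate__ returning [] keeps optimizer objects trivially
picklable. A hypothetical subclass under the new signature (the class name and
update rule are illustrative, not part of GPy):

    import numpy as np
    from GPy.inference.optimization.optimization import Optimizer

    class opt_gradient_descent(Optimizer):
        def __init__(self, step=1e-3, *args, **kwargs):
            Optimizer.__init__(self, *args, **kwargs)
            self.opt_name = "plain gradient descent (illustrative)"
            self.step = step

        def opt(self, x_init, f_fp=None, f=None, fp=None):
            # x_init arrives as an argument now, not as self.x_init
            x = np.array(x_init, dtype=float)
            for _ in range(int(self.max_iters)):
                x -= self.step * fp(x)
            self.x_opt = x
            self.f_opt = f(x)
            self.status = 'maximum number of iterations reached'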
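
Note: the kern.py and rbf.py hunks share one backwards-compatibility pattern:
__setstate__ fills in attributes that old pickles predate (_all_dims_active,
use_invLengthscale) before passing the state up the MRO. A self-contained
sketch of the pattern using toy classes (not GPy code):

    import pickle

    class Base(object):
        def __setstate__(self, state):
            self.__dict__.update(state)

    class Kernel(Base):
        def __init__(self, active_dims):
            self.active_dims = active_dims
            self.use_invLengthscale = False  # attribute added after old pickles were written

        def __setstate__(self, state):
            # Default the new attribute first; an old pickle's state dict
            # lacks it, and the update only copies what is present.
            self.use_invLengthscale = False
            super(Kernel, self).__setstate__(state)

    k = Kernel([0, 1])
    del k.__dict__['use_invLengthscale']  # simulate a pickle from before the attribute existed
    old = pickle.loads(pickle.dumps(k))
    assert old.use_invLengthscale is False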