[pickling] _src -> src

Max Zwiessele 2015-10-16 14:56:32 +01:00
parent b236896fbd
commit 4cd16a86b4
9 changed files with 69 additions and 34 deletions

@@ -40,18 +40,28 @@ def load(file_or_path):
     :param file_name: path/to/file.pickle
     """
-    try:
-        import cPickle as pickle
-        if isinstance(file_or_path, basestring):
-            with open(file_or_path, 'rb') as f:
-                m = pickle.load(f)
-        else:
-            m = pickle.load(file_or_path)
-    except:
-        import pickle
-        if isinstance(file_or_path, str):
-            with open(file_or_path, 'rb') as f:
-                m = pickle.load(f)
-        else:
-            m = pickle.load(file_or_path)
+    # This is the pickling pain when changing _src -> src
+    try:
+        try:
+            import cPickle as pickle
+            if isinstance(file_or_path, basestring):
+                with open(file_or_path, 'rb') as f:
+                    m = pickle.load(f)
+            else:
+                m = pickle.load(file_or_path)
+        except:
+            import pickle
+            if isinstance(file_or_path, str):
+                with open(file_or_path, 'rb') as f:
+                    m = pickle.load(f)
+            else:
+                m = pickle.load(file_or_path)
+    except ImportError:
+        import sys
+        import inspect
+        sys.modules['GPy.kern._src'] = kern.src
+        for name, module in inspect.getmembers(kern.src):
+            if not name.startswith('_'):
+                sys.modules['GPy.kern._src.{}'.format(name)] = module
+        m = load(file_or_path)
     return m
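Note: pickle resolves classes by their dotted module path, so models saved before the _src -> src rename still reference GPy.kern._src; the except ImportError branch above repairs this by aliasing the old path (and each public submodule) in sys.modules and retrying the load. A minimal standalone sketch of the same idea, with hypothetical package names:

    import sys
    import pickle

    import mypkg.src  # hypothetical module that used to live at mypkg._src

    # Alias the old dotted path so pre-rename pickles resolve again.
    sys.modules['mypkg._src'] = mypkg.src

    with open('old_model.pickle', 'rb') as f:  # a pickle written before the rename
        m = pickle.load(f)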

@@ -121,6 +121,10 @@ class GP(Model):
     # W_{pp} := \texttt{Woodbury inv}
     # p := _predictive_variable
 
+    def __setstate__(self, state):
+        self.mean_function = None
+        super(GP, self).__setstate__(state)
+
     @property
     def _predictive_variable(self):
         return self.X
@@ -459,7 +463,7 @@ class GP(Model):
         m, v = self._raw_predict(X, full_cov=full_cov, **predict_kwargs)
         if self.normalizer is not None:
             m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)
 
         def sim_one_dim(m, v):
             if not full_cov:
                 return np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
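For reference, the full_cov=False branch of sim_one_dim treats the per-point predictive variances as a diagonal covariance; a small sketch of the shapes involved (m and v here are hypothetical values):

    import numpy as np

    m = np.array([[0.0], [1.0], [2.0]])  # predictive means, one row per input
    v = np.array([[0.1], [0.2], [0.1]])  # predictive variances (full_cov=False)
    size = 5
    s = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
    print(s.shape)  # (3, 5): one column per sample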

@@ -30,6 +30,10 @@ class Model(Parameterized):
         self.obj_grads = None
         self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500)
 
+    def __setstate__(self, state):
+        self.obj_grads = None
+        super(Model, self).__setstate__(state)
+
     def log_likelihood(self):
         raise NotImplementedError("this needs to be implemented to use the model class")
 
     def _log_likelihood_gradients(self):
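Both GP.__setstate__ above and Model.__setstate__ here follow the same recipe: re-create attributes that old pickles may lack, then defer to the parent class, which restores whatever the saved state does contain. A minimal sketch of the pattern, with hypothetical classes:

    import pickle

    class Base(object):
        def __setstate__(self, state):
            self.__dict__.update(state)

    class Model(Base):
        def __setstate__(self, state):
            # Default for an attribute added after old pickles were written;
            # if the saved state has it, the super call overwrites the default.
            self.obj_grads = None
            super(Model, self).__setstate__(state)

    m = pickle.loads(pickle.dumps(Model()))
    print(m.obj_grads)  # None, even though the pickled state never stored it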

@@ -90,7 +90,7 @@ class Param(Parameterizable, ObsAr):
         self._original_ = getattr(obj, '_original_', None)
         self._name = getattr(obj, '_name', None)
         self._gradient_array_ = getattr(obj, '_gradient_array_', None)
-        self._update_on = getattr(obj, '_update_on', None)
+        self.__update_on = getattr(obj, '__update_on', None)
         self.constraints = getattr(obj, 'constraints', None)
         self.priors = getattr(obj, 'priors', None)
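One thing to keep in mind when reading the getattr(obj, '__update_on', None) call above: Python mangles double-underscore attribute names inside a class body, but string literals passed to getattr are taken as-is. A small standalone illustration with a hypothetical class:

    class Demo(object):
        def __init__(self):
            self.__update_on = True  # actually stored as _Demo__update_on

    d = Demo()
    print(getattr(d, '_Demo__update_on'))        # True
    print(getattr(d, '__update_on', 'missing'))  # 'missing': the string is not mangled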

@@ -34,6 +34,15 @@ class Updateable(Observable):
         p.traverse(turn_updates)
         self.trigger_update()
 
+    @property
+    def _update_on(self):
+        if not hasattr(self, '__update_on'):
+            self.__update_on = True
+        return self.__update_on
+
+    @_update_on.setter
+    def _update_on(self, update):
+        self.__update_on = update
+
     def toggle_update(self):
         print("deprecated: toggle_update was renamed to update_toggle for easier access")
         self.update_toggle()
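The property gives _update_on a lazy default of True, so objects restored from pickles that predate the attribute still work on first read. A sketch of the lazy-default pattern, using a hypothetical single-underscore backing name so that the hasattr string and the assignment refer to the same attribute (see the name-mangling note above):

    class Updateable(object):
        @property
        def _update_on(self):
            # Default to True for instances whose pickled state predates this attribute.
            if not hasattr(self, '_update_on_backing'):
                self._update_on_backing = True
            return self._update_on_backing

        @_update_on.setter
        def _update_on(self, update):
            self._update_on_backing = update

    u = Updateable()
    print(u._update_on)  # True, materialized on first read
    u._update_on = False
    print(u._update_on)  # False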

@@ -27,7 +27,7 @@ class Optimizer(object):
     :rtype: optimizer object.
     """
-    def __init__(self, x_init, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
+    def __init__(self, x_init=None, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
                  ftol=None, gtol=None, xtol=None, bfgs_factor=None):
        self.opt_name = None
        self.x_init = x_init
@@ -133,7 +133,7 @@ class opt_lbfgsb(Optimizer):
         #a more helpful error message is available in opt_result in the Error case
         if opt_result[2]['warnflag']==2:
             self.status = 'Error' + str(opt_result[2]['task'])
 
 class opt_bfgs(Optimizer):
     def __init__(self, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
@@ -245,7 +245,7 @@ class opt_SCG(Optimizer):
         self.f_opt = self.trace[-1]
         self.funct_eval = opt_result[2]
         self.status = opt_result[3]
 
 class Opt_Adadelta(Optimizer):
     def __init__(self, step_rate=0.1, decay=0.9, momentum=0, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
@@ -256,11 +256,11 @@ class Opt_Adadelta(Optimizer):
     def opt(self, f_fp=None, f=None, fp=None):
         assert not fp is None
         import climin
         opt = climin.adadelta.Adadelta(self.x_init, fp, step_rate=self.step_rate, decay=self.decay, momentum=self.momentum)
         for info in opt:
             if info['n_iter']>=self.max_iters:
                 self.x_opt = opt.wrt
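Defaulting x_init to None lets an optimizer be constructed before a starting point exists, with the starting point attached later. A hedged usage sketch (x0 and grad are hypothetical placeholders, and running it needs climin installed):

    import numpy as np

    x0 = np.zeros(3)            # hypothetical starting point
    grad = lambda x: 2.0 * x    # hypothetical objective gradient

    opt = Opt_Adadelta(step_rate=0.05)  # legal now that x_init defaults to None
    opt.x_init = x0                     # attached later, e.g. by the model
    opt.opt(fp=grad)                    # climin then iterates from opt.x_init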

@@ -1,10 +1,8 @@
 """
 Kernel module the kernels to sit in.
 
 .. automodule:: .src
     :members:
     :private-members:
 """
 from . import src
 from .src.kern import Kern
 from .src.add import Add
 from .src.prod import Prod

@@ -54,13 +54,22 @@ class Kern(Parameterized):
         self.active_dims = active_dims
         self._all_dims_active = np.atleast_1d(active_dims).astype(int)
-        assert self._all_dims_active.size == self.input_dim, "input_dim={} does not match len(active_dim)={}, _all_dims_active={}".format(self.input_dim, self._all_dims_active.size, self._all_dims_active)
+        assert self._all_dims_active.size == self.input_dim, "input_dim={} does not match len(active_dim)={}, active_dim={}".format(self.input_dim, self._all_dims_active.size, self._all_dims_active)
         self._sliced_X = 0
         self.useGPU = self._support_GPU and useGPU
         from .psi_comp import PSICOMP_GH
-        self.psicomp = PSICOMP_GH()
+        self.psicomp = PSICOMP_GH()
+
+    @property
+    def _all_dims_active(self):
+        if not hasattr(self, '__all_dims_active'):
+            self.__all_dims_active = np.asanyarray(self.active_dims)
+        return self.__all_dims_active
+
+    @_all_dims_active.setter
+    def _all_dims_active(self, active_dims):
+        self.__all_dims_active = np.asanyarray(active_dims)
+
     @property
     def _effective_input_dim(self):
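For context, active_dims selects which input columns a kernel operates on, and _all_dims_active is its cached integer-array form; the property above rebuilds that cache for objects unpickled from before the attribute existed. A hedged usage sketch, assuming a working GPy install:

    import GPy
    k = GPy.kern.RBF(input_dim=2, active_dims=[0, 2])  # kernel sees columns 0 and 2
    print(k._all_dims_active)  # array([0, 2])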
@@ -211,15 +220,15 @@ class Kern(Parameterized):
     def get_most_significant_input_dimensions(self, which_indices=None):
         """
         Determine which dimensions should be plotted
 
-        :param which_indices: force the indices to be the given indices.
-        :type which_indices: int or tuple(int,int) or tuple(int,int,int)
+        Returns the top three most significant input dimensions. If there
+        are fewer than three dimensions, the missing ones are labelled None,
+        so for a one dimensional input this returns (0, None, None).
+
+        :param which_indices: force the indices to be the given indices.
+        :type which_indices: int or tuple(int,int) or tuple(int,int,int)
         """
         if which_indices is None:
             which_indices = np.argsort(self.input_sensitivity())[::-1][:3]
@@ -235,7 +244,7 @@ class Kern(Parameterized):
             input_1, input_2 = which_indices, None
         except ValueError:
             # which_indices was a list or array like with only one int
-            input_1, input_2 = which_indices[0], None
+            input_1, input_2 = which_indices[0], None
         return input_1, input_2, input_3
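The docstring's (0, None, None) claim is easy to check; a hedged usage sketch, assuming a working GPy install:

    import GPy
    k = GPy.kern.RBF(input_dim=1)
    print(k.get_most_significant_input_dimensions())  # (0, None, None) for 1-D input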

@@ -47,12 +47,13 @@ class RBF(Stationary):
         return dc
 
     def __setstate__(self, state):
+        self.use_invLengthscale = False
         return super(RBF, self).__setstate__(state)
 
     def spectrum(self, omega):
         assert self.input_dim == 1 #TODO: higher dim spectra?
         return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale**2*omega**2/2)
 
     def parameters_changed(self):
         if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
         super(RBF,self).parameters_changed()
@@ -85,7 +86,7 @@ class RBF(Stationary):
     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]
 
     def update_gradients_diag(self, dL_dKdiag, X):
         super(RBF,self).update_gradients_diag(dL_dKdiag, X)
         if self.use_invLengthscale: self.inv_l.gradient = self.lengthscale.gradient*(self.lengthscale**3/-2.)
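The inv_l gradient line is a chain rule: with lengthscale l = inv_l**-0.5, as set in parameters_changed above, dl/d(inv_l) = -l**3/2, which is exactly the factor applied to self.lengthscale.gradient. A quick standalone numerical check:

    import numpy as np

    inv_l = 0.7
    l = 1.0 / np.sqrt(inv_l)  # lengthscale from its inverse square
    analytic = l**3 / -2.0    # dl/d(inv_l), the conversion factor above

    eps = 1e-6
    numeric = (1.0/np.sqrt(inv_l + eps) - 1.0/np.sqrt(inv_l - eps)) / (2*eps)
    print(analytic, numeric)  # agree to ~1e-9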