mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-07 19:12:40 +02:00

[pickling] _src -> src

parent b236896fbd
commit 4cd16a86b4

9 changed files with 69 additions and 34 deletions
@@ -40,18 +40,28 @@ def load(file_or_path):
     :param file_name: path/to/file.pickle
     """
+    # This is the pickling pain when changing _src -> src
     try:
-        import cPickle as pickle
-        if isinstance(file_or_path, basestring):
-            with open(file_or_path, 'rb') as f:
-                m = pickle.load(f)
-        else:
-            m = pickle.load(file_or_path)
-    except:
-        import pickle
-        if isinstance(file_or_path, str):
-            with open(file_or_path, 'rb') as f:
-                m = pickle.load(f)
-        else:
-            m = pickle.load(file_or_path)
+        try:
+            import cPickle as pickle
+            if isinstance(file_or_path, basestring):
+                with open(file_or_path, 'rb') as f:
+                    m = pickle.load(f)
+            else:
+                m = pickle.load(file_or_path)
+        except:
+            import pickle
+            if isinstance(file_or_path, str):
+                with open(file_or_path, 'rb') as f:
+                    m = pickle.load(f)
+            else:
+                m = pickle.load(file_or_path)
+    except ImportError:
+        import sys
+        import inspect
+        sys.modules['GPy.kern._src'] = kern.src
+        for name, module in inspect.getmembers(kern.src):
+            if not name.startswith('_'):
+                sys.modules['GPy.kern._src.{}'.format(name)] = module
+        m = load(file_or_path)
     return m
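The `except ImportError` branch is the heart of this commit: pickle stores classes by their dotted import path, so models pickled before the `_src` -> `src` rename still reference `GPy.kern._src.*`. Registering the renamed module (and its public submodules) under the old names in `sys.modules` lets those stale paths resolve again, after which the load is simply retried. A minimal self-contained sketch of the same aliasing technique, using hypothetical `oldmod`/`newmod` names in place of `GPy.kern._src`/`GPy.kern.src`:

import pickle
import sys
import types

# Stand-in for the renamed module: classes once pickled under 'oldmod'
# now live in 'newmod'. All names here are hypothetical.
newmod = types.ModuleType('newmod')

class Thing(object):
    pass

Thing.__module__ = 'newmod'   # pretend Thing was defined in newmod
newmod.Thing = Thing
sys.modules['newmod'] = newmod

data = pickle.dumps(Thing())                    # records 'newmod.Thing'
old_data = data.replace(b'newmod', b'oldmod')   # fake a pre-rename pickle

try:
    pickle.loads(old_data)                      # 'oldmod' does not exist
except ImportError:                             # ModuleNotFoundError on py3
    sys.modules['oldmod'] = newmod              # alias old name -> new module
    obj = pickle.loads(old_data)                # now restores cleanly
    assert isinstance(obj, Thing)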
@@ -121,6 +121,10 @@ class GP(Model):
     # W_{pp} := \texttt{Woodbury inv}
     # p := _predictive_variable
 
+    def __setstate__(self, state):
+        self.mean_function = None
+        super(GP, self).__setstate__(state)
+
     @property
     def _predictive_variable(self):
         return self.X
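Both this `__setstate__` and the matching one added to `Model` two hunks below follow the same recipe: seed an attribute that post-dates old pickles with its default, then let the parent restore the saved state. If the state dict already carries the attribute it is overwritten; if it comes from an old pickle, the default survives and unpickling cannot hit a missing attribute. A small sketch of the recipe with a hypothetical stand-in class, not GPy's actual classes:

class Stateful(object):
    def __init__(self):
        self.obj_grads = None
        self.mean_function = None    # attribute added after old pickles existed

    def __setstate__(self, state):
        # Seed the newer attribute first; an old state dict lacking
        # 'mean_function' then simply leaves the default in place.
        self.mean_function = None
        self.__dict__.update(state)

old_state = {'obj_grads': None}      # what an old pickle would restore
s = Stateful.__new__(Stateful)       # pickle bypasses __init__ like this
s.__setstate__(old_state)
assert s.mean_function is None and s.obj_grads is None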
@@ -459,7 +463,7 @@ class GP(Model):
         m, v = self._raw_predict(X, full_cov=full_cov, **predict_kwargs)
         if self.normalizer is not None:
             m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)
 
         def sim_one_dim(m, v):
             if not full_cov:
                 return np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
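An aside on the context lines above: with `full_cov=False` the covariance handed to `multivariate_normal` is diagonal, so the draw is equivalent to scaling independent standard normals per output dimension, without materialising the full matrix. A quick numeric sketch of the equivalence (illustrative values, not GPy code):

import numpy as np

m = np.array([0.5, -1.0, 2.0])   # predictive means
v = np.array([0.1, 0.4, 0.2])    # predictive variances (diagonal)
size = 5

rng = np.random.default_rng(0)
# Dense route, as in the diff: build a diagonal covariance matrix.
dense = rng.multivariate_normal(m, np.diag(v), size).T
# Cheap route: mean plus per-dimension scaled standard normals.
cheap = m[:, None] + np.sqrt(v)[:, None] * rng.standard_normal((3, size))
# Same distribution either way; both come out with shape (dims, size).
assert dense.shape == cheap.shape == (3, size)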
@@ -30,6 +30,10 @@ class Model(Parameterized):
         self.obj_grads = None
         self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500)
 
+    def __setstate__(self, state):
+        self.obj_grads = None
+        super(Model, self).__setstate__(state)
+
     def log_likelihood(self):
         raise NotImplementedError("this needs to be implemented to use the model class")
     def _log_likelihood_gradients(self):
@@ -90,7 +90,7 @@ class Param(Parameterizable, ObsAr):
         self._original_ = getattr(obj, '_original_', None)
         self._name = getattr(obj, '_name', None)
         self._gradient_array_ = getattr(obj, '_gradient_array_', None)
-        self._update_on = getattr(obj, '_update_on', None)
+        self.__update_on = getattr(obj, '__update_on', None)
         self.constraints = getattr(obj, 'constraints', None)
         self.priors = getattr(obj, 'priors', None)
 
@@ -34,6 +34,15 @@ class Updateable(Observable):
             p.traverse(turn_updates)
         self.trigger_update()
 
+    @property
+    def _update_on(self):
+        if not hasattr(self, '__update_on'):
+            self.__update_on = True
+        return self.__update_on
+    @_update_on.setter
+    def _update_on(self, update):
+        self.__update_on = update
+
     def toggle_update(self):
         print("deprecated: toggle_update was renamed to update_toggle for easier access")
         self.update_toggle()
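A caveat on this new property (and on the `getattr(obj, '__update_on', None)` line in the `Param` hunk above): Python name-mangles the identifier `self.__update_on` inside the class body to `self._Updateable__update_on`, but the string literal `'__update_on'` handed to `hasattr` or `getattr` is left untouched. The check therefore looks for a different name than the assignment creates, as this short demonstration shows; note how a value stored through the setter is clobbered back to the default on the next read:

class Updateable(object):
    @property
    def _update_on(self):
        # 'self.__update_on' mangles to 'self._Updateable__update_on',
        # but the string '__update_on' does not, so this check never
        # sees the attribute the assignment below creates.
        if not hasattr(self, '__update_on'):
            self.__update_on = True
        return self.__update_on
    @_update_on.setter
    def _update_on(self, update):
        self.__update_on = update

u = Updateable()
u._update_on = False            # stores _Updateable__update_on = False
print(u._update_on)             # True: the getter re-seeds the default
print('_Updateable__update_on' in u.__dict__)   # True, the mangled name
print(hasattr(u, '__update_on'))                # False, the literal name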
@@ -27,7 +27,7 @@ class Optimizer(object):
     :rtype: optimizer object.
 
     """
-    def __init__(self, x_init, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
+    def __init__(self, x_init=None, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
                  ftol=None, gtol=None, xtol=None, bfgs_factor=None):
         self.opt_name = None
         self.x_init = x_init
@@ -133,7 +133,7 @@ class opt_lbfgsb(Optimizer):
         #a more helpful error message is available in opt_result in the Error case
         if opt_result[2]['warnflag']==2:
             self.status = 'Error' + str(opt_result[2]['task'])
 
 class opt_bfgs(Optimizer):
     def __init__(self, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
@@ -245,7 +245,7 @@ class opt_SCG(Optimizer):
         self.f_opt = self.trace[-1]
         self.funct_eval = opt_result[2]
         self.status = opt_result[3]
 
 class Opt_Adadelta(Optimizer):
     def __init__(self, step_rate=0.1, decay=0.9, momentum=0, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
@@ -256,11 +256,11 @@ class Opt_Adadelta(Optimizer):
 
     def opt(self, f_fp=None, f=None, fp=None):
         assert not fp is None
 
         import climin
 
         opt = climin.adadelta.Adadelta(self.x_init, fp, step_rate=self.step_rate, decay=self.decay, momentum=self.momentum)
 
         for info in opt:
             if info['n_iter']>=self.max_iters:
                 self.x_opt = opt.wrt
@@ -1,10 +1,8 @@
 """
 Kernel module the kernels to sit in.
 
-.. automodule:: .src
- :members:
- :private-members:
 """
+from . import src
 from .src.kern import Kern
 from .src.add import Add
 from .src.prod import Prod
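The added `from . import src` is what the compatibility shim in the `load` hunk above relies on: `sys.modules['GPy.kern._src'] = kern.src` only works if `src` is bound as an attribute of the `GPy.kern` package, and `from . import src` guarantees that binding as soon as `GPy.kern` is imported.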
@@ -54,13 +54,22 @@ class Kern(Parameterized):
         self.active_dims = active_dims
         self._all_dims_active = np.atleast_1d(active_dims).astype(int)
 
-        assert self._all_dims_active.size == self.input_dim, "input_dim={} does not match len(active_dim)={}, _all_dims_active={}".format(self.input_dim, self._all_dims_active.size, self._all_dims_active)
+        assert self._all_dims_active.size == self.input_dim, "input_dim={} does not match len(active_dim)={}, active_dim={}".format(self.input_dim, self._all_dims_active.size, self._all_dims_active)
 
         self._sliced_X = 0
         self.useGPU = self._support_GPU and useGPU
 
         from .psi_comp import PSICOMP_GH
         self.psicomp = PSICOMP_GH()
 
+    @property
+    def _all_dims_active(self):
+        if not hasattr(self, '__all_dims_active'):
+            self.__all_dims_active = np.asanyarray(self.active_dims)
+        return self.__all_dims_active
+    @_all_dims_active.setter
+    def _all_dims_active(self, active_dims):
+        self.__all_dims_active = np.asanyarray(active_dims)
+
     @property
     def _effective_input_dim(self):
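The name-mangling caveat flagged after the `Updateable` hunk applies to this new property as well: `hasattr(self, '__all_dims_active')` tests the literal name while the assignments create `_Kern__all_dims_active`, so the check never passes and the getter re-derives the array from `self.active_dims` on every access.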
@@ -211,15 +220,15 @@ class Kern(Parameterized):
     def get_most_significant_input_dimensions(self, which_indices=None):
         """
         Determine which dimensions should be plotted
 
         Returns the top three most signification input dimensions
 
         if less then three dimensions, the non existing dimensions are
         labeled as None, so for a 1 dimensional input this returns
         (0, None, None).
 
         :param which_indices: force the indices to be the given indices.
         :type which_indices: int or tuple(int,int) or tuple(int,int,int)
         """
         if which_indices is None:
             which_indices = np.argsort(self.input_sensitivity())[::-1][:3]
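For reference, the `np.argsort(...)[::-1][:3]` idiom in the context above sorts the sensitivities in ascending order, reverses to descending, and keeps the indices of the three largest entries:

import numpy as np

sensitivity = np.array([0.2, 1.5, 0.1, 0.9])   # illustrative values
top3 = np.argsort(sensitivity)[::-1][:3]
print(top3)   # [1 3 0]: dimensions ordered by decreasing sensitivity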
@@ -235,7 +244,7 @@
             input_1, input_2 = which_indices, None
         except ValueError:
             # which_indices was a list or array like with only one int
             input_1, input_2 = which_indices[0], None
         return input_1, input_2, input_3
 
 
@@ -47,12 +47,13 @@ class RBF(Stationary):
         return dc
 
     def __setstate__(self, state):
+        self.use_invLengthscale = False
         return super(RBF, self).__setstate__(state)
 
     def spectrum(self, omega):
         assert self.input_dim == 1 #TODO: higher dim spectra?
         return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale*2*omega**2/2)
 
     def parameters_changed(self):
         if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
         super(RBF,self).parameters_changed()
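Seeding `use_invLengthscale` before delegating to the parent's `__setstate__` matters here: restoring the parent state can end up triggering `parameters_changed`, which (as the last lines of the hunk show) reads `self.use_invLengthscale`, and a pickle written before that attribute existed would otherwise raise an AttributeError mid-restore.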
@@ -85,7 +86,7 @@ class RBF(Stationary):
 
     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]
 
     def update_gradients_diag(self, dL_dKdiag, X):
         super(RBF,self).update_gradients_diag(dL_dKdiag, X)
         if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)
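The `inv_l` gradient in the final context line is the chain rule for the reparameterisation visible in `parameters_changed` above, where the lengthscale is recovered as `1./np.sqrt(self.inv_l + 1e-200)`. Writing $p$ for `inv_l`:

p = \ell^{-2} \iff \ell = p^{-1/2}, \qquad
\frac{d\ell}{dp} = -\tfrac{1}{2}\, p^{-3/2} = -\frac{\ell^{3}}{2}, \qquad
\frac{\partial L}{\partial p}
  = \frac{\partial L}{\partial \ell}\,\frac{d\ell}{dp}
  = \frac{\partial L}{\partial \ell} \cdot \frac{\ell^{3}}{-2}

which is exactly `self.lengthscale.gradient*(self.lengthscale**3/-2.)`.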