Mirror of https://github.com/SheffieldML/GPy.git
[pickle] fix load errors caused by kernel changes; backwards-compatibility issues fixed
This commit is contained in:
parent 6f9c5042f9
commit 850c10beaa

4 changed files with 42 additions and 53 deletions
@@ -52,24 +52,5 @@ def load(file_or_path):
     for name, module in inspect.getmembers(kern.src): # @UndefinedVariable
         if not name.startswith('_'):
             sys.modules['GPy.kern._src.{}'.format(name)] = module
-    try:
-        import cPickle as pickle
-        if isinstance(file_or_path, basestring):
-            with open(file_or_path, 'rb') as f:
-                m = pickle.load(f)
-        else:
-            m = pickle.load(file_or_path)
-    except: # python3
-        import pickle # @Reimport
-        if isinstance(file_or_path, str):
-            with open(file_or_path, 'rb') as f:
-                #u = pickle._Unpickler(f) # @UndefinedVariable
-                #u.encoding = 'latin1'
-                #m = u.load()
-                m = pickle.load(f, encoding='latin1')#
-        else:
-            #u = pickle._Unpickler(file_or_path) # @UndefinedVariable
-            #u.encoding = 'latin1'
-            #m = u.load(protocol=2)
-            m = pickle.load(f, encoding='latin1')#
-    return m
+    import paramz
+    return paramz.load(file_or_path)
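The Python 2/3 pickle handling is now delegated entirely to paramz, which GPy already uses for its parameter handling. A minimal sketch of the resulting round trip (illustrative only; the file name is made up, and pickle() is the save method paramz provides on parameterized objects):

    import numpy as np
    import GPy

    X = np.random.rand(20, 1)
    Y = np.sin(X) + 0.05 * np.random.randn(20, 1)

    m = GPy.models.GPRegression(X, Y)
    m.optimize()
    m.pickle('gp_model.pickle')          # pickle() comes from paramz's Pickleable base class
    m2 = GPy.load('gp_model.pickle')     # load() now just forwards to paramz.load()
    print(m2)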
@@ -27,10 +27,10 @@ class Optimizer(object):
     :rtype: optimizer object.

     """
-    def __init__(self, x_init, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
+    def __init__(self, x_init=None, messages=False, max_f_eval=1e4, max_iters=1e3,
                  ftol=None, gtol=None, xtol=None, bfgs_factor=None):
         self.opt_name = None
-        self.x_init = x_init
+        #x_init = x_init
         # Turning messages off and using internal structure for print outs:
         self.messages = False #messages
         self.f_opt = None

@@ -45,7 +45,6 @@ class Optimizer(object):
         self.xtol = xtol
         self.gtol = gtol
         self.ftol = ftol
-        self.model = model

     def run(self, **kwargs):
         start = dt.datetime.now()

@@ -53,7 +52,7 @@ class Optimizer(object):
         end = dt.datetime.now()
         self.time = str(end - start)

-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         raise NotImplementedError("this needs to be implemented to use the optimizer class")

     def __str__(self):
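With this change the start point is passed to opt() instead of being read from self.x_init, so an Optimizer object carries no per-run state. A minimal sketch of a custom optimizer written against the new signature (illustrative, not part of the commit; it assumes, as the constructor above suggests, that max_iters is stored on the instance and that f_fp returns an (objective, gradient) pair):

    import numpy as np

    class opt_gradient_descent(Optimizer):      # Optimizer as defined in this file
        def __init__(self, step=1e-3, *args, **kwargs):
            Optimizer.__init__(self, *args, **kwargs)
            self.opt_name = "Plain gradient descent (illustrative)"
            self.step = step

        def opt(self, x_init, f_fp=None, f=None, fp=None):
            x = np.array(x_init, dtype=float)
            for _ in range(int(self.max_iters)):
                _, grad = f_fp(x)               # f_fp returns (objective, gradient)
                x = x - self.step * grad
            self.x_opt = x
            self.f_opt = f_fp(x)[0]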
@@ -64,12 +63,16 @@ class Optimizer(object):
         diagnostics += "Time elapsed: \t\t\t\t %s\n" % self.time
         return diagnostics

+    def __getstate__(self):
+        return []
+
+
 class opt_tnc(Optimizer):
     def __init__(self, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "TNC (Scipy implementation)"

-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         Run the TNC optimizer

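Returning an empty state from __getstate__ keeps optimizer internals out of pickles, so a saved model no longer breaks when the optimizer code changes. A quick illustration of the effect (a sketch; opt_tnc is the subclass shown in the hunk above):

    import pickle

    o = opt_tnc(max_iters=500)
    o2 = pickle.loads(pickle.dumps(o))   # only the empty state is stored,
                                         # so o2 comes back as a bare opt_tnc instance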
@@ -87,7 +90,7 @@ class opt_tnc(Optimizer):
         if self.gtol is not None:
             opt_dict['pgtol'] = self.gtol

-        opt_result = optimize.fmin_tnc(f_fp, self.x_init, messages=self.messages,
+        opt_result = optimize.fmin_tnc(f_fp, x_init, messages=self.messages,
                                        maxfun=self.max_f_eval, **opt_dict)
         self.x_opt = opt_result[0]
         self.f_opt = f_fp(self.x_opt)[0]

@@ -99,7 +102,7 @@ class opt_lbfgsb(Optimizer):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "L-BFGS-B (Scipy implementation)"

-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         Run the optimizer

@@ -123,7 +126,7 @@ class opt_lbfgsb(Optimizer):
         if self.bfgs_factor is not None:
             opt_dict['factr'] = self.bfgs_factor

-        opt_result = optimize.fmin_l_bfgs_b(f_fp, self.x_init, iprint=iprint,
+        opt_result = optimize.fmin_l_bfgs_b(f_fp, x_init, iprint=iprint,
                                             maxfun=self.max_iters, **opt_dict)
         self.x_opt = opt_result[0]
         self.f_opt = f_fp(self.x_opt)[0]
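All the SciPy-backed subclasses now thread x_init straight through to the underlying scipy.optimize call. None of this is usually called directly; the optimizer is picked by name when optimizing a model, roughly like this (a sketch continuing the earlier example; the 'lbfgsb' name and keyword arguments follow the usual GPy conventions and are assumed unchanged by this commit):

    m = GPy.models.GPRegression(X, Y)       # X, Y as in the earlier sketch
    m.optimize(optimizer='lbfgsb', max_iters=200, messages=True)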
@@ -133,13 +136,13 @@ class opt_lbfgsb(Optimizer):
         #a more helpful error message is available in opt_result in the Error case
         if opt_result[2]['warnflag']==2:
             self.status = 'Error' + str(opt_result[2]['task'])

 class opt_bfgs(Optimizer):
     def __init__(self, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "BFGS (Scipy implementation)"

-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         Run the optimizer

@@ -154,7 +157,7 @@ class opt_bfgs(Optimizer):
         if self.gtol is not None:
             opt_dict['pgtol'] = self.gtol

-        opt_result = optimize.fmin_bfgs(f, self.x_init, fp, disp=self.messages,
+        opt_result = optimize.fmin_bfgs(f, x_init, fp, disp=self.messages,
                                         maxiter=self.max_iters, full_output=True, **opt_dict)
         self.x_opt = opt_result[0]
         self.f_opt = f_fp(self.x_opt)[0]

@@ -166,7 +169,7 @@ class opt_simplex(Optimizer):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "Nelder-Mead simplex routine (via Scipy)"

-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         The simplex optimizer does not require gradients.
         """

@@ -181,7 +184,7 @@ class opt_simplex(Optimizer):
         if self.gtol is not None:
             print("WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it")

-        opt_result = optimize.fmin(f, self.x_init, (), disp=self.messages,
+        opt_result = optimize.fmin(f, x_init, (), disp=self.messages,
                                    maxfun=self.max_f_eval, full_output=True, **opt_dict)

         self.x_opt = opt_result[0]

@@ -196,7 +199,7 @@ class opt_rasm(Optimizer):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "Rasmussen's Conjugate Gradient"

-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         """
         Run Rasmussen's Conjugate Gradient optimizer
         """

@@ -213,7 +216,7 @@ class opt_rasm(Optimizer):
         if self.gtol is not None:
             print("WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it")

-        opt_result = rasm.minimize(self.x_init, f_fp, (), messages=self.messages,
+        opt_result = rasm.minimize(x_init, f_fp, (), messages=self.messages,
                                    maxnumfuneval=self.max_f_eval)
         self.x_opt = opt_result[0]
         self.f_opt = opt_result[1][-1]

@@ -230,11 +233,11 @@ class opt_SCG(Optimizer):

         self.opt_name = "Scaled Conjugate Gradients"

-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         assert not f is None
         assert not fp is None

-        opt_result = SCG(f, fp, self.x_init, display=self.messages,
+        opt_result = SCG(f, fp, x_init, display=self.messages,
                          maxiters=self.max_iters,
                          max_f_eval=self.max_f_eval,
                          xtol=self.xtol, ftol=self.ftol,

@@ -245,7 +248,7 @@ class opt_SCG(Optimizer):
         self.f_opt = self.trace[-1]
         self.funct_eval = opt_result[2]
         self.status = opt_result[3]

 class Opt_Adadelta(Optimizer):
     def __init__(self, step_rate=0.1, decay=0.9, momentum=0, *args, **kwargs):
         Optimizer.__init__(self, *args, **kwargs)
@@ -254,13 +257,13 @@ class Opt_Adadelta(Optimizer):
         self.decay = decay
         self.momentum = momentum

-    def opt(self, f_fp=None, f=None, fp=None):
+    def opt(self, x_init, f_fp=None, f=None, fp=None):
         assert not fp is None

         import climin

-        opt = climin.adadelta.Adadelta(self.x_init, fp, step_rate=self.step_rate, decay=self.decay, momentum=self.momentum)
+        opt = climin.adadelta.Adadelta(x_init, fp, step_rate=self.step_rate, decay=self.decay, momentum=self.momentum)

         for info in opt:
             if info['n_iter']>=self.max_iters:
                 self.x_opt = opt.wrt
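Opt_Adadelta is the one optimizer here that wraps an external package (climin) rather than SciPy, so it only runs when climin is installed. A rough sketch of driving it directly under the new interface on a toy quadratic (illustrative only; Opt_Adadelta is the class shown above):

    import numpy as np

    def quad_grad(x):
        return 2.0 * x                       # gradient of ||x||^2

    ada = Opt_Adadelta(step_rate=0.1, decay=0.9, momentum=0, max_iters=100)
    ada.opt(np.ones(3), fp=quad_grad)        # requires the optional climin package
    print(ada.x_opt)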
@@ -58,7 +58,11 @@ class Kern(Parameterized):
         self.useGPU = self._support_GPU and useGPU

         from .psi_comp import PSICOMP_GH
         self.psicomp = PSICOMP_GH()

+    def __setstate__(self, state):
+        self._all_dims_active = range(0, max(state['active_dims'])+1)
+        super(Kern, self).__setstate__(state)
+
     @property
     def _effective_input_dim(self):
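Kernels pickled before _all_dims_active existed have no such attribute, so __setstate__ rebuilds it from the stored active_dims before handing the rest of the state to the parent class. The reconstruction amounts to this (a sketch with a made-up active_dims value):

    state = {'active_dims': [0, 2]}                    # as stored in an old pickle (illustrative)
    _all_dims_active = range(0, max(state['active_dims']) + 1)
    print(list(_all_dims_active))                      # -> [0, 1, 2]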
@@ -209,15 +213,15 @@ class Kern(Parameterized):
     def get_most_significant_input_dimensions(self, which_indices=None):
         """
         Determine which dimensions should be plotted

         Returns the top three most significant input dimensions

         if less than three dimensions, the non existing dimensions are
         labeled as None, so for a 1 dimensional input this returns
         (0, None, None).

         :param which_indices: force the indices to be the given indices.
         :type which_indices: int or tuple(int,int) or tuple(int,int,int)
         """
         if which_indices is None:
             which_indices = np.argsort(self.input_sensitivity())[::-1][:3]

@@ -233,7 +237,7 @@ class Kern(Parameterized):
                 input_1, input_2 = which_indices, None
             except ValueError:
                 # which_indices was a list or array like with only one int
                 input_1, input_2 = which_indices[0], None
         return input_1, input_2, input_3

@@ -47,12 +47,13 @@ class RBF(Stationary):
         return dc

     def __setstate__(self, state):
+        self.use_invLengthscale = False
         return super(RBF, self).__setstate__(state)

     def spectrum(self, omega):
         assert self.input_dim == 1 #TODO: higher dim spectra?
         return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale*2*omega**2/2)

     def parameters_changed(self):
         if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
         super(RBF,self).parameters_changed()
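RBF kernels pickled before the inv_l option existed carry no use_invLengthscale attribute, so __setstate__ now defaults it to False before delegating to Stationary. When the flag is set, parameters_changed recovers the lengthscale as 1/sqrt(inv_l); the 1e-200 only guards against division by zero. A tiny numeric check of that relation (illustrative, not GPy code):

    import numpy as np

    inv_l = np.array([4.0])
    lengthscale = 1. / np.sqrt(inv_l + 1e-200)
    print(lengthscale)                      # -> [0.5], i.e. inv_l == 1/lengthscale**2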
@@ -85,7 +86,7 @@ class RBF(Stationary):

     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]

     def update_gradients_diag(self, dL_dKdiag, X):
         super(RBF,self).update_gradients_diag(dL_dKdiag, X)
         if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)
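The inv_l gradient in update_gradients_diag is the chain rule applied to that reparameterisation: with l = inv_l**-0.5, dl/d(inv_l) = -l**3/2, so the gradient with respect to inv_l is the lengthscale gradient times the factor (self.lengthscale**3/-2.) seen above. A quick finite-difference check of the factor (illustrative, not GPy code):

    l = lambda inv_l: inv_l ** -0.5
    inv_l0 = 4.0
    eps = 1e-6
    fd = (l(inv_l0 + eps) - l(inv_l0 - eps)) / (2 * eps)   # numerical dl/d(inv_l)
    print(fd, -(l(inv_l0) ** 3) / 2.)                      # both ~= -0.0625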