Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-08 11:32:39 +02:00)
merge GPy upstream
Commit 00e4ac152a
9 changed files with 53 additions and 312 deletions
@@ -1 +1 @@
-__version__ = "0.9.5"
+__version__ = "0.9.6"
@@ -181,7 +181,7 @@ class GP(Model):
     def parameters_changed(self):
         """
         Method that is called upon any changes to :class:`~GPy.core.parameterization.param.Param` variables within the model.
-        In particular in the GP class this method reperforms inference, recalculating the posterior and log marginal likelihood and gradients of the model
+        In particular in the GP class this method re-performs inference, recalculating the posterior and log marginal likelihood and gradients of the model

         .. warning::
             This method is not designed to be called manually, the framework is set up to automatically call this method upon changes to parameters, if you call
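
Note (not part of the diff): a minimal sketch of the workflow the docstring describes, using made-up toy data X, Y and a GPRegression model m; any assignment to a Param triggers parameters_changed() behind the scenes, so it never needs to be called by hand.

    import numpy as np
    import GPy

    X = np.random.rand(20, 1)                          # hypothetical toy data
    Y = np.sin(6 * X) + 0.05 * np.random.randn(20, 1)
    m = GPy.models.GPRegression(X, Y)

    before = m.log_likelihood()
    m.kern.lengthscale = 0.5    # setting a Param re-runs inference automatically
    after = m.log_likelihood()  # already computed against the new posterior
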
@@ -371,13 +371,14 @@ class GP(Model):
             mean_jac[:,:,i] = kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1].T, Xnew, self._predictive_variable)

         dK_dXnew_full = np.empty((self._predictive_variable.shape[0], Xnew.shape[0], Xnew.shape[1]))
+        one = np.ones((1,1))
         for i in range(self._predictive_variable.shape[0]):
-            dK_dXnew_full[i] = kern.gradients_X([[1.]], Xnew, self._predictive_variable[[i]])
+            dK_dXnew_full[i] = kern.gradients_X(one, Xnew, self._predictive_variable[[i]])

         if full_cov:
-            dK2_dXdX = kern.gradients_XX([[1.]], Xnew)
+            dK2_dXdX = kern.gradients_XX(one, Xnew)
         else:
-            dK2_dXdX = kern.gradients_XX_diag([[1.]], Xnew)
+            dK2_dXdX = kern.gradients_XX_diag(one, Xnew)

         def compute_cov_inner(wi):
             if full_cov:
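
Note (not part of the diff): the two argument forms are numerically identical; the change simply hoists a reusable (1, 1) array of ones out of the loop instead of rebuilding a nested list literal on every call.

    import numpy as np
    one = np.ones((1, 1))
    assert np.array_equal(np.asarray([[1.]]), one)   # same dL_dK weight, built once
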
@@ -464,7 +465,7 @@ class GP(Model):
         m, v = self._raw_predict(X, full_cov=full_cov, **predict_kwargs)
         if self.normalizer is not None:
             m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)

         def sim_one_dim(m, v):
             if not full_cov:
                 return np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
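
Note (not part of the diff): when full_cov is False, sim_one_dim draws from a Gaussian whose covariance is just the predictive variances on the diagonal; a toy version with made-up mean and variance vectors m_ and v_ looks like this.

    import numpy as np
    m_, v_ = np.zeros((5, 1)), 0.1 * np.ones((5, 1))   # hypothetical predictive mean/variance
    size = 3
    s = np.random.multivariate_normal(m_.flatten(), np.diag(v_.flatten()), size).T
    # s has shape (5, 3): one column per drawn sample
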
@@ -1,286 +0,0 @@
-# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-import datetime as dt
-from scipy import optimize
-from warnings import warn
-
-try:
-    import rasmussens_minimize as rasm
-    rasm_available = True
-except ImportError:
-    rasm_available = False
-from .scg import SCG
-
-class Optimizer(object):
-    """
-    Superclass for all the optimizers.
-
-    :param x_init: initial set of parameters
-    :param f_fp: function that returns the function AND the gradients at the same time
-    :param f: function to optimize
-    :param fp: gradients
-    :param messages: print messages from the optimizer?
-    :type messages: (True | False)
-    :param max_f_eval: maximum number of function evaluations
-
-    :rtype: optimizer object.
-
-    """
-    def __init__(self, x_init, messages=False, model=None, max_f_eval=1e4, max_iters=1e3,
-                 ftol=None, gtol=None, xtol=None, bfgs_factor=None):
-        self.opt_name = None
-        self.x_init = x_init
-        # Turning messages off and using internal structure for print outs:
-        self.messages = False #messages
-        self.f_opt = None
-        self.x_opt = None
-        self.funct_eval = None
-        self.status = None
-        self.max_f_eval = int(max_iters)
-        self.max_iters = int(max_iters)
-        self.bfgs_factor = bfgs_factor
-        self.trace = None
-        self.time = "Not available"
-        self.xtol = xtol
-        self.gtol = gtol
-        self.ftol = ftol
-        self.model = model
-
-    def run(self, **kwargs):
-        start = dt.datetime.now()
-        self.opt(**kwargs)
-        end = dt.datetime.now()
-        self.time = str(end - start)
-
-    def opt(self, f_fp=None, f=None, fp=None):
-        raise NotImplementedError("this needs to be implemented to use the optimizer class")
-
-    def __str__(self):
-        diagnostics = "Optimizer: \t\t\t\t %s\n" % self.opt_name
-        diagnostics += "f(x_opt): \t\t\t\t %.3f\n" % self.f_opt
-        diagnostics += "Number of function evaluations: \t %d\n" % self.funct_eval
-        diagnostics += "Optimization status: \t\t\t %s\n" % self.status
-        diagnostics += "Time elapsed: \t\t\t\t %s\n" % self.time
-        return diagnostics
-
-class opt_tnc(Optimizer):
-    def __init__(self, *args, **kwargs):
-        Optimizer.__init__(self, *args, **kwargs)
-        self.opt_name = "TNC (Scipy implementation)"
-
-    def opt(self, f_fp=None, f=None, fp=None):
-        """
-        Run the TNC optimizer
-
-        """
-        tnc_rcstrings = ['Local minimum', 'Converged', 'XConverged', 'Maximum number of f evaluations reached',
-                         'Line search failed', 'Function is constant']
-
-        assert f_fp != None, "TNC requires f_fp"
-
-        opt_dict = {}
-        if self.xtol is not None:
-            opt_dict['xtol'] = self.xtol
-        if self.ftol is not None:
-            opt_dict['ftol'] = self.ftol
-        if self.gtol is not None:
-            opt_dict['pgtol'] = self.gtol
-
-        opt_result = optimize.fmin_tnc(f_fp, self.x_init, messages=self.messages,
-                                       maxfun=self.max_f_eval, **opt_dict)
-        self.x_opt = opt_result[0]
-        self.f_opt = f_fp(self.x_opt)[0]
-        self.funct_eval = opt_result[1]
-        self.status = tnc_rcstrings[opt_result[2]]
-
-class opt_lbfgsb(Optimizer):
-    def __init__(self, *args, **kwargs):
-        Optimizer.__init__(self, *args, **kwargs)
-        self.opt_name = "L-BFGS-B (Scipy implementation)"
-
-    def opt(self, f_fp=None, f=None, fp=None):
-        """
-        Run the optimizer
-
-        """
-        rcstrings = ['Converged', 'Maximum number of f evaluations reached', 'Error']
-
-        assert f_fp != None, "BFGS requires f_fp"
-
-        if self.messages:
-            iprint = 1
-        else:
-            iprint = -1
-
-        opt_dict = {}
-        if self.xtol is not None:
-            print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it")
-        if self.ftol is not None:
-            print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it")
-        if self.gtol is not None:
-            opt_dict['pgtol'] = self.gtol
-        if self.bfgs_factor is not None:
-            opt_dict['factr'] = self.bfgs_factor
-
-        opt_result = optimize.fmin_l_bfgs_b(f_fp, self.x_init, iprint=iprint,
-                                            maxfun=self.max_iters,maxiter=self.max_iters, **opt_dict)
-        self.x_opt = opt_result[0]
-        self.f_opt = f_fp(self.x_opt)[0]
-        self.funct_eval = opt_result[2]['funcalls']
-        self.status = rcstrings[opt_result[2]['warnflag']]
-
-        #a more helpful error message is available in opt_result in the Error case
-        if opt_result[2]['warnflag']==2:
-            self.status = 'Error' + str(opt_result[2]['task'])
-
-class opt_bfgs(Optimizer):
-    def __init__(self, *args, **kwargs):
-        Optimizer.__init__(self, *args, **kwargs)
-        self.opt_name = "BFGS (Scipy implementation)"
-
-    def opt(self, f_fp=None, f=None, fp=None):
-        """
-        Run the optimizer
-
-        """
-        rcstrings = ['','Maximum number of iterations exceeded', 'Gradient and/or function calls not changing']
-
-        opt_dict = {}
-        if self.xtol is not None:
-            print("WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it")
-        if self.ftol is not None:
-            print("WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it")
-        if self.gtol is not None:
-            opt_dict['pgtol'] = self.gtol
-
-        opt_result = optimize.fmin_bfgs(f, self.x_init, fp, disp=self.messages,
-                                        maxiter=self.max_iters, full_output=True, **opt_dict)
-        self.x_opt = opt_result[0]
-        self.f_opt = f_fp(self.x_opt)[0]
-        self.funct_eval = opt_result[4]
-        self.status = rcstrings[opt_result[6]]
-
-class opt_simplex(Optimizer):
-    def __init__(self, *args, **kwargs):
-        Optimizer.__init__(self, *args, **kwargs)
-        self.opt_name = "Nelder-Mead simplex routine (via Scipy)"
-
-    def opt(self, f_fp=None, f=None, fp=None):
-        """
-        The simplex optimizer does not require gradients.
-        """
-
-        statuses = ['Converged', 'Maximum number of function evaluations made', 'Maximum number of iterations reached']
-
-        opt_dict = {}
-        if self.xtol is not None:
-            opt_dict['xtol'] = self.xtol
-        if self.ftol is not None:
-            opt_dict['ftol'] = self.ftol
-        if self.gtol is not None:
-            print("WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it")
-
-        opt_result = optimize.fmin(f, self.x_init, (), disp=self.messages,
-                                   maxfun=self.max_f_eval, full_output=True, **opt_dict)
-
-        self.x_opt = opt_result[0]
-        self.f_opt = opt_result[1]
-        self.funct_eval = opt_result[3]
-        self.status = statuses[opt_result[4]]
-        self.trace = None
-
-
-class opt_rasm(Optimizer):
-    def __init__(self, *args, **kwargs):
-        Optimizer.__init__(self, *args, **kwargs)
-        self.opt_name = "Rasmussen's Conjugate Gradient"
-
-    def opt(self, f_fp=None, f=None, fp=None):
-        """
-        Run Rasmussen's Conjugate Gradient optimizer
-        """
-
-        assert f_fp != None, "Rasmussen's minimizer requires f_fp"
-        statuses = ['Converged', 'Line search failed', 'Maximum number of f evaluations reached',
-                    'NaNs in optimization']
-
-        opt_dict = {}
-        if self.xtol is not None:
-            print("WARNING: minimize doesn't have an xtol arg, so I'm going to ignore it")
-        if self.ftol is not None:
-            print("WARNING: minimize doesn't have an ftol arg, so I'm going to ignore it")
-        if self.gtol is not None:
-            print("WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it")
-
-        opt_result = rasm.minimize(self.x_init, f_fp, (), messages=self.messages,
-                                   maxnumfuneval=self.max_f_eval)
-        self.x_opt = opt_result[0]
-        self.f_opt = opt_result[1][-1]
-        self.funct_eval = opt_result[2]
-        self.status = statuses[opt_result[3]]
-
-        self.trace = opt_result[1]
-
-class opt_SCG(Optimizer):
-    def __init__(self, *args, **kwargs):
-        if 'max_f_eval' in kwargs:
-            warn("max_f_eval deprecated for SCG optimizer: use max_iters instead!\nIgnoring max_f_eval!", FutureWarning)
-        Optimizer.__init__(self, *args, **kwargs)
-
-        self.opt_name = "Scaled Conjugate Gradients"
-
-    def opt(self, f_fp=None, f=None, fp=None):
-        assert not f is None
-        assert not fp is None
-
-        opt_result = SCG(f, fp, self.x_init, display=self.messages,
-                         maxiters=self.max_iters,
-                         max_f_eval=self.max_f_eval,
-                         xtol=self.xtol, ftol=self.ftol,
-                         gtol=self.gtol)
-
-        self.x_opt = opt_result[0]
-        self.trace = opt_result[1]
-        self.f_opt = self.trace[-1]
-        self.funct_eval = opt_result[2]
-        self.status = opt_result[3]
-
-class Opt_Adadelta(Optimizer):
-    def __init__(self, step_rate=0.1, decay=0.9, momentum=0, *args, **kwargs):
-        Optimizer.__init__(self, *args, **kwargs)
-        self.opt_name = "Adadelta (climin)"
-        self.step_rate=step_rate
-        self.decay = decay
-        self.momentum = momentum
-
-    def opt(self, f_fp=None, f=None, fp=None):
-        assert not fp is None
-
-        import climin
-
-        opt = climin.adadelta.Adadelta(self.x_init, fp, step_rate=self.step_rate, decay=self.decay, momentum=self.momentum)
-
-        for info in opt:
-            if info['n_iter']>=self.max_iters:
-                self.x_opt = opt.wrt
-                self.status = 'maximum number of function evaluations exceeded '
-                break
-
-def get_optimizer(f_min):
-
-    optimizers = {'fmin_tnc': opt_tnc,
-                  'simplex': opt_simplex,
-                  'lbfgsb': opt_lbfgsb,
-                  'org-bfgs': opt_bfgs,
-                  'scg': opt_SCG,
-                  'adadelta':Opt_Adadelta}
-
-    if rasm_available:
-        optimizers['rasmussen'] = opt_rasm
-
-    for opt_name in optimizers.keys():
-        if opt_name.lower().find(f_min.lower()) != -1:
-            return optimizers[opt_name]
-
-    raise KeyError('No optimizer was found matching the name: %s' % f_min)
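
Note (not part of the diff): the removed get_optimizer dispatched on a substring match against the keys above, and the public entry point is unchanged, so picking an optimizer by (partial) name from user code still looks like the following hedged sketch (X, Y, m are made-up toy data).

    import numpy as np
    import GPy

    X = np.random.rand(30, 1)
    Y = np.sin(3 * X) + 0.1 * np.random.randn(30, 1)
    m = GPy.models.GPRegression(X, Y)
    m.optimize('lbfgsb', max_iters=200)   # name matched by substring, as in get_optimizer
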
@@ -61,12 +61,12 @@ class Kern(Parameterized):
         self.psicomp = PSICOMP_GH()

     def __setstate__(self, state):
-        self._all_dims_active = range(0, max(state['active_dims'])+1)
+        self._all_dims_active = np.arange(0, max(state['active_dims'])+1)
         super(Kern, self).__setstate__(state)

     @property
     def _effective_input_dim(self):
-        return self._all_dims_active.size
+        return np.size(self._all_dims_active)

     @Cache_this(limit=20)
     def _slice_X(self, X):
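
Note (not part of the diff): under Python 3 a range object carries no .size attribute, which is presumably why both spots switch to the NumPy equivalents; a quick illustration:

    import numpy as np
    dims = range(0, 3)
    np.size(dims)           # 3, works for range objects and arrays alike
    np.arange(0, 3).size    # 3, an ndarray carries .size directly
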
@@ -97,7 +97,7 @@ class Stationary(Kern):
         r = self._scaled_dist(X, X2)
         return self.K_of_r(r)

-    @Cache_this(limit=20, ignore_args=())
+    @Cache_this(limit=3, ignore_args=())
     def dK_dr_via_X(self, X, X2):
         #a convenience function, so we can cache dK_dr
         return self.dK_dr(self._scaled_dist(X, X2))
@@ -127,7 +127,7 @@ class Stationary(Kern):
         r2 = np.clip(r2, 0, np.inf)
         return np.sqrt(r2)

-    @Cache_this(limit=20, ignore_args=())
+    @Cache_this(limit=3, ignore_args=())
     def _scaled_dist(self, X, X2=None):
         """
         Efficiently compute the scaled distance, r.
@@ -46,7 +46,7 @@ def plot_mean(self, plot_limits=None, fixed_inputs=None,
     """
     Plot the mean of the GP.

     You can deactivate the legend for this one plot by supplying None to label.

     Give the Y_metadata in the predict_kw if you need it.

@@ -116,7 +116,7 @@ def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=
     E.g. the 95% confidence interval is $2.5, 97.5$.
     Note: Only implemented for one dimension!

     You can deactivate the legend for this one plot by supplying None to label.

     Give the Y_metadata in the predict_kw if you need it.

@@ -170,7 +170,7 @@ def plot_samples(self, plot_limits=None, fixed_inputs=None,
     """
     Plot the mean of the GP.

     You can deactivate the legend for this one plot by supplying None to label.

     Give the Y_metadata in the predict_kw if you need it.

@@ -231,7 +231,7 @@ def plot_density(self, plot_limits=None, fixed_inputs=None,
     E.g. the 95% confidence interval is $2.5, 97.5$.
     Note: Only implemented for one dimension!

     You can deactivate the legend for this one plot by supplying None to label.

     Give the Y_metadata in the predict_kw if you need it.

@@ -288,7 +288,7 @@ def plot(self, plot_limits=None, fixed_inputs=None,
     """
     Convenience function for plotting the fit of a GP.

     You can deactivate the legend for this one plot by supplying None to label.

     Give the Y_metadata in the predict_kw if you need it.

@@ -319,9 +319,17 @@ def plot(self, plot_limits=None, fixed_inputs=None,
     :param {2d|3d} projection: plot in 2d or 3d?
     :param bool legend: convenience, whether to put a legend on the plot or not.
     """
-    canvas, _ = pl().new_canvas(projection=projection, **kwargs)
     X = get_x_y_var(self)[0]
     helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution)
+    xmin, xmax = helper_data[5:7]
+    free_dims = helper_data[1]
+
+    if not 'xlim' in kwargs:
+        kwargs['xlim'] = (xmin[0], xmax[0])
+    if not 'ylim' in kwargs and len(free_dims) == 2:
+        kwargs['ylim'] = (xmin[1], xmax[1])
+
+    canvas, _ = pl().new_canvas(projection=projection, **kwargs)
     helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw,
                                                   apply_link, np.linspace(2.5, 97.5, levels*2) if plot_density else (lower,upper),
                                                   get_which_data_ycols(self, which_data_ycols),
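
Note (not part of the diff): the added block only fills in 'xlim'/'ylim' when the caller did not supply them, so, assuming a fitted 1D model m as in the earlier sketches, explicit limits still win.

    m.plot(xlim=(0.0, 1.0))   # caller-supplied limits are left untouched by the defaulting above
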
@@ -330,9 +338,11 @@ def plot(self, plot_limits=None, fixed_inputs=None,
         # It does not make sense to plot the data (which lives not in the latent function space) into latent function space.
         plot_data = False
     plots = {}
+    if hasattr(self, 'Z') and plot_inducing:
+        plots.update(_plot_inducing(self, canvas, visible_dims, projection, 'Inducing'))
     if plot_data:
-        plots.update(_plot_data(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, "Data"))
-        plots.update(_plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, "Data Error"))
+        plots.update(_plot_data(self, canvas, which_data_rows, which_data_ycols, free_dims, projection, "Data"))
+        plots.update(_plot_data_error(self, canvas, which_data_rows, which_data_ycols, free_dims, projection, "Data Error"))
     plots.update(_plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing, plot_density, projection))
     if plot_raw and (samples_likelihood > 0):
         helper_prediction = helper_predict_with_model(self, helper_data[2], False,
@@ -340,8 +350,6 @@ def plot(self, plot_limits=None, fixed_inputs=None,
                                                       get_which_data_ycols(self, which_data_ycols),
                                                       predict_kw, samples_likelihood)
         plots.update(_plot_samples(canvas, helper_data, helper_prediction, projection, "Lik Samples"))
-    if hasattr(self, 'Z') and plot_inducing:
-        plots.update(_plot_inducing(self, canvas, visible_dims, projection, 'Inducing'))
     return pl().add_to_canvas(canvas, plots, legend=legend)


@@ -362,7 +370,7 @@ def plot_f(self, plot_limits=None, fixed_inputs=None,

     If you want fine graned control use the specific plotting functions supplied in the model.

     You can deactivate the legend for this one plot by supplying None to label.

     Give the Y_metadata in the predict_kw if you need it.

@@ -389,7 +397,7 @@ def plot_f(self, plot_limits=None, fixed_inputs=None,
     :param dict error_kwargs: kwargs for the error plot for the plotting library you are using
     :param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
     """
-    plot(self, plot_limits, fixed_inputs, resolution, True,
+    return plot(self, plot_limits, fixed_inputs, resolution, True,
             apply_link, which_data_ycols, which_data_rows,
             visible_dims, levels, samples, 0,
             lower, upper, plot_data, plot_inducing,
@@ -199,7 +199,7 @@ def subsample_X(X, labels, num_samples=1000):
     num_samples and the returned subsampled X.
     """
     if X.shape[0] > num_samples:
-        print("Warning: subsampling X, as it has more samples then 1000. X.shape={!s}".format(X.shape))
+        print("Warning: subsampling X, as it has more samples then {}. X.shape={!s}".format(int(num_samples), X.shape))
         if labels is not None:
             subsample = []
             for _, _, _, _, index, _ in scatter_label_generator(labels, X, (0, None, None)):
@@ -285,7 +285,10 @@ def get_x_y_var(model):
         X = model.X.mean.values
         X_variance = model.X.variance.values
     else:
-        X = model.X.values
+        try:
+            X = model.X.values
+        except AttributeError:
+            X = model.X
         X_variance = None
     try:
         Y = model.Y.values
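
Note (not part of the diff): the try/except lets get_x_y_var cope with models whose X is a plain ndarray rather than a Param carrying .values; a stand-alone sketch of the same pattern, with a made-up helper name, for illustration only.

    import numpy as np

    def _values_or_array(x):
        # Hypothetical helper mirroring the pattern above.
        try:
            return x.values
        except AttributeError:
            return x

    _values_or_array(np.zeros((3, 1)))   # a bare ndarray falls through unchanged
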
@@ -346,7 +349,7 @@ def x_frame1D(X,plot_limits=None,resolution=None):
         xmin,xmax = X.min(0),X.max(0)
         xmin, xmax = xmin-0.25*(xmax-xmin), xmax+0.25*(xmax-xmin)
     elif len(plot_limits) == 2:
-        xmin, xmax = plot_limits
+        xmin, xmax = map(np.atleast_1d, plot_limits)
     else:
         raise ValueError("Bad limits for plotting")

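
Note (not part of the diff): wrapping the limits in np.atleast_1d lets scalar plot_limits such as (0, 5) be indexed the same way as array limits downstream, for example:

    import numpy as np
    xmin, xmax = map(np.atleast_1d, (0, 5))
    xmin[0], xmax[0]   # indexing works whether the caller passed scalars or arrays
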
@@ -50,5 +50,20 @@ class InferenceXTestCase(unittest.TestCase):
         x, mi = m.infer_newX(m.Y, optimize=True)
         np.testing.assert_array_almost_equal(m.X, mi.X, decimal=2)

+class HMCSamplerTest(unittest.TestCase):
+
+    def test_sampling(self):
+        np.random.seed(1)
+        x = np.linspace(0.,2*np.pi,100)[:,None]
+        y = -np.cos(x)+np.random.randn(*x.shape)*0.3+1
+
+        m = GPy.models.GPRegression(x,y)
+        m.kern.lengthscale.set_prior(GPy.priors.Gamma.from_EV(1.,10.))
+        m.kern.variance.set_prior(GPy.priors.Gamma.from_EV(1.,10.))
+        m.likelihood.variance.set_prior(GPy.priors.Gamma.from_EV(1.,10.))
+
+        hmc = GPy.inference.mcmc.HMC(m,stepsize=1e-2)
+        s = hmc.sample(num_samples=3)
+
 if __name__ == "__main__":
     unittest.main()
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.9.5
+current_version = 0.9.6
 tag = True
 commit = True