Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-08 19:42:39 +02:00
Cleaned up testing a bit: periodic kernels are turned off because they need different tests; discontinuous kernels still need tests.
This commit is contained in: parent 0d343cf0ca, commit 1f9509d979.
9 changed files with 71 additions and 65 deletions.
@@ -253,7 +253,7 @@ class Model(Parameterized):
         sgd.run()
         self.optimization_runs.append(sgd)
 
-    def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, _debug=False):
+    def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3):
         """
         Check the gradient of the model by comparing to a numerical
         estimate. If the verbose flag is passed, individual
@@ -349,13 +349,6 @@ class Model(Parameterized):
             xx[xind] -= 2.*step
             f2 = self.objective_function(xx)
             numerical_gradient = (f1 - f2) / (2 * step)
-            if _debug:
-                for p in self.kern.flattened_parameters:
-                    p._parent_._debug=True
-                self.gradient[xind] = numerical_gradient
-                self._set_params_transformed(x)
-                for p in self.kern.flattened_parameters:
-                    p._parent_._debug=False
             if np.all(gradient[xind]==0): ratio = (f1-f2) == gradient[xind]
             else: ratio = (f1 - f2) / (2 * step * gradient[xind])
             difference = np.abs((f1 - f2) / 2 / step - gradient[xind])
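The `_debug` block removed above sat inside a standard central-difference gradient check. For reference, a minimal standalone sketch of the same ratio/difference test, assuming only numpy; `objective` and `analytic_grad` are hypothetical stand-ins for `self.objective_function` and the model's analytic gradient:

import numpy as np

def central_difference_check(objective, analytic_grad, x, step=1e-6, tolerance=1e-3):
    # Perturb each coordinate by +/- step and compare the numerical slope
    # (f1 - f2) / (2 * step) against the analytic gradient entry.
    x = np.asarray(x, dtype=float)
    gradient = np.asarray(analytic_grad(x), dtype=float)
    ok = True
    for xind in range(x.size):
        xx = x.copy()
        xx[xind] += step
        f1 = objective(xx)
        xx[xind] -= 2. * step
        f2 = objective(xx)
        numerical_gradient = (f1 - f2) / (2 * step)
        if gradient[xind] == 0:
            ratio_ok = (f1 - f2) == gradient[xind]  # both exactly zero
        else:
            ratio = numerical_gradient / gradient[xind]
            ratio_ok = np.abs(1. - ratio) < tolerance
        difference = np.abs(numerical_gradient - gradient[xind])
        ok = ok and (ratio_ok or difference < tolerance)
    return ok

For example, central_difference_check(lambda v: (v**2).sum(), lambda v: 2*v, np.ones(3)) should return True.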
@@ -446,8 +446,8 @@ class ParamConcatenation(object):
     def untie(self, *ties):
         [param.untie(*ties) for param in self.params]
 
-    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False):
-        return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance, _debug=_debug)
+    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
+        return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance)
     #checkgrad.__doc__ = Gradcheckable.checkgrad.__doc__
 
     __lt__ = lambda self, val: self._vals() < val
@@ -206,7 +206,7 @@ class Gradcheckable(Parentable):
     def __init__(self, *a, **kw):
         super(Gradcheckable, self).__init__(*a, **kw)
 
-    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False):
+    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
         """
         Check the gradient of this parameter with respect to the highest parent's
         objective function.
@@ -220,10 +220,10 @@ class Gradcheckable(Parentable):
         :param float tolerance: the tolerance for the gradient ratio or difference.
         """
         if self.has_parent():
-            return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance, _debug=_debug)
-        return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance, _debug=_debug)
+            return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance)
+        return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance)
 
-    def _checkgrad(self, param, verbose=0, step=1e-6, tolerance=1e-3, _debug=False):
+    def _checkgrad(self, param, verbose=0, step=1e-6, tolerance=1e-3):
         """
         Perform the checkgrad on the model.
         TODO: this can be done more efficiently, when doing it inside here
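In day-to-day use the public entry point is `checkgrad` on a model or any parameter, which routes to `_checkgrad` on the highest parent as above. A hedged usage sketch; the model class and attribute paths are assumed and may differ at this revision:

import numpy as np
import GPy

X = np.random.randn(20, 1)
Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
m = GPy.models.GPRegression(X, Y)

print(m.checkgrad(verbose=1))  # check every parameter of the model
print(m.kern.checkgrad())      # or just the kernel's parameters, routed to the highest parent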
@@ -694,6 +694,10 @@ class Parameterizable(OptimizationHandlable):
         elif pname not in dir(self):
             self.__dict__[pname] = param
             self._added_names_.add(pname)
+        else:
+            print "WARNING: added a parameter with formatted name {}, which is already a member of {} object. Trying to change the parameter name to\n {}".format(pname, self.__class__, param.name+"_")
+            param.name += "_"
+            self._add_parameter_name(param, ignore_added_names)
 
     def _remove_parameter_name(self, param=None, pname=None):
         assert param is None or pname is None, "can only delete either param by name, or the name of a param"
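The new `else` branch handles a name collision by warning, appending an underscore to the parameter's name, and retrying the registration. A hedged illustration of the intended behavior; the import path is a guess for this revision, while `Param` and `add_parameter` are taken from the diffs below:

from GPy.core.parameterization import Param, Parameterized

p = Parameterized(name='parent')
p.add_parameter(Param('variance', 1.))
p.add_parameter(Param('variance', 2.))  # name clash: warns, then exposed as p.variance_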
@@ -156,7 +156,7 @@ class Kern(Parameterized):
             other.active_dims += self.input_dim
         return self.prod(other)
 
-    def prod(self, other, name='prod'):
+    def prod(self, other, name='mul'):
         """
         Multiply two kernels (either on the same space, or on the tensor
         product of the input space).
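`prod` is what the `*` operator reaches after the tensor-product bookkeeping above, so the default name change from 'prod' to 'mul' is what a user sees on combined kernels. A hedged example, with constructor signatures assumed:

import GPy

k1 = GPy.kern.RBF(2)
k2 = GPy.kern.Matern32(2)
k = k1 * k2    # routed through Kern.prod
print(k.name)  # expected: 'mul' after this change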
@@ -17,7 +17,7 @@ class Prod(CombinationKernel):
     :rtype: kernel object
 
     """
-    def __init__(self, kernels, name='prod'):
+    def __init__(self, kernels, name='mul'):
         assert len(kernels) == 2, 'only implemented for two kernels as of yet'
         super(Prod, self).__init__(kernels, name)
 
@@ -15,13 +15,13 @@ from ..likelihoods import Gaussian
 
 class MRD(Model):
     """
     Apply MRD to all given datasets Y in Ylist.
 
     Y_i in [n x p_i]
 
     The samples n in the datasets need
     to match up, whereas the dimensionality p_d can differ.
 
     :param [array-like] Ylist: List of datasets to apply MRD on
     :param input_dim: latent dimensionality
     :type input_dim: int
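The docstring requires a shared sample count n across the Y_i, while the output dimensionalities p_i may differ. A hedged construction sketch, assuming the class is exposed as GPy.models.MRD at this revision:

import numpy as np
import GPy

n = 50
Y1 = np.random.randn(n, 7)   # n x p_1
Y2 = np.random.randn(n, 12)  # n x p_2; p_i may differ, n must match
m = GPy.models.MRD([Y1, Y2], input_dim=3, num_inducing=10)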
@@ -45,13 +45,12 @@ class MRD(Model):
     :param str name: the name of this model
     :param [str] Ynames: the names for the datasets given, must be of equal length as Ylist or None
     """
-
     def __init__(self, Ylist, input_dim, X=None, X_variance=None,
                  initx = 'PCA', initz = 'permute',
                  num_inducing=10, Z=None, kernel=None,
                  inference_method=None, likelihood=None, name='mrd', Ynames=None):
         super(MRD, self).__init__(name)
 
         # sort out the kernels
         if kernel is None:
             from ..kern import RBF
@@ -64,23 +63,23 @@ class MRD(Model):
         self.kern = kernel
         self.input_dim = input_dim
         self.num_inducing = num_inducing
 
         self.Ylist = Ylist
         self._in_init_ = True
         X = self._init_X(initx, Ylist)
         self.Z = Param('inducing inputs', self._init_Z(initz, X))
         self.num_inducing = self.Z.shape[0] # ensure M==N if M>N
 
         if X_variance is None:
             X_variance = np.random.uniform(0, .2, X.shape)
 
         self.variational_prior = NormalPrior()
         self.X = NormalPosterior(X, X_variance)
 
         if likelihood is None:
             self.likelihood = [Gaussian(name='Gaussian_noise'.format(i)) for i in range(len(Ylist))]
         else: self.likelihood = likelihood
 
         if inference_method is None:
             self.inference_method = []
             for y in Ylist:
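One caveat in the context above: `'Gaussian_noise'.format(i)` contains no `{}` placeholder, so the `format` call is a no-op and every default likelihood receives the same name. Distinct per-output names would presumably need a placeholder, a hypothetical fix that is not part of this commit:

# str.format without a placeholder is a no-op:
print(['Gaussian_noise'.format(i) for i in range(3)])
# -> ['Gaussian_noise', 'Gaussian_noise', 'Gaussian_noise']

# with a placeholder, each output likelihood gets its own name:
print(['Gaussian_noise_{}'.format(i) for i in range(3)])
# -> ['Gaussian_noise_0', 'Gaussian_noise_1', 'Gaussian_noise_2']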
@@ -91,12 +90,12 @@ class MRD(Model):
         else:
             self.inference_method = inference_method
             self.inference_method.set_limit(len(Ylist))
 
         self.add_parameters(self.X, self.Z)
 
         if Ynames is None:
             Ynames = ['Y{}'.format(i) for i in range(len(Ylist))]
 
         for i, n, k, l in itertools.izip(itertools.count(), Ynames, self.kern, self.likelihood):
             p = Parameterized(name=n)
             p.add_parameter(k)
@@ -227,6 +227,16 @@ class KernelGradientTestsContinuous(unittest.TestCase):
         k.randomize()
         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
 
+    def test_Prod(self):
+        k = GPy.kern.Matern32([2,3]) * GPy.kern.RBF([0,4]) + GPy.kern.Linear(self.D)
+        k.randomize()
+        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+
+    def test_Add(self):
+        k = GPy.kern.Matern32([2,3]) + GPy.kern.RBF([0,4]) + GPy.kern.Linear(self.D)
+        k.randomize()
+        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+
     def test_Matern52(self):
         k = GPy.kern.Matern52(self.D)
         k.randomize()
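In the new tests the list passed to a kernel selects its active input columns, so the product and sum parts operate on different slices of X while `Linear(self.D)` covers the first D columns. A hedged standalone version of `test_Prod`; the list-as-active-dims constructor is taken from the test itself, and released versions spell this `active_dims`:

import numpy as np
import GPy

X = np.random.randn(100, 5)
# Matern32 on columns 2 and 3, RBF on columns 0 and 4, Linear on all 5:
k = GPy.kern.Matern32([2, 3]) * GPy.kern.RBF([0, 4]) + GPy.kern.Linear(5)
k.randomize()
K = k.K(X)      # 100 x 100 covariance built from the selected columns
print(K.shape)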
@@ -242,31 +252,30 @@ class KernelGradientTestsContinuous(unittest.TestCase):
         k.randomize()
         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
 
-class KernelGradientTestsContinuous1D(unittest.TestCase):
-    def setUp(self):
-        self.N, self.D = 100, 1
-        self.X = np.random.randn(self.N,self.D)
-        self.X2 = np.random.randn(self.N+10,self.D)
-
-        continuous_kerns = ['RBF', 'Linear']
-        self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns]
-
-    def test_PeriodicExponential(self):
-        k = GPy.kern.PeriodicExponential(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
-
-    def test_PeriodicMatern32(self):
-        k = GPy.kern.PeriodicMatern32(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
-
-    def test_PeriodicMatern52(self):
-        k = GPy.kern.PeriodicMatern52(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
-
-#TODO: turn off grad checking wrt X for indexed kernels like coregionalize
+#TODO: turn off grad checking wrt X for indexed kernels like coregionalize
+# class KernelGradientTestsContinuous1D(unittest.TestCase):
+#     def setUp(self):
+#         self.N, self.D = 100, 1
+#         self.X = np.random.randn(self.N,self.D)
+#         self.X2 = np.random.randn(self.N+10,self.D)
+#
+#         continuous_kerns = ['RBF', 'Linear']
+#         self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns]
+#
+#     def test_PeriodicExponential(self):
+#         k = GPy.kern.PeriodicExponential(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+#
+#     def test_PeriodicMatern32(self):
+#         k = GPy.kern.PeriodicMatern32(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+#
+#     def test_PeriodicMatern52(self):
+#         k = GPy.kern.PeriodicMatern52(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
 
 
 class KernelTestsMiscellaneous(unittest.TestCase):
@@ -275,7 +284,7 @@ class KernelTestsMiscellaneous(unittest.TestCase):
         N, D = 100, 10
         self.X = np.linspace(-np.pi, +np.pi, N)[:,None] * np.ones(D)
         self.rbf = GPy.kern.RBF(range(2))
-        self.linear = GPy.kern.Linear((3,5,6))
+        self.linear = GPy.kern.Linear((3,6))
         self.matern = GPy.kern.Matern32(np.array([2,4,7]))
         self.sumkern = self.rbf + self.linear
         self.sumkern += self.matern
@@ -541,8 +541,8 @@ class TestNoiseModels(object):
         #import ipdb; ipdb.set_trace()
         #NOTE this test appears to be stochastic for some likelihoods (student t?)
         # appears to all be working in test mode right now...
-        if isinstance(model, GPy.likelihoods.StudentT):
-            import ipdb;ipdb.set_trace()
+        #if isinstance(model, GPy.likelihoods.StudentT):
+        #    import ipdb;ipdb.set_trace()
         assert m.checkgrad(step=step)
 
         ###########
@@ -63,10 +63,11 @@ class GradientTests(unittest.TestCase):
         mlp = GPy.kern.MLP(1)
         self.check_model(mlp, model_type='GPRegression', dimension=1)
 
-    def test_GPRegression_poly_1d(self):
-        ''' Testing the GP regression with polynomial kernel with white kernel on 1d data '''
-        mlp = GPy.kern.Poly(1, degree=5)
-        self.check_model(mlp, model_type='GPRegression', dimension=1)
+    #TODO:
+    #def test_GPRegression_poly_1d(self):
+    #    ''' Testing the GP regression with polynomial kernel with white kernel on 1d data '''
+    #    mlp = GPy.kern.Poly(1, degree=5)
+    #    self.check_model(mlp, model_type='GPRegression', dimension=1)
 
     def test_GPRegression_matern52_1D(self):
         ''' Testing the GP regression with matern52 kernel on 1d data '''