Clean up testing a bit. Periodic kernel tests are turned off because they need different tests; tests for discontinuous kernels are still needed.

Max Zwiessele 2014-03-13 13:13:15 +00:00
parent 0d343cf0ca
commit 1f9509d979
9 changed files with 71 additions and 65 deletions


@@ -253,7 +253,7 @@ class Model(Parameterized):
         sgd.run()
         self.optimization_runs.append(sgd)
-    def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, _debug=False):
+    def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3):
         """
         Check the gradient of the model by comparing to a numerical
         estimate. If the verbose flag is passed, individual
@@ -349,13 +349,6 @@ class Model(Parameterized):
             xx[xind] -= 2.*step
             f2 = self.objective_function(xx)
             numerical_gradient = (f1 - f2) / (2 * step)
-            if _debug:
-                for p in self.kern.flattened_parameters:
-                    p._parent_._debug=True
-                self.gradient[xind] = numerical_gradient
-                self._set_params_transformed(x)
-                for p in self.kern.flattened_parameters:
-                    p._parent_._debug=False
             if np.all(gradient[xind]==0): ratio = (f1-f2) == gradient[xind]
             else: ratio = (f1 - f2) / (2 * step * gradient[xind])
             difference = np.abs((f1 - f2) / 2 / step - gradient[xind])
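For reference, what `_checkgrad` computes (with the debug hook now removed) is a plain central-difference comparison against the analytic gradient, using both a ratio and an absolute-difference criterion. A minimal, self-contained sketch of the same idea — not GPy's actual implementation, and `numerical_grad_check` is a hypothetical name:

    import numpy as np

    def numerical_grad_check(f, grad, x, step=1e-6, tolerance=1e-3):
        """Compare an analytic gradient against central differences."""
        x = np.asarray(x, dtype=float)
        g = np.asarray(grad(x), dtype=float)
        ok = True
        for i in range(x.size):
            xx = x.copy()
            xx[i] += step
            f1 = f(xx)
            xx[i] -= 2. * step
            f2 = f(xx)
            numerical = (f1 - f2) / (2. * step)
            # ratio should be ~1 and difference ~0, mirroring _checkgrad above
            ratio = numerical / g[i] if g[i] != 0 else float(numerical == 0)
            ok &= (abs(ratio - 1.) < tolerance) or (abs(numerical - g[i]) < tolerance)
        return ok

    # hypothetical usage on f(x) = x'x, whose gradient is 2x:
    assert numerical_grad_check(lambda x: x.dot(x), lambda x: 2 * x, np.random.randn(3))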


@@ -446,8 +446,8 @@ class ParamConcatenation(object):
     def untie(self, *ties):
         [param.untie(*ties) for param in self.params]
-    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False):
-        return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance, _debug=_debug)
+    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
+        return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance)
     #checkgrad.__doc__ = Gradcheckable.checkgrad.__doc__
     __lt__ = lambda self, val: self._vals() < val


@@ -206,7 +206,7 @@ class Gradcheckable(Parentable):
     def __init__(self, *a, **kw):
         super(Gradcheckable, self).__init__(*a, **kw)
-    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3, _debug=False):
+    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
         """
         Check the gradient of this parameter with respect to the highest parent's
         objective function.
@@ -220,10 +220,10 @@ class Gradcheckable(Parentable):
         :param float tolerance: the tolerance for the gradient ratio or difference.
         """
         if self.has_parent():
-            return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance, _debug=_debug)
-        return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance, _debug=_debug)
-    def _checkgrad(self, param, verbose=0, step=1e-6, tolerance=1e-3, _debug=False):
+            return self._highest_parent_._checkgrad(self, verbose=verbose, step=step, tolerance=tolerance)
+        return self._checkgrad(self[''], verbose=verbose, step=step, tolerance=tolerance)
+    def _checkgrad(self, param, verbose=0, step=1e-6, tolerance=1e-3):
         """
         Perform the checkgrad on the model.
         TODO: this can be done more efficiently, when doing it inside here
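The delegation above means checkgrad can be called on any node of the parameter tree and is always evaluated against the top-level model's objective. A hedged usage sketch (the exact parameter names depend on the kernel used):

    import numpy as np, GPy

    X = np.random.randn(50, 1)
    Y = np.sin(X) + 0.1 * np.random.randn(50, 1)
    m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))

    m.checkgrad(verbose=True)       # check the whole model, print per-parameter results
    # a child parameter routes through _highest_parent_._checkgrad:
    m.kern.lengthscale.checkgrad()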
@@ -694,6 +694,10 @@ class Parameterizable(OptimizationHandlable):
         elif pname not in dir(self):
             self.__dict__[pname] = param
             self._added_names_.add(pname)
+        else:
+            print "WARNING: added a parameter with formatted name {}, which is already a member of {} object. Trying to change the parameter name to\n {}".format(pname, self.__class__, param.name+"_")
+            param.name += "_"
+            self._add_parameter_name(param, ignore_added_names)
     def _remove_parameter_name(self, param=None, pname=None):
         assert param is None or pname is None, "can only delete either param by name, or the name of a param"
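The new `else` branch resolves attribute-name clashes by appending an underscore and recursing, so two identically named sub-parameters no longer shadow each other. A sketch of the behaviour this should produce — the final names here are an assumption read off the code above, not verified output:

    import GPy

    # two sub-kernels that would both format to the attribute name 'rbf':
    k = GPy.kern.RBF(1) + GPy.kern.RBF(1)
    # per the branch above, the second is renamed with a trailing underscore,
    # so both stay reachable as attributes of the sum kernel:
    print(k)  # inspect the parameter names; expect e.g. 'rbf' and 'rbf_'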


@@ -156,7 +156,7 @@ class Kern(Parameterized):
         other.active_dims += self.input_dim
         return self.prod(other)
-    def prod(self, other, name='prod'):
+    def prod(self, other, name='mul'):
         """
         Multiply two kernels (either on the same space, or on the tensor
         product of the input space).


@@ -17,7 +17,7 @@ class Prod(CombinationKernel):
     :rtype: kernel object
     """
-    def __init__(self, kernels, name='prod'):
+    def __init__(self, kernels, name='mul'):
         assert len(kernels) == 2, 'only implemented for two kernels as of yet'
         super(Prod, self).__init__(kernels, name)
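Both defaults change together, so a kernel product is now consistently named 'mul'. A quick check, assuming the `*` operator routes through `prod` as shown in the Kern hunk above:

    import GPy

    k = GPy.kern.RBF(1) * GPy.kern.Matern32(1)
    print(k.name)  # expected after this commit: 'mul' (previously 'prod')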


@@ -45,7 +45,6 @@ class MRD(Model):
     :param str name: the name of this model
     :param [str] Ynames: the names for the datasets given, must be of equal length as Ylist or None
     """
-
     def __init__(self, Ylist, input_dim, X=None, X_variance=None,
                  initx = 'PCA', initz = 'permute',
                  num_inducing=10, Z=None, kernel=None,


@@ -227,6 +227,16 @@ class KernelGradientTestsContinuous(unittest.TestCase):
         k.randomize()
         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+
+    def test_Prod(self):
+        k = GPy.kern.Matern32([2,3]) * GPy.kern.RBF([0,4]) + GPy.kern.Linear(self.D)
+        k.randomize()
+        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+
+    def test_Add(self):
+        k = GPy.kern.Matern32([2,3]) + GPy.kern.RBF([0,4]) + GPy.kern.Linear(self.D)
+        k.randomize()
+        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))

     def test_Matern52(self):
         k = GPy.kern.Matern52(self.D)
         k.randomize()
@@ -242,31 +252,30 @@ class KernelGradientTestsContinuous(unittest.TestCase):
         k.randomize()
         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))

-class KernelGradientTestsContinuous1D(unittest.TestCase):
-    def setUp(self):
-        self.N, self.D = 100, 1
-        self.X = np.random.randn(self.N,self.D)
-        self.X2 = np.random.randn(self.N+10,self.D)
-
-        continuous_kerns = ['RBF', 'Linear']
-        self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns]
-
-    def test_PeriodicExponential(self):
-        k = GPy.kern.PeriodicExponential(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
-
-    def test_PeriodicMatern32(self):
-        k = GPy.kern.PeriodicMatern32(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
-
-    def test_PeriodicMatern52(self):
-        k = GPy.kern.PeriodicMatern52(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+#TODO: turn off grad checking wrt X for indexed kernels like coregionalize
+# class KernelGradientTestsContinuous1D(unittest.TestCase):
+#     def setUp(self):
+#         self.N, self.D = 100, 1
+#         self.X = np.random.randn(self.N,self.D)
+#         self.X2 = np.random.randn(self.N+10,self.D)
+#
+#         continuous_kerns = ['RBF', 'Linear']
+#         self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns]
+#
+#     def test_PeriodicExponential(self):
+#         k = GPy.kern.PeriodicExponential(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+#
+#     def test_PeriodicMatern32(self):
+#         k = GPy.kern.PeriodicMatern32(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+#
+#     def test_PeriodicMatern52(self):
+#         k = GPy.kern.PeriodicMatern52(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))

 class KernelTestsMiscellaneous(unittest.TestCase):
@@ -275,7 +284,7 @@ class KernelTestsMiscellaneous(unittest.TestCase):
         N, D = 100, 10
         self.X = np.linspace(-np.pi, +np.pi, N)[:,None] * np.ones(D)
         self.rbf = GPy.kern.RBF(range(2))
-        self.linear = GPy.kern.Linear((3,5,6))
+        self.linear = GPy.kern.Linear((3,6))
         self.matern = GPy.kern.Matern32(np.array([2,4,7]))
         self.sumkern = self.rbf + self.linear
         self.sumkern += self.matern
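These fixtures build a sum kernel whose parts act on disjoint subsets of the input columns, which is also what the new test_Prod/test_Add cases exercise. A hedged sketch of evaluating such a kernel, mirroring the constructor usage in the tests (index collections as the first argument; current GPy releases typically spell this with an explicit active_dims keyword instead):

    import numpy as np, GPy

    rbf = GPy.kern.RBF(range(2))             # acts on columns 0, 1
    linear = GPy.kern.Linear((3, 6))         # acts on columns 3, 6
    matern = GPy.kern.Matern32(np.array([2, 4, 7]))
    k = rbf + linear + matern                # sum kernel over a 10-D input

    X = np.random.randn(100, 10)
    K = k.K(X)                               # 100x100 covariance matrix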


@@ -541,8 +541,8 @@ class TestNoiseModels(object):
             #import ipdb; ipdb.set_trace()
             #NOTE this test appears to be stochastic for some likelihoods (student t?)
             # appears to all be working in test mode right now...
-            if isinstance(model, GPy.likelihoods.StudentT):
-                import ipdb;ipdb.set_trace()
+            #if isinstance(model, GPy.likelihoods.StudentT):
+            #    import ipdb;ipdb.set_trace()
             assert m.checkgrad(step=step)
         ###########


@@ -63,10 +63,11 @@ class GradientTests(unittest.TestCase):
         mlp = GPy.kern.MLP(1)
         self.check_model(mlp, model_type='GPRegression', dimension=1)

-    def test_GPRegression_poly_1d(self):
-        ''' Testing the GP regression with polynomial kernel with white kernel on 1d data '''
-        mlp = GPy.kern.Poly(1, degree=5)
-        self.check_model(mlp, model_type='GPRegression', dimension=1)
+    #TODO:
+    #def test_GPRegression_poly_1d(self):
+    #    ''' Testing the GP regression with polynomial kernel with white kernel on 1d data '''
+    #    mlp = GPy.kern.Poly(1, degree=5)
+    #    self.check_model(mlp, model_type='GPRegression', dimension=1)

     def test_GPRegression_matern52_1D(self):
         ''' Testing the GP regression with matern52 kernel on 1d data '''