merge for new kernel slice handling

commit 2b9d3b326c
Author: Max Zwiessele
Date:   2014-03-14 10:56:51 +00:00

5 changed files with 32 additions and 34 deletions

@@ -301,9 +301,8 @@ class Model(Parameterized):
             denominator = (2 * np.dot(dx, gradient))
             global_ratio = (f1 - f2) / np.where(denominator==0., 1e-32, denominator)
-            gloabl_diff = (f1 - f2) - denominator
-            return (np.abs(1. - global_ratio) < tolerance) or (np.abs(gloabl_diff) == 0)
+            return np.abs(1. - global_ratio) < tolerance
         else:
             # check the gradient of each parameter individually, and do some pretty printing
             try:
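
The retained check accepts the gradient when the ratio of the central finite difference f1 - f2 to the analytic directional derivative 2 * dx . gradient is within tolerance of 1; the dropped branch (with its misspelled gloabl_diff) had also accepted an exactly zero difference. A minimal standalone sketch of the same ratio test, where f, df, and checkgrad_global are hypothetical stand-ins rather than GPy's API:

    import numpy as np

    def checkgrad_global(f, df, x, step=1e-6, tolerance=1e-3):
        # Perturb along a random +/- direction dx of magnitude `step`.
        dx = step * np.sign(np.random.uniform(-1, 1, x.size))
        f1, f2 = f(x + dx), f(x - dx)
        gradient = df(x)
        # Central difference: f1 - f2 should approximate 2 * dx . gradient.
        denominator = 2 * np.dot(dx, gradient)
        # Ratio of numerical to analytic derivative; guard against 0/0.
        global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
        return np.abs(1. - global_ratio) < tolerance

    # Usage: a quadratic whose gradient is known exactly.
    A = np.diag([1., 2., 3.])
    f = lambda x: 0.5 * x.dot(A).dot(x)
    df = lambda x: A.dot(x)
    print(checkgrad_global(f, df, np.random.randn(3)))  # expect True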

@@ -60,7 +60,7 @@ class SparseGP(GP):
         dL_dKmm = self.grad_dict.pop('dL_dKmm')
         self.kern.update_gradients_full(dL_dKmm, self.Z, None)
         target = self.kern.gradient.copy()
-        self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
+        self.kern.update_gradients_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])
         self.kern.gradient += target
         #gradients wrt Z
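
The fix qualifies grad_dict as self.grad_dict; the bare name presumably no longer exists as a local after the refactor, so reading it from the instance avoids a NameError. The surrounding lines rely on the convention that each update_gradients_* call overwrites kern.gradient rather than accumulating into it, so the Kmm contribution is stashed in target and added back afterwards. A toy illustration of that stash-and-accumulate pattern (ToyKernel is invented for this example, not a GPy class):

    import numpy as np

    class ToyKernel(object):
        """Stand-in for a GPy kernel: each update_* call OVERWRITES .gradient."""
        def __init__(self, nparams):
            self.gradient = np.zeros(nparams)
        def update_gradients_full(self, dL_dK, X, X2=None):
            self.gradient[:] = dL_dK.sum()
        def update_gradients_expectations(self, dL_dpsi0):
            self.gradient[:] = dL_dpsi0.sum()

    kern = ToyKernel(2)
    kern.update_gradients_full(np.ones((3, 3)), None)   # Kmm-style contribution
    target = kern.gradient.copy()                       # stash it ...
    kern.update_gradients_expectations(np.ones(3))      # ... second call overwrites
    kern.gradient += target                             # total of both contributions
    print(kern.gradient)                                # [12. 12.] = 9 + 3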

@@ -83,7 +83,7 @@ class IndependentOutputs(CombinationKernel):
         target = np.zeros_like(X)
         slices = index_to_slices(X[:,self.index_dim])
         if X2 is None:
-            [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,s],X[s],X[ss])) for s, ss in product(slices_i, slices_i)] for slices_i in slices]
+            [[np.copyto(target[s,self.kern.active_dims], self.kern.gradients_X(dL_dK[s,ss],X[s],X[ss])) for s, ss in itertools.product(slices_i, slices_i)] for slices_i in slices]
         else:
             X2,slices2 = X2[:,:self.index_dim],index_to_slices(X2[:,-1])
             [[[np.copyto(target[s,:self.index_dim], self.kern.gradients_X(dL_dK[s,s2], X[s], X2[s2])) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices,slices2)]
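
Both changes here concern the slice handling the commit is named for: product must be the qualified itertools.product, and since (s, ss) ranges over all pairs of slices belonging to the same output index, the gradient block read must be dL_dK[s,ss] rather than the diagonal dL_dK[s,s]; the two differ whenever one output occupies several non-contiguous runs of rows. The helper index_to_slices groups rows by the integer index column; a self-contained reimplementation to show the mapping (illustrative only, not GPy's exact code):

    import itertools
    import numpy as np

    def index_to_slices(index):
        """Group an integer index column into per-output lists of contiguous
        slices, e.g. [0,0,1,1,1,0] -> [[slice(0,2), slice(5,6)], [slice(2,5)]]."""
        index = np.asarray(index, dtype=int)
        slices = [[] for _ in range(index.max() + 1)]
        start = 0
        for value, run in itertools.groupby(index):
            stop = start + len(list(run))
            slices[value].append(slice(start, stop))
            start = stop
        return slices

    slices = index_to_slices([0, 0, 1, 1, 1, 0])
    print(slices)  # [[slice(0, 2, None), slice(5, 6, None)], [slice(2, 5, None)]]
    # Output 0 owns two runs, so the pairs (s, ss) from itertools.product include
    # the off-diagonal block (slice(0,2), slice(5,6)) -- exactly why dL_dK[s,ss],
    # not dL_dK[s,s], must be read for each pair.
    print(list(itertools.product(slices[0], slices[0])))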

@@ -252,34 +252,33 @@ class KernelGradientTestsContinuous(unittest.TestCase):
         k.randomize()
         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
-#TODO: turn off grad checkingwrt X for indexed kernels liek coregionalize
-class KernelGradientTestsContinuous1D(unittest.TestCase):
-    def setUp(self):
-        self.N, self.D = 100, 1
-        self.X = np.random.randn(self.N,self.D)
-        self.X2 = np.random.randn(self.N+10,self.D)
-
-        continuous_kerns = ['RBF', 'Linear']
-        self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns]
-
-    def test_PeriodicExponential(self):
-        k = GPy.kern.PeriodicExponential(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
-
-    def test_PeriodicMatern32(self):
-        k = GPy.kern.PeriodicMatern32(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
-
-    def test_PeriodicMatern52(self):
-        k = GPy.kern.PeriodicMatern52(self.D)
-        k.randomize()
-        self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+#TODO: turn off grad checkingwrt X for indexed kernels like coregionalize
+# class KernelGradientTestsContinuous1D(unittest.TestCase):
+#     def setUp(self):
+#         self.N, self.D = 100, 1
+#         self.X = np.random.randn(self.N,self.D)
+#         self.X2 = np.random.randn(self.N+10,self.D)
+#
+#         continuous_kerns = ['RBF', 'Linear']
+#         self.kernclasses = [getattr(GPy.kern, s) for s in continuous_kerns]
+#
+#     def test_PeriodicExponential(self):
+#         k = GPy.kern.PeriodicExponential(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+#
+#     def test_PeriodicMatern32(self):
+#         k = GPy.kern.PeriodicMatern32(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
+#
+#     def test_PeriodicMatern52(self):
+#         k = GPy.kern.PeriodicMatern52(self.D)
+#         k.randomize()
+#         self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose))
 class KernelTestsMiscellaneous(unittest.TestCase):
     def setUp(self):
         N, D = 100, 10
         self.X = np.linspace(-np.pi, +np.pi, N)[:,None] * np.ones(D)

@@ -48,7 +48,7 @@ class Cacher(object):
             if k in kw and kw[k] is not None:
                 return self.operation(*args, **kw)
         # TODO: WARNING !!! Cache OFFSWITCH !!! WARNING
-        return self.operation(*args)
+        #return self.operation(*args)
         #if the result is cached, return the cached computation
         state = [all(a is b for a, b in itertools.izip_longest(args, cached_i)) for cached_i in self.cached_inputs]
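
Re-commenting the off-switch reactivates the lookup below it, which keys the cache on argument identity (a is b), not equality, with izip_longest padding so calls with different numbers of arguments can never match a cached entry. A minimal sketch of the same lookup in Python 3 spelling, where izip_longest is itertools.zip_longest (TinyCacher is invented for this example, not GPy's Cacher):

    import itertools
    import numpy as np

    class TinyCacher(object):
        """Identity-keyed memoizer in the spirit of the Cacher above."""
        def __init__(self, operation):
            self.operation = operation
            self.cached_inputs, self.cached_outputs = [], []

        def __call__(self, *args):
            # Hit only if every positional argument is the SAME object (is,
            # not ==); zip_longest pads with None, so a call with a different
            # number of arguments can never match a cached entry.
            state = [all(a is b for a, b in itertools.zip_longest(args, cached_i))
                     for cached_i in self.cached_inputs]
            if any(state):
                return self.cached_outputs[state.index(True)]
            result = self.operation(*args)
            self.cached_inputs.append(args)
            self.cached_outputs.append(result)
            return result

    dot = TinyCacher(np.dot)
    A = np.random.randn(3, 3)
    assert dot(A, A) is dot(A, A)              # same objects: a cache hit
    assert dot(A, A) is not dot(A, A.copy())   # equal values, new object: a miss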