mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-15 06:52:39 +02:00
Merge branch 'params' of github.com:SheffieldML/GPy into params

This commit is contained in: commit 65a2c2a320

9 changed files with 112 additions and 14 deletions
@@ -54,19 +54,21 @@ class SparseGP(GP):
         self.add_parameter(self.Z, index=0)
         self.parameters_changed()

-    def parameters_changed(self):
-        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.X_variance, self.Z, self.likelihood, self.Y)
-
-        #The derivative of the bound wrt the inducing inputs Z ( unless they're all fixed)
+    def _update_gradients_Z(self, add=False):
+        #The derivative of the bound wrt the inducing inputs Z ( unless they're all fixed)
-        if not self.Z.is_fixed:
-            self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z)
+        if add: self.Z.gradient += self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z)
+        else: self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z)
         if self.X_variance is None:
             self.Z.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'].T, self.Z, self.X)
         else:
             self.Z.gradient += self.kern.dpsi1_dZ(self.grad_dict['dL_dpsi1'], self.Z, self.X, self.X_variance)
             self.Z.gradient += self.kern.dpsi2_dZ(self.grad_dict['dL_dpsi2'], self.Z, self.X, self.X_variance)

+    def parameters_changed(self):
+        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.X_variance, self.Z, self.likelihood, self.Y)
+        self._update_gradients_Z(add=False)
+
     def _raw_predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=False):
         """
         Make a prediction for the latent function values
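
Note on the refactor above: the Z-gradient update moves out of parameters_changed into its own _update_gradients_Z helper, with an add flag that switches between overwriting and accumulating the gradient, so subclasses can stack extra bound terms on top. A minimal sketch of that pattern with plain numpy in place of GPy's Param machinery (the class and names here are illustrative, not GPy API):

import numpy as np

class InducingGradients(object):
    """Sketch of the overwrite-vs-accumulate pattern behind _update_gradients_Z."""
    def __init__(self, num_inducing, input_dim):
        self.Z_gradient = np.zeros((num_inducing, input_dim))

    def update_gradients_Z(self, dL_dZ, add=False):
        # add=False: start a fresh gradient, as the refactored parameters_changed does
        # add=True:  accumulate an extra term on top, for subclasses adding bound terms
        if add:
            self.Z_gradient += dL_dZ
        else:
            self.Z_gradient = dL_dZ.copy()

g = InducingGradients(num_inducing=3, input_dim=2)
g.update_gradients_Z(np.ones((3, 2)))             # overwrite
g.update_gradients_Z(np.ones((3, 2)), add=True)   # accumulate: gradient is now 2.0 everywhere
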
@@ -59,8 +59,9 @@ class Optimizer():
         """
         See GPy.plotting.matplot_dep.inference_plots
         """
         import sys
         assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
-        from ..plotting.matplot_dep import inference_plots
+        from ...plotting.matplot_dep import inference_plots
         inference_plots.plot_optimizer(self)
@@ -14,7 +14,8 @@ class Bias(Kernpart):
         :type variance: float
         """
         super(Bias, self).__init__(input_dim, name)
-        self.variance = Param("variance", variance)
+        from ...core.parameterization.transformations import Logexp
+        self.variance = Param("variance", variance, Logexp())
         self.add_parameter(self.variance)

     def K(self,X,X2,target):
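
The Bias change does more than move an import: attaching Logexp constrains the variance to stay positive during optimization. Assuming, as in GPy, that Logexp is the softplus map f(x) = log(1 + exp(x)) with inverse log(exp(y) - 1), a small standalone sketch of the effect:

import numpy as np

def logexp_f(x):
    # unconstrained optimizer space -> positive model space (softplus)
    return np.log1p(np.exp(x))

def logexp_finv(y):
    # positive model space -> unconstrained optimizer space
    return np.log(np.expm1(y))

variance = 0.5                    # model-space value, must stay positive
x = logexp_finv(variance)         # what the optimizer actually moves
x -= 10.0                         # even a large unconstrained downhill step...
print(logexp_f(x) > 0.0)          # ...still maps back to a positive variance: True
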
@@ -61,7 +61,7 @@ class Linear(Kernpart):

     def update_gradients_full(self, dL_dK, X):
         #self.variances.gradient[:] = 0
-        self._param_grad_helper(dL_dK, X, self.variances.gradient)
+        self._param_grad_helper(dL_dK, X, None, self.variances.gradient)

     def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
         tmp = dL_dKdiag[:, None] * X ** 2
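
The new None argument slotted in before the gradient target presumably stands for X2, with None meaning the symmetric case X2 = X; that reading comes from the call site only, not from _param_grad_helper's definition. For reference, the variance gradient of an ARD linear kernel K = X diag(v) X2^T reduces to one einsum per dimension, sketched here in plain numpy (function name hypothetical):

import numpy as np

def linear_variance_gradients(dL_dK, X, X2=None):
    """dL/dv_q for K = X diag(v) X2.T, since dK/dv_q = outer(X[:, q], X2[:, q])."""
    if X2 is None:  # symmetric case, mirroring the None passed in the diff
        X2 = X
    return np.einsum('nm,nq,mq->q', dL_dK, X, X2)

X = np.random.randn(5, 3)
dL_dK = np.random.randn(5, 5)
print(linear_variance_gradients(dL_dK, X))  # one gradient entry per input dimension
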
@@ -265,7 +265,7 @@ class RBF(Kernpart):
             }
             """
             num_data, num_inducing, input_dim = X.shape[0], X.shape[0], self.input_dim
-            X, dvardLdK = param_to_array(X, dvardLdK)
+            X, dvardLdK, var_len3 = param_to_array(X, dvardLdK, var_len3)
             weave.inline(code, arg_names=['num_data', 'num_inducing', 'input_dim', 'X', 'target', 'dvardLdK', 'var_len3'], type_converters=weave.converters.blitz, **self.weave_options)
         else:
             code = """
@@ -282,7 +282,7 @@ class RBF(Kernpart):
             }
             """
             num_data, num_inducing, input_dim = X.shape[0], X2.shape[0], self.input_dim
-            X, X2, dvardLdK = param_to_array(X, X2, dvardLdK)
+            X, X2, dvardLdK, var_len3 = param_to_array(X, X2, dvardLdK, var_len3)
             weave.inline(code, arg_names=['num_data', 'num_inducing', 'input_dim', 'X', 'X2', 'target', 'dvardLdK', 'var_len3'], type_converters=weave.converters.blitz, **self.weave_options)
         return target
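
Both RBF hunks fix the same omission: var_len3 reached weave.inline still wrapped in a Param, while the blitz type converters expect plain numpy ndarrays, so it is now routed through param_to_array with the rest. A sketch of the unwrapping param_to_array presumably performs (an assumption from the call sites, not GPy's actual implementation):

import numpy as np

def param_to_array(*args):
    # Sketch: strip ndarray subclasses (e.g. GPy's Param) down to plain,
    # contiguous base ndarrays so C-level code sees raw buffers.
    unwrapped = tuple(np.ascontiguousarray(np.asarray(a)) for a in args)
    return unwrapped if len(unwrapped) > 1 else unwrapped[0]
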
@@ -72,9 +72,10 @@ class BayesianGPLVM(SparseGP, GPLVM):
         return 0.5 * (var_mean + var_S) - 0.5 * self.input_dim * self.num_data

     def parameters_changed(self):
-        super(BayesianGPLVM, self).parameters_changed()
-        self._log_marginal_likelihood -= self.KL_divergence()
+        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.X_variance, self.Z, self.likelihood, self.Y)
+        self._update_gradients_Z(add=False)
+
+        self._log_marginal_likelihood -= self.KL_divergence()
         dL_dmu, dL_dS = self.dL_dmuS()

         # dL:
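
The context line returning 0.5 * (var_mean + var_S) - 0.5 * self.input_dim * self.num_data is the closed-form KL divergence between the factorized posterior q(X) = N(mu, S) and the N(0, I) prior, KL = 0.5 * sum(mu^2 + S - log S - 1), assuming var_mean = sum(mu^2) and var_S = sum(S - log S) (those two lines sit outside this hunk). A numpy sketch, variable names illustrative:

import numpy as np

def kl_qp(mu, S):
    """KL( N(mu, diag(S)) || N(0, I) ) for mu, S of shape (num_data, input_dim)."""
    num_data, input_dim = mu.shape
    var_mean = np.square(mu).sum()
    var_S = (S - np.log(S)).sum()
    return 0.5 * (var_mean + var_S) - 0.5 * input_dim * num_data

mu = np.random.randn(4, 2)
S = np.exp(np.random.randn(4, 2))   # variances must be positive
print(kl_qp(mu, S) >= 0)            # KL is non-negative: True
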
@@ -161,6 +162,38 @@ class BayesianGPLVM(SparseGP, GPLVM):

         return dim_reduction_plots.plot_steepest_gradient_map(self,*args,**kwargs)

+class BayesianGPLVMWithMissingData(BayesianGPLVM):
+    def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
+                 Z=None, kernel=None, inference_method=None, likelihood=None, name='bayesian gplvm', **kwargs):
+        from ..util.subarray_and_sorting import common_subarrays
+        self.subarrays = common_subarrays(Y)
+        import ipdb;ipdb.set_trace()
+        BayesianGPLVM.__init__(self, Y, input_dim, X=X, X_variance=X_variance, init=init, num_inducing=num_inducing, Z=Z, kernel=kernel, inference_method=inference_method, likelihood=likelihood, name=name, **kwargs)
+
+    def parameters_changed(self):
+        super(BayesianGPLVM, self).parameters_changed()
+        self._log_marginal_likelihood -= self.KL_divergence()
+
+        dL_dmu, dL_dS = self.dL_dmuS()
+
+        # dL:
+        self.q.mean.gradient = dL_dmu
+        self.q.variance.gradient = dL_dS
+
+        # dKL:
+        self.q.mean.gradient -= self.X
+        self.q.variance.gradient -= (1. - (1. / (self.X_variance))) * 0.5
+
+if __name__ == '__main__':
+    import numpy as np
+    X = np.random.randn(20,2)
+    W = np.linspace(0,1,10)[None,:]
+    Y = (X*W).sum(1)
+    missing = np.random.binomial(1,.1,size=Y.shape)
+
+    pass
+
 def latent_cost_and_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
     """
     objective function for fitting the latent variables for test points
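
The dKL corrections in the new parameters_changed follow directly from that KL expression: dKL/dmu = mu (hence q.mean.gradient -= self.X) and dKL/dS = 0.5 * (1 - 1/S) (hence the (1. - 1./X_variance) * 0.5 term). A quick self-contained finite-difference check of both:

import numpy as np

def kl_qp(mu, S):
    # KL( N(mu, diag(S)) || N(0, I) ), same quantity as the sketch above
    return 0.5 * (np.square(mu) + S - np.log(S) - 1.0).sum()

mu, S = np.random.randn(3, 2), np.exp(np.random.randn(3, 2))
eps = 1e-6

dKL_dmu = mu                     # matches q.mean.gradient -= self.X
dKL_dS = 0.5 * (1.0 - 1.0 / S)   # matches the (1. - 1./X_variance) * 0.5 term

mu_p = mu.copy(); mu_p[0, 0] += eps
S_p = S.copy(); S_p[0, 0] += eps
print(np.isclose((kl_qp(mu_p, S) - kl_qp(mu, S)) / eps, dKL_dmu[0, 0], atol=1e-4))  # True
print(np.isclose((kl_qp(mu, S_p) - kl_qp(mu, S)) / eps, dKL_dS[0, 0], atol=1e-4))   # True
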
GPy/testing/index_operations_tests.py (new file, 60 additions)

@@ -0,0 +1,60 @@
+'''
+Created on 12 Feb 2014
+
+@author: maxz
+'''
+import unittest
+import numpy as np
+from GPy.core.parameterization.index_operations import ParameterIndexOperations,\
+    ParameterIndexOperationsView
+
+one, two, three = 'one', 'two', 'three'
+
+class Test(unittest.TestCase):
+
+    def setUp(self):
+        self.param_index = ParameterIndexOperations()
+        self.param_index.add(one, [3])
+        self.param_index.add(two, [0,5])
+        self.param_index.add(three, [2,4,7])
+
+    def test_remove(self):
+        self.param_index.remove(three, np.r_[3:10])
+        self.assertListEqual(self.param_index[three].tolist(), [2])
+        self.param_index.remove(one, [1])
+        self.assertListEqual(self.param_index[one].tolist(), [3])
+
+    def test_index_view(self):
+        #=======================================================================
+        # base:   0     1     2     3     4     5     6     7     8     9
+        # one:                      one
+        # two:    two                           two
+        # three:              three       three             three
+        # view:              [0     1     2     3     4     5]
+        #=======================================================================
+        view = ParameterIndexOperationsView(self.param_index, 2, 6)
+        self.assertSetEqual(set(view.properties()), set([one, two, three]))
+        for v, p in zip(view.properties_for(np.r_[:6]), self.param_index.properties_for(np.r_[2:2+6])):
+            self.assertEqual(v, p)
+        self.assertSetEqual(set(view[two]), set([3]))
+        self.assertSetEqual(set(self.param_index[two]), set([0, 5]))
+        view.add(two, np.array([0]))
+        self.assertSetEqual(set(view[two]), set([0,3]))
+        self.assertSetEqual(set(self.param_index[two]), set([0, 2, 5]))
+        view.clear()
+        for v, p in zip(view.properties_for(np.r_[:6]), self.param_index.properties_for(np.r_[2:2+6])):
+            self.assertEqual(v, p)
+            self.assertEqual(v, [])
+        param_index = ParameterIndexOperations()
+        param_index.add(one, [3])
+        param_index.add(two, [0,5])
+        param_index.add(three, [2,4,7])
+        view2 = ParameterIndexOperationsView(param_index, 2, 6)
+        view.update(view2)
+        for [i,v], [i2,v2] in zip(sorted(param_index.items()), sorted(self.param_index.items())):
+            self.assertEqual(i, i2)
+            self.assertTrue(np.all(v == v2))
+
+if __name__ == "__main__":
+    #import sys;sys.argv = ['', 'Test.test_index_view']
+    unittest.main()
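
A compact way to read test_index_view: the view is a window over base indices 2..7 that reports indices shifted down by its offset, so 'two' at base index 5 appears as 3 in the view, and writes go the other way (view.add(two, [0]) lands at base index 2). Condensed to a few lines, using the same classes the test imports:

import numpy as np
from GPy.core.parameterization.index_operations import ParameterIndexOperations,\
    ParameterIndexOperationsView

base = ParameterIndexOperations()
base.add('two', [0, 5])
view = ParameterIndexOperationsView(base, 2, 6)  # window over base indices 2..7

print(view['two'])                # base index 5 minus the offset 2 -> 3
view.add('two', np.array([0]))    # writes through at base index 2 + 0
print(base['two'])                # indices 0, 2, 5
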
@@ -10,6 +10,7 @@ import datasets
 import mocap
 import decorators
 import classification
+import subarray_and_sorting
 import caching

 try:
@@ -29,7 +29,7 @@ class Cacher(object):
         return self.cached_outputs[-1]

     def on_cache_changed(self, X):
-        print id(X)
+        #print id(X)
         i = self.cached_inputs.index(X)
         self.inputs_changed[i] = True
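
Beyond silencing the debug print, the surrounding mechanism is worth spelling out: every cached input is expected to notify the Cacher when it mutates, and on_cache_changed just flags the matching slot so the next call recomputes instead of reusing a stale output. A minimal observer-style sketch of that idea (illustrative, not GPy's actual Cacher; inputs assumed comparable with ==):

class MiniCacher(object):
    """Sketch: cache one output per input, recompute only after a change signal."""
    def __init__(self, operation):
        self.operation = operation
        self.cached_inputs, self.cached_outputs, self.inputs_changed = [], [], []

    def __call__(self, X):
        if X in self.cached_inputs:
            i = self.cached_inputs.index(X)
            if self.inputs_changed[i]:               # input mutated since last call
                self.cached_outputs[i] = self.operation(X)
                self.inputs_changed[i] = False
            return self.cached_outputs[i]
        self.cached_inputs.append(X)
        self.cached_outputs.append(self.operation(X))
        self.inputs_changed.append(False)
        return self.cached_outputs[-1]               # as in the context line above

    def on_cache_changed(self, X):
        # the callback the diff touches: flag the slot, invalidate lazily
        self.inputs_changed[self.cached_inputs.index(X)] = True

square = MiniCacher(lambda x: x * x)
print(square(3), square(3))    # computed once, then a cache hit
square.on_cache_changed(3)     # pretend the input object mutated
print(square(3))               # recomputed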