Merge branch 'params' of github.com:SheffieldML/GPy into params

This commit is contained in:
Max Zwiessele 2014-01-24 16:37:52 +00:00
commit a9e5513c3f
6 changed files with 40 additions and 54 deletions

View file

@@ -154,7 +154,7 @@ class Param(ObservableArray, Constrainable):
     def _parameters_(self):
         return []
     def _collect_gradient(self, target):
-        target[:] = self.gradient
+        target[:] = self.gradient.flat
     #===========================================================================
     # Fixing Parameters:
     #===========================================================================
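The `.flat` on the new line matters once a parameter's gradient is multi-dimensional: assigning a 2-D array into a 1-D `target` buffer raises a broadcast error, while the flat iterator fills it element by element in C order. A minimal sketch of the difference (variable names hypothetical):

    import numpy as np

    gradient = np.ones((3, 2))   # e.g. the gradient of a matrix-valued Param
    target = np.zeros(6)         # the flat buffer being collected into

    # target[:] = gradient       # ValueError: cannot broadcast (3,2) into (6,)
    target[:] = gradient.flat    # fills all 6 slots in row-major order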

View file

@@ -347,11 +347,11 @@ class kern(Parameterized):
     def update_gradients_full(self, dL_dK, X):
         [p.update_gradients_full(dL_dK, X) for p in self._parameters_]
         pass
     def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
-        pass
+        raise NotImplementedError
     def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
-        pass
+        raise NotImplementedError
     def dK_dtheta(self, dL_dK, X, X2=None):
         """
@@ -375,7 +375,7 @@ class kern(Parameterized):
         return self._transform_gradients(target)
-    def dK_dX(self, dL_dK, X, X2=None):
+    def gradients_X(self, dL_dK, X, X2=None):
         """Compute the gradient of the objective function with respect to X.

         :param dL_dK: An array of gradients of the objective function with respect to the covariance function.
@@ -387,9 +387,9 @@ class kern(Parameterized):
         target = np.zeros_like(X)
         if X2 is None:
-            [p.dK_dX(dL_dK, X[:, i_s], None, target[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
+            [p.gradients_X(dL_dK, X[:, i_s], None, target[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
         else:
-            [p.dK_dX(dL_dK, X[:, i_s], X2[:, i_s], target[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
+            [p.gradients_X(dL_dK, X[:, i_s], X2[:, i_s], target[:, i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
         return target
     def Kdiag(self, X, which_parts='all'):
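The rename from `dK_dX` to `gradients_X` keeps the same accumulation pattern: each constituent kernel part writes its contribution into the columns of `target` selected by its input slice. A standalone sketch of that pattern, assuming (as the calls above suggest) that each part's `gradients_X` adds into the slice it is handed in place:

    import numpy as np

    def gradients_X(parts, input_slices, dL_dK, X, X2=None):
        # Accumulate dL/dX across kernel parts; each part only sees (and
        # writes through) the view of the input columns in its slice.
        target = np.zeros_like(X)
        for part, i_s in zip(parts, input_slices):
            X2_slice = None if X2 is None else X2[:, i_s]
            part.gradients_X(dL_dK, X[:, i_s], X2_slice, target[:, i_s])
        return target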

View file

@@ -16,17 +16,6 @@ class Bias(Kernpart):
         super(Bias, self).__init__(input_dim, name)
         self.variance = Param("variance", variance)
         self.add_parameter(self.variance)
-        #self._set_params(np.array([variance]).flatten())
-    # def _get_params(self):
-    #     return self.variance
-    #
-    # def _set_params(self,x):
-    #     assert x.shape==(1,)
-    #     self.variance = x
-    #
-    # def _get_param_names(self):
-    #     return ['variance']
     def K(self,X,X2,target):
         target += self.variance
@@ -34,18 +23,21 @@ class Bias(Kernpart):
     def Kdiag(self,X,target):
         target += self.variance
-    def dK_dtheta(self,dL_dKdiag,X,X2,target):
-        target += dL_dKdiag.sum()
+    #def dK_dtheta(self,dL_dKdiag,X,X2,target):
+        #target += dL_dKdiag.sum()
+    def update_gradients_full(self, dL_dK, X):
+        self.variance.gradient = dL_dK.sum()
     def dKdiag_dtheta(self,dL_dKdiag,X,target):
         target += dL_dKdiag.sum()
-    def dK_dX(self, dL_dK,X, X2, target):
+    def gradients_X(self, dL_dK,X, X2, target):
         pass
     def dKdiag_dX(self,dL_dKdiag,X,target):
         pass
     #---------------------------------------#
     #            PSI statistics             #
     #---------------------------------------#
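The new `update_gradients_full` shows the parameter-handling switch in miniature: instead of packing derivatives into a `target` array through `dK_dtheta`, each `Param` now carries its own `.gradient`. Since the bias covariance is `K(X, X2) = variance` everywhere, `dL/dvariance` collapses to the sum of `dL_dK`. A self-contained sketch (hypothetical stand-in class, not the real `Kernpart`):

    import numpy as np

    class BiasSketch(object):
        def __init__(self, variance=1.0):
            self.variance = float(variance)
            self.variance_gradient = 0.0     # plays the role of Param.gradient
        def K(self, X, X2=None):
            rows = X.shape[0]
            cols = rows if X2 is None else X2.shape[0]
            return self.variance * np.ones((rows, cols))
        def update_gradients_full(self, dL_dK, X):
            # dK/dvariance is all ones, so the chain rule reduces to a sum.
            self.variance_gradient = dL_dK.sum()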

View file

@@ -161,7 +161,7 @@ class RBF(Kernpart):
         else:
             self.lengthscale.gradient += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK)
-    def _gradients_X(self, dL_dK, X, X2, target):
+    def gradients_X(self, dL_dK, X, X2, target):
         #if self._X is None or X.base is not self._X.base or X2 is not None:
         self._K_computations(X, X2)
         if X2 is None:
@@ -260,7 +260,7 @@ class RBF(Kernpart):
            }
            """
            num_data, num_inducing, input_dim = X.shape[0], X.shape[0], self.input_dim
-           X = param_to_array(X)
+           X, dvardLdK = param_to_array(X, dvardLdK)
            weave.inline(code, arg_names=['num_data', 'num_inducing', 'input_dim', 'X', 'target', 'dvardLdK', 'var_len3'], type_converters=weave.converters.blitz, **self.weave_options)
        else:
            code = """
@@ -277,7 +277,7 @@ class RBF(Kernpart):
            }
            """
            num_data, num_inducing, input_dim = X.shape[0], X2.shape[0], self.input_dim
-           X, X2 = param_to_array(X, X2)
+           X, X2, dvardLdK = param_to_array(X, X2, dvardLdK)
            weave.inline(code, arg_names=['num_data', 'num_inducing', 'input_dim', 'X', 'X2', 'target', 'dvardLdK', 'var_len3'], type_converters=weave.converters.blitz, **self.weave_options)
        return target
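Both hunks unwrap `dvardLdK` alongside `X` before the `weave.inline` call: the blitz type converters expect plain `numpy.ndarray` arguments, and `Param` objects are ndarray subclasses that the converters reject. A plausible reading of what `param_to_array` does (a hypothetical reimplementation, not GPy's actual source):

    import numpy as np

    def param_to_array(*args):
        # View each argument back as a plain ndarray so weave/blitz accepts
        # it; returns a single value for one argument, a tuple otherwise,
        # matching both call sites above.
        arrays = tuple(np.asanyarray(a).view(np.ndarray) for a in args)
        return arrays[0] if len(arrays) == 1 else arrays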

View file

@@ -15,61 +15,54 @@ class GPLVM(GP):
     """
     Gaussian Process Latent Variable Model
+
+    :param Y: observed data
+    :type Y: np.ndarray
+    :param input_dim: latent dimensionality
+    :type input_dim: int
+    :param init: initialisation method for the latent space
+    :type init: 'PCA'|'random'
     """
     def __init__(self, Y, input_dim, init='PCA', X=None, kernel=None, normalize_Y=False, name="gplvm"):
-        """
-        :param Y: observed data
-        :type Y: np.ndarray
-        :param input_dim: latent dimensionality
-        :type input_dim: int
-        :param init: initialisation method for the latent space
-        :type init: 'PCA'|'random'
-        """
         if X is None:
             X = self.initialise_latent(init, input_dim, Y)
         if kernel is None:
             kernel = kern.rbf(input_dim, ARD=input_dim > 1) + kern.bias(input_dim, np.exp(-2))
-        likelihood = Gaussian(Y, normalize=normalize_Y, variance=np.exp(-2.))
-        GP.__init__(self, X, likelihood, kernel, normalize_X=False, name=name)
-        self.X = Param('q_mean', self.X)
-        self.add_parameter(self.X, gradient=self.dK_dX, index=0)
-        self.ensure_default_constraints()
+        likelihood = Gaussian()
+        super(GPLVM, self).__init__(X, Y, kernel, likelihood, name='GPLVM')
+        self.X = Param('X', X)
+        self.add_parameter(self.X, index=0)
     def initialise_latent(self, init, input_dim, Y):
         Xr = np.random.randn(Y.shape[0], input_dim)
         if init == 'PCA':
             PC = PCA(Y, input_dim)[0]
             Xr[:PC.shape[0], :PC.shape[1]] = PC
         else:
             raise NotImplementedError
         return Xr
+    def parameters_changed(self):
+        GP.parameters_changed(self)
+        self.X.gradient = self.kern.gradients_X(self.posterior.dL_dK, self.X)
     def _getstate(self):
         return GP._getstate(self)
     def _setstate(self, state):
         GP._setstate(self, state)
     # def _get_param_names(self):
     #     return sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], []) + GP._get_param_names(self)
     #
     # def _get_params(self):
     #     return np.hstack((self.X.flatten(), GP._get_params(self)))
     #
     # def _set_params(self, x):
     #     self.X = x[:self.num_data * self.input_dim].reshape(self.num_data, self.input_dim).copy()
     #     GP._set_params(self, x[self.X.size:])
-    def dK_dX(self):
-        return self.kern.dK_dX(self.dL_dK, self.X)
     # def _log_likelihood_gradients(self):
     #     dL_dX = self.kern.dK_dX(self.dL_dK, self.X)
     #
     #     return np.hstack((dL_dX.flatten(), GP._log_likelihood_gradients(self)))
     def jacobian(self,X):
         target = np.zeros((X.shape[0],X.shape[1],self.output_dim))
         for i in range(self.output_dim):
             target[:,:,i]=self.kern.dK_dX(np.dot(self.Ki,self.likelihood.Y[:,i])[None, :],X,self.X)
         return target
     def magnification(self,X):
         target=np.zeros(X.shape[0])
         #J = np.zeros((X.shape[0],X.shape[1],self.output_dim))
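The constructor now defers to `GP.__init__` via `super` and registers `X` as an ordinary `Param`; the latent-space gradients that the removed `dK_dX` hook used to supply are pushed in `parameters_changed` instead. A usage sketch under the new signature (assuming the class is exported as `GPy.models.GPLVM`, as in the main package):

    import numpy as np
    import GPy

    Y = np.random.randn(40, 5)            # 40 observations, 5 output dimensions
    m = GPy.models.GPLVM(Y, input_dim=2)  # latent coordinates initialised by PCA
    m.optimize()
    # During optimisation, parameters_changed() refreshes the latent gradient:
    #     self.X.gradient = self.kern.gradients_X(self.posterior.dL_dK, self.X)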

View file

@@ -2,6 +2,7 @@ import pylab as pb
 import numpy as np
 from .. import util
 from GPy.util.latent_space_visualizations.controllers.imshow_controller import ImshowController
+from misc import param_to_array
 import itertools

 def most_significant_input_dimensions(model, which_indices):
@@ -74,7 +75,7 @@ def plot_latent(model, labels=None, which_indices=None,
         index = np.nonzero(labels == ul)[0]
         if model.input_dim == 1:
-            x = model.X[index, input_1]
+            x = param_to_array(model.X)[index, input_1]
             y = np.zeros(index.size)
         else:
             x = model.X[index, input_1]
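The same unwrapping fix appears on the plotting side: `model.X` is now a `Param`, so indexing it yields another `Param` view, and converting to a plain array first keeps the downstream pylab calls on ordinary ndarrays. A minimal demonstration of the behaviour (hypothetical stand-in subclass, not GPy's `Param`):

    import numpy as np

    class ParamLike(np.ndarray):             # stand-in for GPy's Param subclass
        pass

    X = np.arange(12.0).reshape(6, 2).view(ParamLike)
    index = np.array([0, 2, 4])

    x_raw = X[index, 0]                      # fancy indexing preserves the subclass
    x_plain = np.asarray(X)[index, 0]        # unwrapped first: a plain ndarray
    assert type(x_raw) is ParamLike and type(x_plain) is np.ndarray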