Merge branch 'devel' of github.com:SheffieldML/GPy into devel

This commit is contained in:
Max Zwiessele 2014-11-03 15:12:28 +00:00
commit 5ab16915a9
16 changed files with 295 additions and 421 deletions

View file

@@ -94,6 +94,9 @@ class VariationalPosterior(Parameterized):
        if self.has_uncertain_inputs():
            assert self.variance.shape == self.mean.shape, "need one variance per sample and dimension"

    def set_gradients(self, grad):
        self.mean.gradient, self.variance.gradient = grad

    def _raveled_index(self):
        index = np.empty(dtype=int, shape=0)
        size = 0
@@ -158,6 +161,9 @@ class SpikeAndSlabPosterior(VariationalPosterior):
        self.gamma = Param("binary_prob", binary_prob, Logistic(1e-10, 1.-1e-10))
        self.link_parameter(self.gamma)

    def set_gradients(self, grad):
        self.mean.gradient, self.variance.gradient, self.gamma.gradient = grad

    def __getitem__(self, s):
        if isinstance(s, (int, slice, tuple, list, np.ndarray)):
            import copy
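Both `set_gradients` hooks above unpack a single gradient object into the linked parameters, in the same order the posterior stores them. A minimal sketch of the calling convention (shapes and positional constructor arguments are illustrative assumptions, not taken from this diff):

import numpy as np
from GPy.core.parameterization import variational

# a toy posterior over 10 two-dimensional latent points
q = variational.NormalPosterior(np.random.randn(10, 2), np.ones((10, 2)))
# one gradient array per linked parameter, in (mean, variance) order
q.set_gradients((np.zeros((10, 2)), np.zeros((10, 2))))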

View file

@@ -0,0 +1,128 @@
"""
Inference of the latent positions X for newly observed data Y in (Bayesian) GP-LVM-type models.
"""
import numpy as np
from ...core import Model
from ...core.parameterization import variational

def infer_newX(model, Y_new, optimize=True, init='L2'):
    """
    Infer the distribution of X for the new observed data *Y_new*.

    :param model: the GPy model used in inference
    :type model: GPy.core.Model
    :param Y_new: the new observed data for inference
    :type Y_new: numpy.ndarray
    :param optimize: whether to optimize the location of the new X (True by default)
    :type optimize: boolean
    :param init: the initialization scheme for the new X ('L2' or 'NCC')
    :type init: str
    :return: a tuple containing the estimated posterior distribution of X and the model that optimizes X
    :rtype: (GPy.core.parameterization.variational.VariationalPosterior, GPy.core.Model)
    """
    infr_m = InferenceX(model, Y_new, init=init)
    if optimize:
        infr_m.optimize()
    return infr_m.X, infr_m
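A typical call, sketched under the assumption of a trained Bayesian GP-LVM and held-out rows with the same output dimensionality as the training data:

import numpy as np
import GPy

Y_train = np.random.randn(50, 12)          # hypothetical training data
model = GPy.models.BayesianGPLVM(Y_train, 5)
model.optimize(max_iters=100)

qX, inf_model = infer_newX(model, Y_train[:3], optimize=True, init='L2')
print(qX.mean.shape, qX.variance.shape)    # one latent mean/variance per new row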
class InferenceX(Model):
    """
    The class for inference of new X with given new Y (see also *do_test_latents*).

    :param model: the GPy model used in inference
    :type model: GPy.core.Model
    :param Y: the new observed data for inference
    :type Y: numpy.ndarray
    """
    def __init__(self, model, Y, name='inferenceX', init='L2'):
        if np.isnan(Y).any():
            assert Y.shape[0] == 1, "The current implementation of inference X only supports one data point at a time with missing data!"
            self.missing_data = True
            self.valid_dim = np.logical_not(np.isnan(Y[0]))
        else:
            self.missing_data = False
        super(InferenceX, self).__init__(name)
        self.likelihood = model.likelihood.copy()
        self.kern = model.kern.copy()
        if model.kern.useGPU:
            from ...models import SSGPLVM
            if isinstance(model, SSGPLVM):
                self.kern.GPU_SSRBF(True)
            else:
                self.kern.GPU(True)
        from copy import deepcopy
        self.posterior = deepcopy(model.posterior)
        self.variational_prior = model.variational_prior.copy()
        self.Z = model.Z.copy()
        self.Y = Y
        self.X = self._init_X(model, Y, init=init)
        self.compute_dL()
        self.link_parameter(self.X)
    def _init_X(self, model, Y_new, init='L2'):
        # Initialize the new X by finding the nearest point in Y space.
        Y = model.Y
        if self.missing_data:
            Y = Y[:, self.valid_dim]
            Y_new = Y_new[:, self.valid_dim]
            dist = -2.*Y_new.dot(Y.T) + np.square(Y_new).sum(axis=1)[:, None] + np.square(Y).sum(axis=1)[None, :]
        else:
            if init == 'L2':
                dist = -2.*Y_new.dot(Y.T) + np.square(Y_new).sum(axis=1)[:, None] + np.square(Y).sum(axis=1)[None, :]
            elif init == 'NCC':
                dist = -Y_new.dot(Y.T)  # negated so that argmin picks the most correlated neighbour
        idx = dist.argmin(axis=1)

        from ...models import SSGPLVM
        from ...util.misc import param_to_array
        if isinstance(model, SSGPLVM):
            X = variational.SpikeAndSlabPosterior(param_to_array(model.X.mean[idx]), param_to_array(model.X.variance[idx]), param_to_array(model.X.gamma[idx]))
            if model.group_spike:
                X.gamma.fix()
        else:
            X = variational.NormalPosterior(param_to_array(model.X.mean[idx]), param_to_array(model.X.variance[idx]))
        return X
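The `dist` expression above is the usual expansion of the squared Euclidean distance, ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, computed for all pairs at once. A quick standalone check (toy arrays only):

import numpy as np
from scipy.spatial.distance import cdist

A = np.random.randn(4, 3)
B = np.random.randn(6, 3)
dist = -2.*A.dot(B.T) + np.square(A).sum(axis=1)[:, None] + np.square(B).sum(axis=1)[None, :]
assert np.allclose(dist, cdist(A, B, 'sqeuclidean'))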
    def compute_dL(self):
        # Common computation
        beta = 1./np.fmax(self.likelihood.variance, 1e-6)
        output_dim = self.Y.shape[-1]
        wv = self.posterior.woodbury_vector
        if self.missing_data:
            wv = wv[:, self.valid_dim]
            output_dim = self.valid_dim.sum()
            self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo', wv, wv))/2.
            self.dL_dpsi1 = beta*np.dot(self.Y[:, self.valid_dim], wv.T)
            self.dL_dpsi0 = -beta/2.*np.ones(self.Y.shape[0])
        else:
            self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo', wv, wv))/2.
            self.dL_dpsi1 = beta*np.dot(self.Y, wv.T)
            self.dL_dpsi0 = -beta/2.*np.ones(self.Y.shape[0])
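The `np.einsum('md,od->mo', wv, wv)` term contracts the output dimension, so it is exactly `wv.dot(wv.T)`; a one-line equivalence check (shapes illustrative):

import numpy as np

wv = np.random.randn(7, 3)  # (num_inducing, output_dim)
assert np.allclose(np.einsum('md,od->mo', wv, wv), wv.dot(wv.T))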
    def parameters_changed(self):
        psi0 = self.kern.psi0(self.Z, self.X)
        psi1 = self.kern.psi1(self.Z, self.X)
        psi2 = self.kern.psi2(self.Z, self.X)

        self._log_marginal_likelihood = (self.dL_dpsi2*psi2).sum() + (self.dL_dpsi1*psi1).sum() + (self.dL_dpsi0*psi0).sum()

        X_grad = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.dL_dpsi0, dL_dpsi1=self.dL_dpsi1, dL_dpsi2=self.dL_dpsi2)
        self.X.set_gradients(X_grad)

        from ...core.parameterization.variational import SpikeAndSlabPrior
        if isinstance(self.variational_prior, SpikeAndSlabPrior):
            # subtract the KL divergence and update its gradient contribution
            KL_div = self.variational_prior.KL_divergence(self.X, N=self.Y.shape[0])
            self.variational_prior.update_gradients_KL(self.X, N=self.Y.shape[0])
        else:
            # subtract the KL divergence and update its gradient contribution
            KL_div = self.variational_prior.KL_divergence(self.X)
            self.variational_prior.update_gradients_KL(self.X)
        self._log_marginal_likelihood -= KL_div

    def log_likelihood(self):
        return self._log_marginal_likelihood

View file

@@ -167,7 +167,7 @@ class VarDTC(LatentFunctionInference):
            woodbury_vector = Cpsi1Vf # == Cpsi1V
        else:
            # unfinished code path: guard against silently wrong results
            raise NotImplementedError("this VarDTC branch is not implemented yet")
            psi1V = np.dot(Y.T*beta, psi1).T
            tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
            tmp, _ = dpotrs(LB, tmp, lower=1)
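`dtrtrs` and `dpotrs` are GPy's thin LAPACK wrappers: the first solves a triangular system, the second solves a full system given its Cholesky factor. A self-contained sketch of the same two operations in plain scipy (toy matrices; the mapping to the wrappers follows standard LAPACK semantics):

import numpy as np
from scipy.linalg import cholesky, solve_triangular, cho_solve

A = np.random.randn(5, 5)
A = A.dot(A.T) + 5*np.eye(5)              # toy symmetric positive definite matrix
L = cholesky(A, lower=True)
b = np.random.randn(5, 2)

x1 = solve_triangular(L, b, lower=True)   # like dtrtrs(L, b, lower=1, trans=0): L x1 = b
x2 = cho_solve((L, True), b)              # like dpotrs(L, b, lower=1): (L L^T) x2 = b
assert np.allclose(L.dot(x1), b)
assert np.allclose(A.dot(x2), b)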

View file

@@ -1,47 +0,0 @@
# Copyright (c) 2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)

import sympy as sym
from GPy.util.symbolic import gammaln, normcdfln, normcdf, IndMatrix, create_matrix
import numpy as np
import link_functions
from symbolic import Symbolic
from scipy import stats

class Ordinal(Symbolic):
    """
    Ordinal likelihood (ordered probit).

    .. math::
        p(y_i = j|f_i) = \Phi(b_j - f_i) - \Phi(b_{j-1} - f_i)

    where :math:`\Phi` is the standard normal cdf and the cut points
    :math:`b_j` are cumulative bin widths, with :math:`b_{-1} = -\infty`
    and :math:`b_{categories-1} = \infty`.

    .. Note::
        Y takes non-negative integer values (the category indices).
        The default link function is the identity.

    .. See also::
        symbolic.py, for the parent class
    """
    def __init__(self, categories=3, gp_link=None):
        if gp_link is None:
            gp_link = link_functions.Identity()
        dispersion = sym.Symbol('width', positive=True, real=True)
        y_0 = sym.Symbol('y_0', nonnegative=True, integer=True)
        f_0 = sym.Symbol('f_0', positive=True, real=True)
        log_pdf = create_matrix('log_pdf', 1, categories)
        log_pdf[0] = normcdfln(-f_0)
        if categories > 2:
            w = create_matrix('w', 1, categories)
            log_pdf[categories-1] = normcdfln(f_0 - w[0, 0:categories-2].sum())
            for i in range(1, categories-1):
                # consistent sign convention: P(y=i) = Phi(b_i - f) - Phi(b_{i-1} - f)
                log_pdf[i] = sym.log(normcdf(w[0, 0:i].sum() - f_0) - normcdf(w[0, 0:i-1].sum() - f_0))
        else:
            log_pdf[1] = normcdfln(f_0)
        log_pdf.index_var = y_0
        super(Ordinal, self).__init__(log_pdf=log_pdf, gp_link=gp_link, name='Ordinal')
        # TODO: Check this.
        self.log_concave = True
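Whatever the exact sign convention, an ordered-probit likelihood partitions the real line with cut points, so the category probabilities for any f must sum to one. A standalone numeric check (cut points chosen arbitrarily):

import numpy as np
from scipy.stats import norm

f = 0.3
cuts = np.array([-1.0, 0.5, 2.0])                        # hypothetical cut points b_0 < b_1 < b_2
edges = np.concatenate(([-np.inf], cuts, [np.inf]))
p = norm.cdf(edges[1:] - f) - norm.cdf(edges[:-1] - f)   # P(y = j | f) for j = 0..3
assert np.isclose(p.sum(), 1.0)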

View file

@@ -141,6 +141,22 @@ class BayesianGPLVM(SparseGP_MPI):
                           resolution, ax, marker, s,
                           fignum, plot_inducing, legend,
                           plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)

    def infer_newX(self, Y_new, optimize=True):
        """
        Infer the distribution of X for the new observed data *Y_new*.

        :param Y_new: the new observed data for inference
        :type Y_new: numpy.ndarray
        :param optimize: whether to optimize the location of the new X (True by default)
        :type optimize: boolean
        :return: a tuple containing the estimated posterior distribution of X and the model that optimizes X
        :rtype: (GPy.core.parameterization.variational.VariationalPosterior, GPy.core.Model)
        """
        from ..inference.latent_function_inference.inferenceX import infer_newX
        return infer_newX(self, Y_new, optimize=optimize)
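On a fitted model the method is a one-liner; a short sketch (data and dimensions made up), which also shows that the returned inference model is a full GPy.core.Model and can be gradient-checked:

import numpy as np
import GPy

Y = np.random.randn(40, 8)                      # hypothetical data
m = GPy.models.BayesianGPLVM(Y, 3)
qX, inf_m = m.infer_newX(Y[:5], optimize=False)
print(inf_m.checkgrad())                        # inf_m is itself a GPy model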
    def do_test_latents(self, Y):
        """

View file

@@ -0,0 +1,71 @@
"""
The test cases for various inference algorithms
"""
import unittest, itertools
import numpy as np
import GPy

class InferenceXTestCase(unittest.TestCase):
    def genData(self):
        D1, D2, N = 12, 12, 50
        np.random.seed(1234)
        x = np.linspace(0, 4 * np.pi, N)[:, None]
        s1 = np.vectorize(lambda x: np.sin(x))
        s2 = np.vectorize(lambda x: np.cos(x)**2)
        s3 = np.vectorize(lambda x: -np.exp(-np.cos(2 * x)))
        sS = np.vectorize(lambda x: np.cos(x))
        s1 = s1(x)
        s2 = s2(x)
        s3 = s3(x)
        sS = sS(x)

        s1 -= s1.mean(); s1 /= s1.std(0)
        s2 -= s2.mean(); s2 /= s2.std(0)
        s3 -= s3.mean(); s3 /= s3.std(0)
        sS -= sS.mean(); sS /= sS.std(0)

        S1 = np.hstack([s1, sS])
        S2 = np.hstack([s3, sS])
        P1 = np.random.randn(S1.shape[1], D1)
        P2 = np.random.randn(S2.shape[1], D2)
        Y1 = S1.dot(P1)
        Y2 = S2.dot(P2)
        Y1 += .01 * np.random.randn(*Y1.shape)
        Y2 += .01 * np.random.randn(*Y2.shape)
        Y1 -= Y1.mean(0)
        Y2 -= Y2.mean(0)
        Y1 /= Y1.std(0)
        Y2 /= Y2.std(0)

        slist = [s1, s2, s3, sS]
        slist_names = ["s1", "s2", "s3", "sS"]
        Ylist = [Y1, Y2]
        return Ylist

    def test_inferenceX_BGPLVM(self):
        Ys = self.genData()
        m = GPy.models.BayesianGPLVM(Ys[0], 5, kernel=GPy.kern.Linear(5, ARD=True))
        x, mi = m.infer_newX(m.Y, optimize=False)
        self.assertTrue(mi.checkgrad())

        m.optimize(max_iters=10000)
        x, mi = m.infer_newX(m.Y)
        self.assertTrue(np.allclose(m.X.mean, mi.X.mean))
        self.assertTrue(np.allclose(m.X.variance, mi.X.variance))

if __name__ == "__main__":
    unittest.main()

View file

@@ -4,11 +4,10 @@ The module of tools for parallelization (MPI)
try:
    from mpi4py import MPI

    def get_id_within_node(comm=MPI.COMM_WORLD):
        rank = comm.rank
        nodename = MPI.Get_processor_name()
        nodelist = comm.allgather(nodename)
        return len([i for i in nodelist[:rank] if i == nodename])
except ImportError:
    pass
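`get_id_within_node` counts how many ranks with the same processor name precede this rank, giving a zero-based index within the node. A typical (hypothetical) use is binding each rank to a distinct per-node resource, such as one GPU per process:

from mpi4py import MPI

local_id = get_id_within_node()
# hypothetical use: pin this rank to one GPU on its node
# gpu_to_use = local_id
print("global rank %d -> within-node id %d" % (MPI.COMM_WORLD.rank, local_id))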