merge conflict

Max Zwiessele 2014-03-17 17:03:36 +00:00
commit 3b42bd4def
14 changed files with 109 additions and 55 deletions

View file

@ -42,7 +42,10 @@ class GP(Model):
assert Y.shape[0] == self.num_data
_, self.output_dim = self.Y.shape
- self.Y_metadata = Y_metadata or {}
+ if Y_metadata is None:
+     self.Y_metadata = {}
+ else:
+     self.Y_metadata = Y_metadata
assert isinstance(kernel, kern.Kern)
#assert self.input_dim == kernel.input_dim
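For orientation: Y_metadata is a plain dict of per-datum annotations. Elsewhere in this commit it carries an 'output_index' array saying which likelihood governs each row of Y. A minimal sketch of that convention (shape assumed from its use in the files below):

import numpy as np
Y = np.random.randn(10, 1)
Y_metadata = {'output_index': np.zeros((Y.shape[0], 1), dtype=int)}  # one index per row of Y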

View file

@ -3,6 +3,7 @@
from posterior import Posterior
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
from ...util import diag
import numpy as np
log_2_pi = np.log(2*np.pi)
@ -14,8 +15,7 @@ class FITC(object):
the posterior.
"""
- def __init__(self):
-     self.const_jitter = 1e-6
+ const_jitter = 1e-6
def inference(self, kern, X, Z, likelihood, Y):
@ -33,6 +33,7 @@ class FITC(object):
U = Knm
#factor Kmm
diag.add(Kmm, self.const_jitter)
Kmmi, L, Li, _ = pdinv(Kmm)
#compute beta_star, the effective noise precision
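The const_jitter added to the diagonal of Kmm above is the usual guard that keeps the Cholesky factorization inside pdinv from failing when Kmm is numerically only semi-definite. A standalone sketch of the idea, assuming nothing beyond numpy:

import numpy as np
K = np.ones((3, 3))                  # rank-1, so positive semi-definite only
# np.linalg.cholesky(K)              # would raise LinAlgError here
K[np.diag_indices_from(K)] += 1e-6   # same effect as diag.add(Kmm, self.const_jitter)
L = np.linalg.cholesky(K)            # succeeds on the jittered matrix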

View file

@ -65,7 +65,7 @@ class VarDTC(object):
_, output_dim = Y.shape
#see whether we've got a different noise variance for each datum
- beta = 1./np.fmax(likelihood.variance, 1e-6)
+ beta = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
# VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
#self.YYTfactor = self.get_YYTfactor(Y)
#VVT_factor = self.get_VVTfactor(self.YYTfactor, beta)
@ -221,7 +221,7 @@ class VarDTCMissingData(object):
psi2_all = None
Ys, traces = self._Y(Y)
- beta_all = 1./np.fmax(likelihood.variance, 1e-6)
+ beta_all = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
het_noise = beta_all.size != 1
import itertools
@ -328,18 +328,20 @@ class VarDTCMissingData(object):
diag.add(Bi, 1)
woodbury_inv_all[:, :, ind] = backsub_both_sides(Lm, Bi)[:,:,None]
dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
# gradients:
if uncertain_inputs:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dpsi0':dL_dpsi0_all,
'dL_dpsi1':dL_dpsi1_all,
'dL_dpsi2':dL_dpsi2_all,
- 'dL_dR':dL_dR}
+ 'dL_dthetaL':dL_dthetaL}
else:
grad_dict = {'dL_dKmm': dL_dKmm,
'dL_dKdiag':dL_dpsi0_all,
'dL_dKnm':dL_dpsi1_all,
- 'dL_dR':dL_dR}
+ 'dL_dthetaL':dL_dthetaL}
#get sufficient things for posterior prediction
#TODO: do we really want to do this in the loop?
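The switch from likelihood.variance to likelihood.gaussian_variance(Y_metadata) is what allows beta to be a vector: for a mixed-noise likelihood the variance is presumably looked up per datum from the output index, which makes het_noise above true. A hedged sketch of that lookup (names and shapes are assumptions, not this file's API):

import numpy as np
variances = np.array([0.1, 2.0])                     # one Gaussian variance per output
Y_metadata = {'output_index': np.array([0, 0, 1, 1])[:, None]}
per_datum_var = variances[Y_metadata['output_index'].flatten()]
beta = 1./np.fmax(per_datum_var, 1e-6)               # per-datum precision, beta.size > 1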

View file

@ -15,21 +15,21 @@ class Stationary(Kern):
"""
Stationary kernels (covariance functions).
Stationary covariance functions depend only on r, where r is defined as
r = \sqrt{ \sum_{q=1}^Q (x_q - x'_q)^2 }
The covariance function k(x, x') can then be written k(r).
In this implementation, r is scaled by the lengthscales parameter(s):
r = \sqrt{ \sum_{q=1}^Q \frac{(x_q - x'_q)^2}{\ell_q^2} }.
By default, there's only one lengthscale: separate lengthscales for each
dimension can be enabled by setting ARD=True.
To implement a stationary covariance function using this class, one need
only define the covariance function k(r) and its derivative.
...
def K_of_r(self, r):
@ -37,10 +37,10 @@ class Stationary(Kern):
def dK_dr(self, r):
return bar
The lengthscale(s) and variance parameters are added to the structure automatically.
"""
def __init__(self, input_dim, variance, lengthscale, ARD, active_dims, name):
super(Stationary, self).__init__(input_dim, active_dims, name)
self.ARD = ARD
@ -57,7 +57,7 @@ class Stationary(Kern):
if lengthscale.size != input_dim:
lengthscale = np.ones(input_dim)*lengthscale
else:
lengthscale = np.ones(self.input_dim)
self.lengthscale = Param('lengthscale', lengthscale, Logexp())
self.variance = Param('variance', variance, Logexp())
assert self.variance.size==1
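The docstring above promises that a subclass only needs K_of_r and dK_dr. As a hedged illustration (mirroring the Matern52 constructor further down, not code from this commit), an exponential / Matern-1/2 kernel would look like:

import numpy as np

class Exponential(Stationary):
    """Illustrative subclass only: k(r) = variance * exp(-r)."""
    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Exponential'):
        super(Exponential, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
    def K_of_r(self, r):
        return self.variance * np.exp(-r)
    def dK_dr(self, r):
        return -self.variance * np.exp(-r)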
@ -95,7 +95,9 @@ class Stationary(Kern):
#X2, = self._slice_X(X2)
X1sq = np.sum(np.square(X),1)
X2sq = np.sum(np.square(X2),1)
- return np.sqrt(-2.*np.dot(X, X2.T) + (X1sq[:,None] + X2sq[None,:]))
+ r2 = -2.*np.dot(X, X2.T) + X1sq[:,None] + X2sq[None,:]
+ r2[r2<0] = 0. # a bit hacky: clamp small negative values caused by round-off
+ return np.sqrt(r2)
@Cache_this(limit=5, ignore_args=())
def _scaled_dist(self, X, X2=None):
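For context on the r2 clamp a few lines up: expanding ||x - x'||^2 as x.x - 2 x.x' + x'.x' suffers catastrophic cancellation when x and x' are close, so the result can come out as a tiny negative number, and np.sqrt would then return NaN. A self-contained demonstration (values are illustrative; the sign of the error depends on rounding):

import numpy as np
X = np.random.RandomState(0).randn(5, 3) * 1e4
X2 = X + 1e-9                              # nearly identical points
X1sq, X2sq = np.sum(np.square(X), 1), np.sum(np.square(X2), 1)
r2 = -2.*np.dot(X, X2.T) + X1sq[:, None] + X2sq[None, :]
r2[r2 < 0] = 0.                            # without this, np.sqrt(r2) can produce NaNs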
@ -133,7 +135,7 @@ class Stationary(Kern):
if self.ARD:
#rinv = self._inv_dist(X, X2) # this is rather high memory? Should we loop instead?
#d = X[:, None, :] - X2[None, :, :]
#x_xl3 = np.square(d)
#self.lengthscale.gradient = -((dL_dr*rinv)[:,:,None]*x_xl3).sum(0).sum(0)/self.lengthscale**3
tmp = dL_dr*self._inv_dist(X, X2)
if X2 is None: X2 = X
@ -247,7 +249,7 @@ class Matern52(Stationary):
.. math::
k(r) = \sigma^2 (1 + \sqrt{5} r + \frac{5}{3} r^2) \exp(- \sqrt{5} r)
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat52'):
super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)

View file

@ -56,7 +56,7 @@ class Gaussian(Likelihood):
def update_gradients(self, grad):
self.variance.gradient = grad
- def exact_inference_gradients(self, dL_dKdiag):
+ def exact_inference_gradients(self, dL_dKdiag, Y_metadata=None):
return dL_dKdiag.sum()
def _preprocess_values(self, Y):
@ -295,7 +295,7 @@ class Gaussian(Likelihood):
"""
return self.variance
- def samples(self, gp):
+ def samples(self, gp, Y_metadata=None):
"""
Returns a set of samples of observations based on a given value of the latent variable.
@ -303,6 +303,8 @@ class Gaussian(Likelihood):
"""
orig_shape = gp.shape
gp = gp.flatten()
Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp])
return Ysim.reshape(orig_shape)

View file

@ -142,7 +142,12 @@ class Likelihood(Parameterized):
"""
#conditional_mean: the expected value of y given some f, under this likelihood
def int_mean(f,m,v):
- return self.conditional_mean(f)*np.exp(-(0.5/v)*np.square(f - m))
+ p = np.exp(-(0.5/v)*np.square(f - m))
+ #If p is zero then conditional_mean will overflow
+ if p < 1e-10:
+     return 0.
+ else:
+     return self.conditional_mean(f)*p
scaled_mean = [quad(int_mean, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
mean = np.array(scaled_mean)[:,None] / np.sqrt(2*np.pi*(variance))
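In symbols, the quadrature above computes the predictive mean by integrating the conditional mean of y over the Gaussian on the latent f, with the 1/\sqrt{2\pi v} prefactor applied afterwards:

\mathbb{E}[y_* \mid m, v] = \int \mathbb{E}[y \mid f]\, \mathcal{N}(f \mid m, v)\, df = \frac{1}{\sqrt{2\pi v}} \int \mathbb{E}[y \mid f]\, e^{-(f-m)^2/2v}\, df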
@ -165,7 +170,12 @@ class Likelihood(Parameterized):
# E( V(Y_star|f_star) )
def int_var(f,m,v):
- return self.conditional_variance(f)*np.exp(-(0.5/v)*np.square(f - m))
+ p = np.exp(-(0.5/v)*np.square(f - m))
+ #If p is zero then conditional_variance will overflow
+ if p < 1e-10:
+     return 0.
+ else:
+     return self.conditional_variance(f)*p
scaled_exp_variance = [quad(int_var, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
exp_var = np.array(scaled_exp_variance)[:,None] / normalizer
@ -178,7 +188,13 @@ class Likelihood(Parameterized):
#E( E(Y_star|f_star)**2 )
def int_pred_mean_sq(f,m,v,predictive_mean_sq):
- return self.conditional_mean(f)**2*np.exp(-(0.5/v)*np.square(f - m))
+ p = np.exp(-(0.5/v)*np.square(f - m))
+ #If p is zero then conditional_mean**2 will overflow
+ if p < 1e-10:
+     return 0.
+ else:
+     return self.conditional_mean(f)**2*p
scaled_exp_exp2 = [quad(int_pred_mean_sq, -np.inf, np.inf,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)]
exp_exp2 = np.array(scaled_exp_exp2)[:,None] / normalizer
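The three integrals in this method together implement the law of total variance, matching the comments above (E( V(Y_star|f_star) ) and E( E(Y_star|f_star)**2 )):

\mathrm{Var}[y_*] = \mathbb{E}_f[\mathrm{Var}(y \mid f)] + \mathbb{E}_f[\mathbb{E}(y \mid f)^2] - \mathbb{E}[y_*]^2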
@ -387,13 +403,14 @@ class Likelihood(Parameterized):
return pred_mean, pred_var
- def predictive_quantiles(self, mu, var, quantiles, Y_metadata):
+ def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
#compute the quantiles by sampling!!!
N_samp = 1000
s = np.random.randn(mu.shape[0], N_samp)*np.sqrt(var) + mu
- ss_f = s.flatten()
- ss_y = self.samples(ss_f)
- ss_y = ss_y.reshape(mu.shape[0], N_samp)
+ #ss_f = s.flatten()
+ #ss_y = self.samples(ss_f, Y_metadata)
+ ss_y = self.samples(s, Y_metadata)
+ #ss_y = ss_y.reshape(mu.shape[0], N_samp)
return [np.percentile(ss_y ,q, axis=1)[:,None] for q in quantiles]

View file

@ -6,6 +6,9 @@ from scipy import stats
import scipy as sp
from GPy.util.univariate_Gaussian import std_norm_pdf,std_norm_cdf,inv_std_norm_cdf
_exp_lim_val = np.finfo(np.float64).max
_lim_val = np.log(_exp_lim_val)
class GPTransformation(object):
"""
Link function class for doing non-Gaussian likelihoods approximation
@ -92,16 +95,16 @@ class Log(GPTransformation):
"""
def transf(self,f):
- return np.exp(f)
+ return np.exp(np.clip(f, -_lim_val, _lim_val))
def dtransf_df(self,f):
- return np.exp(f)
+ return np.exp(np.clip(f, -_lim_val, _lim_val))
def d2transf_df2(self,f):
- return np.exp(f)
+ return np.exp(np.clip(f, -_lim_val, _lim_val))
def d3transf_df3(self,f):
- return np.exp(f)
+ return np.exp(np.clip(f, -_lim_val, _lim_val))
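A small illustration of what the clipping buys (plain numpy, not from this commit): for large f, np.exp overflows to inf and downstream gradients become NaN; clipped at _lim_val it saturates at the largest finite float instead.

import numpy as np
_lim_val = np.log(np.finfo(np.float64).max)      # about 709.78
f = np.array([-1000., 0., 1000.])
print(np.exp(f))                                 # [0., 1., inf] plus an overflow warning
print(np.exp(np.clip(f, -_lim_val, _lim_val)))   # last entry saturates near 1.8e308, no inf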
class Log_ex_1(GPTransformation):
"""

View file

@ -23,22 +23,22 @@ class MixedNoise(Likelihood):
def exact_inference_gradients(self, dL_dKdiag, Y_metadata):
assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
- ind = Y_metadata['output_index']
+ ind = Y_metadata['output_index'].flatten()
return np.array([dL_dKdiag[ind==i].sum() for i in range(len(self.likelihoods_list))])
def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
if all([isinstance(l, Gaussian) for l in self.likelihoods_list]):
- ind = Y_metadata['output_index']
+ ind = Y_metadata['output_index'].flatten()
_variance = np.array([self.likelihoods_list[j].variance for j in ind ])
if full_cov:
var += np.eye(var.shape[0])*_variance
- d = 2*np.sqrt(np.diag(var))
- low, up = mu - d, mu + d
+ #d = 2*np.sqrt(np.diag(var))
+ #low, up = mu - d, mu + d
else:
var += _variance
- d = 2*np.sqrt(var)
- low, up = mu - d, mu + d
- return mu, var, low, up
+ #d = 2*np.sqrt(var)
+ #low, up = mu - d, mu + d
+ return mu, var#, low, up
else:
raise NotImplementedError
@ -52,8 +52,28 @@ class MixedNoise(Likelihood):
def covariance_matrix(self, Y, Y_metadata):
assert all([isinstance(l, Gaussian) for l in self.likelihoods_list])
ind = Y_metadata['output_index'].flatten()
variance = np.zeros(Y.shape[0])
- for lik, ind in itertools.izip(self.likelihoods_list, self.likelihoods_indices):
-     variance[ind] = lik.variance
+ for lik, j in zip(self.likelihoods_list, range(len(self.likelihoods_list))):
+     variance[ind==j] = lik.variance
return np.diag(variance)
def samples(self, gp, Y_metadata):
"""
Returns a set of samples of observations based on a given value of the latent variable.
:param gp: latent variable
"""
N1, N2 = gp.shape
Ysim = np.zeros((N1,N2))
ind = Y_metadata['output_index'].flatten()
for j in np.unique(ind):
flt = ind==j
gp_filtered = gp[flt,:]
n1 = gp_filtered.shape[0]
lik = self.likelihoods_list[j]
_ysim = np.array([np.random.normal(lik.gp_link.transf(gpj), scale=np.sqrt(lik.variance), size=1) for gpj in gp_filtered.flatten()])
Ysim[flt,:] = _ysim.reshape(n1,N2)
return Ysim
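A hedged usage sketch for the new samples method (constructor arguments and import paths are assumptions based on this file and on build_likelihood below):

import numpy as np
from GPy.likelihoods import Gaussian
from GPy.likelihoods.mixed_noise import MixedNoise
lik = MixedNoise(likelihoods_list=[Gaussian(variance=0.1), Gaussian(variance=2.0)])
gp = np.random.randn(6, 1)                            # latent values, one column
Y_metadata = {'output_index': np.array([0, 0, 0, 1, 1, 1])[:, None]}
Ysim = lik.samples(gp, Y_metadata)                    # rows 0-2 use variance 0.1, rows 3-5 use 2.0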

View file

@ -21,7 +21,7 @@ class Poisson(Likelihood):
"""
def __init__(self, gp_link=None):
if gp_link is None:
- gp_link = link_functions.Log_ex_1()
+ gp_link = link_functions.Log()
super(Poisson, self).__init__(gp_link, name='Poisson')
@ -143,7 +143,7 @@ class Poisson(Likelihood):
"""
return self.gp_link.transf(gp)
- def samples(self, gp):
+ def samples(self, gp, Y_metadata=None):
"""
Returns a set of samples of observations based on a given value of the latent variable.

View file

@ -66,7 +66,7 @@ class BayesianGPLVM(SparseGP):
super(BayesianGPLVM, self).parameters_changed()
self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
- self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict)
+ self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])
# update for the KL divergence
self.variational_prior.update_gradients_KL(self.X)
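(The explicit keyword arguments are presumably needed because grad_dict, after the var_dtc change above, also carries 'dL_dthetaL', which gradients_qX_expectations does not accept; unpacking **self.grad_dict would then raise a TypeError.)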

View file

@ -31,7 +31,7 @@ class GPCoregionalizedRegression(GP):
def __init__(self, X_list, Y_list, kernel=None, likelihoods_list=None, name='GPCR',W_rank=1,kernel_name='X'):
#Input and Output
- X,Y,self.noise_index = util.multioutput.build_XY(X_list,Y_list)
+ X,Y,self.output_index = util.multioutput.build_XY(X_list,Y_list)
Ny = len(Y_list)
#Kernel
@ -39,6 +39,6 @@ class GPCoregionalizedRegression(GP):
kernel = util.multioutput.ICM(input_dim=X.shape[1]-1, num_outputs=Ny, kernel=GPy.kern.rbf(X.shape[1]-1), W_rank=1,name=kernel_name)
#Likelihood
- likelihood = util.multioutput.build_likelihood(Y_list,self.noise_index,likelihoods_list)
+ likelihood = util.multioutput.build_likelihood(Y_list,self.output_index,likelihoods_list)
- super(GPCoregionalizedRegression, self).__init__(X,Y,kernel,likelihood, noise_index=self.noise_index)
+ super(GPCoregionalizedRegression, self).__init__(X,Y,kernel,likelihood, Y_metadata={'output_index':self.output_index})
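A hedged end-to-end sketch of the new Y_metadata plumbing (class path and exact behaviour are assumptions; the predict call mirrors the plotting change below):

import numpy as np
import GPy
X1, X2 = np.random.rand(20, 1), np.random.rand(30, 1)
Y1, Y2 = np.sin(6*X1), np.cos(6*X2)
m = GPy.models.GPCoregionalizedRegression([X1, X2], [Y1, Y2])
# build_XY appends an output-index column to X, so test inputs need one too,
# and predict needs the same indices passed as Y_metadata:
Xtest = np.hstack([np.linspace(0, 1, 50)[:, None], np.zeros((50, 1))])  # all rows from output 0
meta = {'output_index': Xtest[:, -1:].astype(int)}
mu, var = m.predict(Xtest, Y_metadata=meta)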

View file

@ -61,7 +61,7 @@ class SSGPLVM(SparseGP):
super(SSGPLVM, self).parameters_changed()
self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
- self.X.mean.gradient, self.X.variance.gradient, self.X.binary_prob.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, **self.grad_dict)
+ self.X.mean.gradient, self.X.variance.gradient, self.X.binary_prob.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])
# update for the KL divergence
self.variational_prior.update_gradients_KL(self.X)

View file

@ -6,6 +6,7 @@ import numpy as np
import Tango
from base_plots import gpplot, x_frame1D, x_frame2D
from ...util.misc import param_to_array
from ...models.gp_coregionalized_regression import GPCoregionalizedRegression
def plot_fit(model, plot_limits=None, which_data_rows='all',
@ -85,8 +86,9 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
lower = m - 2*np.sqrt(v)
upper = m + 2*np.sqrt(v)
else:
- m, v = model.predict(Xgrid, full_cov=False, Y_metadata=Y_metadata)
- lower, upper = model.predict_quantiles(Xgrid, Y_metadata=Y_metadata)
+ meta = {'output_index': Xgrid[:,-1:].astype(np.int)} if isinstance(model,GPCoregionalizedRegression) else None
+ m, v = model.predict(Xgrid, full_cov=False, Y_metadata=meta)
+ lower, upper = model.predict_quantiles(Xgrid, Y_metadata=meta)
for d in which_data_ycols:

View file

@ -35,7 +35,8 @@ def build_likelihood(Y_list,noise_index,likelihoods_list=None):
likelihoods_list = [GPy.likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for y,j in zip(Y_list,range(Ny))]
else:
assert len(likelihoods_list) == Ny
- likelihood = GPy.likelihoods.mixed_noise.MixedNoise(likelihoods_list=likelihoods_list, noise_index=noise_index)
+ #likelihood = GPy.likelihoods.mixed_noise.MixedNoise(likelihoods_list=likelihoods_list, noise_index=noise_index)
+ likelihood = GPy.likelihoods.mixed_noise.MixedNoise(likelihoods_list=likelihoods_list)
return likelihood
@ -43,7 +44,7 @@ def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='X'):
"""
Builds a kernel for an Intrinsic Coregionalization Model
- :input_dim: Input dimensionality
+ :input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
@ -54,7 +55,8 @@ def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='X'):
kernel.input_dim = input_dim
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
- K = kernel.prod(GPy.kern.Coregionalize([input_dim], num_outputs,W_rank,W,kappa,name='B'),name=name)
+ K = kernel.prod(GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim], rank=W_rank,W=W,kappa=kappa,name='B'),name=name)
+ #K = kernel * GPy.kern.Coregionalize(1, num_outputs, active_dims=[input_dim], rank=W_rank,W=W,kappa=kappa,name='B')
+ #K = kernel ** GPy.kern.Coregionalize(input_dim, num_outputs,W_rank,W,kappa, name= 'B')
K['.*variance'] = 1.
K['.*variance'].fix()
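A short usage sketch for ICM as rewired above (the module path is an assumption; the rbf constructor follows the call in gp_coregionalized_regression.py above):

import GPy
# 2 outputs on 1-D inputs: data matrices carry the input in column 0 and the
# output index in column 1, which is why input_dim excludes the index column.
k = GPy.util.multioutput.ICM(input_dim=1, num_outputs=2, kernel=GPy.kern.rbf(1), W_rank=1)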
@ -65,7 +67,7 @@ def LCM(input_dim, num_outputs, kernels_list, W_rank=1,name='X'):
"""
Builds a kernel for a Linear Coregionalization Model
- :input_dim: Input dimensionality
+ :input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel