Merge branch 'devel' of github.com:sheffieldml/GPy into devel

James Hensman 2014-10-29 20:28:33 +00:00
commit 91c409bac0
9 changed files with 43 additions and 218 deletions


@@ -25,3 +25,15 @@ from core.parameterization import Param, Parameterized, ObsAr
 @nottest
 def tests():
     Tester(testing).test(verbose=10)
+
+def load(file_path):
+    """
+    Load a previously pickled model, using `m.pickle('path/to/file.pickle')`
+
+    :param file_path: path/to/file.pickle
+    """
+    import cPickle as pickle
+    with open(file_path, 'rb') as f:
+        m = pickle.load(f)
+    return m
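
As a quick orientation, here is a minimal usage sketch (not part of the commit; it assumes the standard GPy.models.GPRegression API and that the helper above is exported as GPy.load): the new function is the counterpart to m.pickle().

import numpy as np
import GPy

X = np.random.rand(20, 1)
Y = np.sin(X) + 0.05 * np.random.randn(20, 1)

m = GPy.models.GPRegression(X, Y)
m.optimize()
m.pickle('model.pickle')         # existing helper referenced in the docstring above
m2 = GPy.load('model.pickle')    # the function added here: cPickle.load on the file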


@@ -1,7 +1,7 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
-__updated__ = '2014-10-22'
+__updated__ = '2014-10-29'

 import numpy as np
 from parameter_core import Observable, Pickleable
@@ -17,7 +17,7 @@ class ObsAr(np.ndarray, Pickleable, Observable):
     def __new__(cls, input_array, *a, **kw):
         # allways make a copy of input paramters, as we need it to be in C order:
         if not isinstance(input_array, ObsAr):
-            obj = np.atleast_1d(np.require(np.copy(input_array), dtype=np.float64, requirements=['W', 'C'])).view(cls)
+            obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['W', 'C'])).view(cls)
         else: obj = input_array
         super(ObsAr, obj).__init__(*a, **kw)
         return obj
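
The behavioural change here is dropping the unconditional np.copy: np.require already makes a copy when the input is not writeable, not float64, or not C-contiguous, and otherwise returns the array as-is, so an ObsAr may now share memory with an already-compliant input. A small illustrative sketch of that numpy behaviour (not code from the commit):

import numpy as np

a = np.asfortranarray(np.arange(6, dtype=np.float64).reshape(2, 3))
b = np.require(a, dtype=np.float64, requirements=['W', 'C'])  # copies: a is not C-contiguous
c = np.require(b, dtype=np.float64, requirements=['W', 'C'])  # no copy: b already meets the requirements
assert c is b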


@@ -18,7 +18,7 @@ import numpy as np
 import re
 import logging

-__updated__ = '2014-10-22'
+__updated__ = '2014-10-28'

 class HierarchyError(Exception):
     """
@@ -99,7 +99,7 @@ class Observable(object):
         :param bool trigger_parent: Whether to trigger the parent, after self has updated
         """
-        if not self.update_model() or self._in_init_:
+        if not self.update_model() or (hasattr(self, "_in_init_") and self._in_init_):
            #print "Warning: updates are off, updating the model will do nothing"
            return
        self._trigger_params_changed(trigger_parent)
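
One scenario where the hasattr guard matters (a hypothetical illustration, not from the commit): instances re-created without running __init__, as pickle does, never get _in_init_ set, so the bare attribute access would raise AttributeError.

class Foo(object):
    def __init__(self):
        self._in_init_ = False

f = Foo.__new__(Foo)             # __init__ is skipped, as during unpickling
print(hasattr(f, '_in_init_'))   # False -- f._in_init_ would raise AttributeError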


@@ -346,9 +346,11 @@ def optimize(m, maxiter=1000):
         mu = np.dot(Kx.T, self.posterior.woodbury_vector)
         if full_cov:
             Kxx = kern.K(Xnew)
-            var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
-            #var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
-            var = var.squeeze()
+            if self.posterior.woodbury_inv.ndim == 2:
+                var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
+            elif self.posterior.woodbury_inv.ndim == 3:
+                var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
+            var = var
         else:
             Kxx = kern.Kdiag(Xnew)
             var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
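
For reference (my summary, not text from the commit), both branches evaluate the usual woodbury-form predictive equations

    mu_* = K_*u alpha,        var_* = K_** - K_*u W^-1 K_u*,

with alpha = posterior.woodbury_vector and W^-1 = posterior.woodbury_inv. A 3-dimensional woodbury_inv stacks one W^-1 slice per output dimension, so the ndim == 3 branch computes the correction term once per slice via the tensordot instead of a single matrix product.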


@@ -152,7 +152,7 @@ class Laplace(LatentFunctionInference):
         Ki_W_i = K - C.T.dot(C)

         #compute the log marginal
-        log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata).sum() - np.sum(np.log(np.diag(L)))
+        log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - np.sum(np.log(np.diag(L)))

         # Compute matrices for derivatives
         dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat
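
For reference (my paraphrase of the surrounding code, not text from the commit), the assembled quantity is the standard Laplace-approximate log marginal,

    log q(y | X) = -1/2 f_hat^T K^-1 f_hat + sum_i log p(y_i | f_hat_i) - sum_i log L_ii,

where L is the Cholesky factor of B = I + W^(1/2) K W^(1/2), so the last term equals 1/2 log|B|. The change only swaps .sum() on the likelihood term for np.sum(...), which also works when logpdf returns a plain scalar or a list rather than an ndarray.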


@@ -182,204 +182,6 @@ class VarDTC(LatentFunctionInference):
         post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
         return post, log_marginal, grad_dict

-class VarDTCMissingData(LatentFunctionInference):
-    const_jitter = 1e-10
-    def __init__(self, limit=1, inan=None):
-        from ...util.caching import Cacher
-        self._Y = Cacher(self._subarray_computations, limit)
-        if inan is not None: self._inan = ~inan
-        else: self._inan = None
-        pass
-
-    def set_limit(self, limit):
-        self._Y.limit = limit
-
-    def __getstate__(self):
-        # has to be overridden, as Cacher objects cannot be pickled.
-        return self._Y.limit, self._inan
-
-    def __setstate__(self, state):
-        # has to be overridden, as Cacher objects cannot be pickled.
-        from ...util.caching import Cacher
-        self.limit = state[0]
-        self._inan = state[1]
-        self._Y = Cacher(self._subarray_computations, self.limit)
-
-    def _subarray_computations(self, Y):
-        if self._inan is None:
-            inan = np.isnan(Y)
-            has_none = inan.any()
-            self._inan = ~inan
-            inan = self._inan
-        else:
-            inan = self._inan
-            has_none = True
-        if has_none:
-            #print "caching missing data slices, this can take several minutes depending on the number of unique dimensions of the data..."
-            #csa = common_subarrays(inan, 1)
-            size = Y.shape[1]
-            #logger.info('preparing subarrays {:3.3%}'.format((i+1.)/size))
-            Ys = []
-            next_ten = [0.]
-            count = itertools.count()
-            for v, y in itertools.izip(inan.T, Y.T[:,:,None]):
-                i = count.next()
-                if ((i+1.)/size) >= next_ten[0]:
-                    logger.info('preparing subarrays {:>6.1%}'.format((i+1.)/size))
-                    next_ten[0] += .1
-                Ys.append(y[v,:])
-            next_ten = [0.]
-            count = itertools.count()
-            def trace(y):
-                i = count.next()
-                if ((i+1.)/size) >= next_ten[0]:
-                    logger.info('preparing traces {:>6.1%}'.format((i+1.)/size))
-                    next_ten[0] += .1
-                y = y[inan[:,i],i:i+1]
-                return np.einsum('ij,ij->', y,y)
-            traces = [trace(Y) for _ in xrange(size)]
-            return Ys, traces
-        else:
-            self._subarray_indices = [[slice(None),slice(None)]]
-            return [Y], [(Y**2).sum()]
-
-    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None):
-        if isinstance(X, VariationalPosterior):
-            uncertain_inputs = True
-            psi0_all = kern.psi0(Z, X)
-            psi1_all = kern.psi1(Z, X)
-        else:
-            uncertain_inputs = False
-            psi0_all = kern.Kdiag(X)
-            psi1_all = kern.K(X, Z)
-
-        Ys, traces = self._Y(Y)
-        beta_all = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
-        het_noise = beta_all.size != 1
-        num_inducing = Z.shape[0]
-
-        dL_dpsi0_all = np.zeros(Y.shape[0])
-        dL_dpsi1_all = np.zeros((Y.shape[0], num_inducing))
-        if uncertain_inputs:
-            dL_dpsi2_all = np.zeros((Y.shape[0], num_inducing, num_inducing))
-        dL_dR = 0
-        woodbury_vector = np.zeros((num_inducing, Y.shape[1]))
-        woodbury_inv_all = np.zeros((num_inducing, num_inducing, Y.shape[1]))
-        dL_dKmm = 0
-        log_marginal = 0
-
-        Kmm = kern.K(Z).copy()
-        diag.add(Kmm, self.const_jitter)
-        #factor Kmm
-        if Lm is None:
-            Lm = jitchol(Kmm)
-        if uncertain_inputs: LmInv = dtrtri(Lm)
-
-        size = len(Ys)
-        next_ten = 0
-        for i, [y, v, trYYT] in enumerate(itertools.izip(Ys, self._inan.T, traces)):
-            if ((i+1.)/size) >= next_ten:
-                logger.info('inference {:> 6.1%}'.format((i+1.)/size))
-                next_ten += .1
-            if het_noise: beta = beta_all[i]
-            else: beta = beta_all
-            VVT_factor = (y*beta)
-            output_dim = 1#len(ind)
-            psi0 = psi0_all[v]
-            psi1 = psi1_all[v, :]
-            if uncertain_inputs:
-                psi2 = kern.psi2(Z, X[v, :])
-            else: psi2 = None
-            num_data = psi1.shape[0]
-            if uncertain_inputs:
-                if het_noise: psi2_beta = psi2 * (beta.flatten().reshape(num_data, 1, 1)).sum(0)
-                else: psi2_beta = psi2 * (beta)
-                A = LmInv.dot(psi2_beta.dot(LmInv.T))
-            else:
-                if het_noise: tmp = psi1 * (np.sqrt(beta.reshape(num_data, 1)))
-                else: tmp = psi1 * (np.sqrt(beta))
-                tmp, _ = dtrtrs(Lm, tmp.T, lower=1)
-                A = tdot(tmp) #print A.sum()
-            # factor B
-            B = np.eye(num_inducing) + A
-            LB = jitchol(B)
-            psi1Vf = psi1.T.dot(VVT_factor)
-            tmp, _ = dtrtrs(Lm, psi1Vf, lower=1, trans=0)
-            _LBi_Lmi_psi1Vf, _ = dtrtrs(LB, tmp, lower=1, trans=0)
-            tmp, _ = dtrtrs(LB, _LBi_Lmi_psi1Vf, lower=1, trans=1)
-            Cpsi1Vf, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
-            # data fit and derivative of L w.r.t. Kmm
-            delit = tdot(_LBi_Lmi_psi1Vf)
-            data_fit = np.trace(delit)
-            DBi_plus_BiPBi = backsub_both_sides(LB, output_dim * np.eye(num_inducing) + delit)
-            delit = -0.5 * DBi_plus_BiPBi
-            delit += -0.5 * B * output_dim
-            delit += output_dim * np.eye(num_inducing)
-            dL_dKmm += backsub_both_sides(Lm, delit)
-            # derivatives of L w.r.t. psi
-            dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm,
-                                                            VVT_factor, Cpsi1Vf, DBi_plus_BiPBi,
-                                                            psi1, het_noise, uncertain_inputs)
-            dL_dpsi0_all[v] += dL_dpsi0
-            dL_dpsi1_all[v, :] += dL_dpsi1
-            if uncertain_inputs:
-                dL_dpsi2_all[v, :, :] += dL_dpsi2
-            # log marginal likelihood
-            log_marginal += _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
-                                                             psi0, A, LB, trYYT, data_fit, VVT_factor)
-            #put the gradients in the right places
-            dL_dR += _compute_dL_dR(likelihood,
-                                    het_noise, uncertain_inputs, LB,
-                                    _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
-                                    psi0, psi1, beta,
-                                    data_fit, num_data, output_dim, trYYT, Y)
-            #if full_VVT_factor:
-            woodbury_vector[:, i:i+1] = Cpsi1Vf
-            #else:
-            #    print 'foobar'
-            #    tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
-            #    tmp, _ = dpotrs(LB, tmp, lower=1)
-            #    woodbury_vector[:, ind] = dtrtrs(Lm, tmp, lower=1, trans=1)[0]
-            #import ipdb;ipdb.set_trace()
-            Bi, _ = dpotri(LB, lower=1)
-            symmetrify(Bi)
-            Bi = -dpotri(LB, lower=1)[0]
-            diag.add(Bi, 1)
-            woodbury_inv_all[:, :, i:i+1] = backsub_both_sides(Lm, Bi)[:,:,None]
-
-        dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
-        # gradients:
-        if uncertain_inputs:
-            grad_dict = {'dL_dKmm': dL_dKmm,
-                         'dL_dpsi0':dL_dpsi0_all,
-                         'dL_dpsi1':dL_dpsi1_all,
-                         'dL_dpsi2':dL_dpsi2_all,
-                         'dL_dthetaL':dL_dthetaL}
-        else:
-            grad_dict = {'dL_dKmm': dL_dKmm,
-                         'dL_dKdiag':dL_dpsi0_all,
-                         'dL_dKnm':dL_dpsi1_all,
-                         'dL_dthetaL':dL_dthetaL}
-        post = Posterior(woodbury_inv=woodbury_inv_all, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
-        return post, log_marginal, grad_dict
-
 def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, VVT_factor, Cpsi1Vf, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs):
     dL_dpsi0 = -0.5 * output_dim * (beta[:,None] * np.ones([num_data, 1])).flatten()
     dL_dpsi1 = np.dot(VVT_factor, Cpsi1Vf.T)


@@ -21,6 +21,11 @@ class StochasticStorage(object):
         """
         pass

+    def reset(self):
+        """
+        Reset the state of this stochastics generator.
+        """
+
 class SparseGPMissing(StochasticStorage):
     def __init__(self, model, batchsize=1):
         """
@@ -36,18 +41,19 @@ class SparseGPStochastics(StochasticStorage):
    and the indices corresponding to those
    """
    def __init__(self, model, batchsize=1):
-        import itertools
        self.batchsize = batchsize
-        if self.batchsize == 1:
-            self.dimensions = itertools.cycle(range(model.Y_normalized.shape[1]))
-        else:
-            import numpy as np
-            self.dimensions = lambda: np.random.choice(model.Y_normalized.shape[1], size=batchsize, replace=False)
-        self.d = None
+        self.output_dim = model.Y.shape[1]
+        self.reset()
        self.do_stochastics()

    def do_stochastics(self):
        if self.batchsize == 1:
-            self.d = [self.dimensions.next()]
+            self.current_dim = (self.current_dim+1)%self.output_dim
+            self.d = [self.current_dim]
        else:
-            self.d = self.dimensions()
+            import numpy as np
+            self.d = np.random.choice(self.output_dim, size=self.batchsize, replace=False)
+
+    def reset(self):
+        self.current_dim = -1
+        self.d = None
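
A minimal sketch of the rewritten dimension cycling (illustrative only; _ToyModel is a hypothetical stand-in exposing the single attribute used here):

import numpy as np

class _ToyModel(object):
    Y = np.zeros((5, 3))              # three output dimensions

s = SparseGPStochastics(_ToyModel(), batchsize=1)
print(s.d)                            # [0]
s.do_stochastics(); print(s.d)        # [1]
s.do_stochastics(); print(s.d)        # [2]
s.do_stochastics(); print(s.d)        # [0]  -- wraps around via the modulo
s.reset()                             # current_dim back to -1, d back to None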


@@ -80,7 +80,10 @@ class Gaussian(Likelihood):
     def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
         if full_cov:
-            var += np.eye(var.shape[0])*self.variance
+            if var.ndim == 2:
+                var += np.eye(var.shape[0])*self.variance
+            if var.ndim == 3:
+                var += np.atleast_3d(np.eye(var.shape[0])*self.variance)
         else:
             var += self.variance
         return mu, var
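
The intent, as I read it (not wording from the commit): the Gaussian likelihood turns the latent predictive (co)variance into an observation (co)variance by adding the noise variance,

    Sigma_y = Sigma_f + sigma^2 * I      (full covariance; applied per output slice when var has three dimensions),
    var_y   = var_f + sigma^2            (diagonal-only case),

which is why the full_cov branch now distinguishes a single covariance matrix (ndim == 2) from a stack of per-output covariances (ndim == 3).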


@@ -162,7 +162,7 @@ def plot_latent(model, labels=None, which_indices=None,
         else:
             x = X[index, input_1]
             y = X[index, input_2]
-        ax.scatter(x, y, marker=m, s=s, color=Tango.nextMedium(), label=this_label)
+        ax.scatter(x, y, marker=m, s=s, c=Tango.nextMedium(), label=this_label, linewidth=.5, edgecolor='k', alpha=.9)

     ax.set_xlabel('latent dimension %i' % input_1)
     ax.set_ylabel('latent dimension %i' % input_2)
@@ -175,7 +175,7 @@ def plot_latent(model, labels=None, which_indices=None,
     if plot_inducing:
         Z = model.Z
-        ax.plot(Z[:, input_1], Z[:, input_2], '^w')
+        ax.scatter(Z[:, input_1], Z[:, input_2], c='w', s=14, marker="^", edgecolor='k', linewidth=.3, alpha=.6)

     ax.set_xlim((xmin, xmax))
     ax.set_ylim((ymin, ymax))