mirror of https://github.com/SheffieldML/GPy.git

sparse GP now working nicely
parent e0dbfbe148
commit 9f40ab0f83
3 changed files with 24 additions and 12 deletions

@@ -14,7 +14,7 @@ class Posterior(object):
     schemes and the model classes.

     """
-    def __init__(self, woodbury_chol=None, woodbury_vector=None, K=None, mean=None, cov=None, K_chol=None):
+    def __init__(self, woodbury_chol=None, woodbury_vector=None, K=None, mean=None, cov=None, K_chol=None, woodbury_inv=None):
         """
         woodbury_chol : a lower triangular matrix L that satisfies posterior_covariance = K - K L^{-T} L^{-1} K
         woodbury_vector : a matrix (or vector, as Nx1 matrix) M which satisfies posterior_mean = K M

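As a side note on the parameterisations accepted here: the docstring relations can be checked with plain numpy. This is a minimal sketch (not GPy API), assuming woodbury_inv is the matrix W = L^{-T} L^{-1} built from woodbury_chol:

    import numpy as np

    K = np.array([[1.0, 0.5], [0.5, 1.0]])                      # prior covariance
    L = np.linalg.cholesky(np.array([[2.0, 0.3], [0.3, 1.5]]))  # stand-in for woodbury_chol (lower triangular)
    M = np.array([[0.3], [0.1]])                                # stand-in for woodbury_vector

    W = np.linalg.solve(L.T, np.linalg.solve(L, np.eye(2)))     # assumed woodbury_inv = L^{-T} L^{-1}

    posterior_mean = K.dot(M)               # posterior_mean = K M
    posterior_cov = K - K.dot(W).dot(K)     # posterior_covariance = K - K L^{-T} L^{-1} K

Either the Cholesky factor or its inverse form gives the same posterior, which is why the constructor can now accept woodbury_inv directly.
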
@@ -45,7 +45,9 @@ class Posterior(object):
         #obligatory
         self._K = K

-        if ((woodbury_chol is not None) and (woodbury_vector is not None) and (K is not None)) or ((mean is not None) and (cov is not None) and (K is not None)):
+        if ((woodbury_chol is not None) and (woodbury_vector is not None))\
+        or ((woodbury_inv is not None) and (woodbury_vector is not None))\
+        or ((mean is not None) and (cov is not None)):
             pass # we have sufficient to compute the posterior
         else:
             raise ValueError, "insufficient information to compute the posterior"

@@ -56,13 +58,16 @@ class Posterior(object):
         self._woodbury_chol = woodbury_chol
         self._woodbury_vector = woodbury_vector

+        #option 2.
+        self._woodbury_inv = woodbury_inv
+        #and woodbury vector
+
         #option 2:
         self._mean = mean
         self._covariance = cov

         #compute this lazily
         self._precision = None
-        self._woodbury_inv = None

     @property
     def mean(self):

@@ -99,7 +104,6 @@ class Posterior(object):
             symmetrify(self._woodbury_inv)
         return self._woodbury_inv
-

     @property
     def woodbury_vector(self):
         if self._woodbury_vector is None:

@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

 from posterior import Posterior
-from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs
+from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dpotri, symmetrify
 import numpy as np
 log_2_pi = np.log(2*np.pi)

@@ -163,7 +163,7 @@ class VarDTC(object):
         else:
             lik_1 = -0.5 * num_data * output_dim * (np.log(2.*np.pi) - np.log(beta)) - 0.5 * beta * trYYT
         lik_2 = -0.5 * output_dim * (np.sum(beta * psi0) - np.trace(A))
-        lik_3 = -output_dim * (np.sum(np.log(np.diag(LB)))) # + 0.5 * num_inducing * np.log(sf2))
+        lik_3 = -output_dim * (np.sum(np.log(np.diag(LB))))
         lik_4 = 0.5 * data_fit
         log_marginal = lik_1 + lik_2 + lik_3 + lik_4

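Collected into one expression, the bound assembled in this branch is (in the code's own notation, with N = num_data and D = output_dim):

    log_marginal = -0.5*N*D*(log(2*pi) - log(beta)) - 0.5*beta*trYYT
                   - 0.5*D*(sum(beta*psi0) - tr(A))
                   - D*sum(log(diag(LB)))
                   + 0.5*data_fit

The change in this hunk only drops the commented-out `0.5 * num_inducing * np.log(sf2)` remnant from lik_3; the computed value is unchanged.
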
@@ -178,6 +178,7 @@ class VarDTC(object):
         kern.update_gradients_sparse(X=X, Z=Z, **grad_dict)

         #get sufficient things for posterior prediction
+        #TODO: do we really want to do this in the loop?
         if VVT_factor.shape[1] == Y.shape[1]:
             woodbury_vector = Cpsi1Vf # == Cpsi1V
         else:

@@ -185,11 +186,13 @@ class VarDTC(object):
             tmp, _ = dtrtrs(Lm, np.asfortranarray(psi1V), lower=1, trans=0)
             tmp, _ = dpotrs(LB, tmp, lower=1)
             woodbury_vector, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
-        #TODO: totally wrong, fix.
-        woodbury_chol = np.eye(num_inducing)
+        Bi, _ = dpotri(LB, lower=0)
+        symmetrify(Bi)
+        woodbury_inv = backsub_both_sides(Lm, np.eye(num_inducing) - Bi)
+

         #construct a posterior object
-        post = Posterior(woodbury_chol=woodbury_chol, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
+        post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)

         return post, log_marginal, grad_dict

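For reference, the new woodbury_inv line rests on the identity Lm^{-T} (I - B^{-1}) Lm^{-1} = Kmm^{-1} - (Kmm + beta*Psi2)^{-1}, assuming (as elsewhere in this inference code, not shown in the hunk) that Kmm = Lm Lm^T, A = beta * Lm^{-1} Psi2 Lm^{-T}, B = I + A, and that backsub_both_sides(Lm, X) returns Lm^{-T} X Lm^{-1}. A standalone numpy check of that algebra under those assumptions (not GPy code):

    import numpy as np

    rng = np.random.RandomState(0)
    num_inducing = 4
    beta = 2.0

    # hypothetical positive-definite Kmm and Psi2, only to check the identity numerically
    X = rng.randn(num_inducing, num_inducing)
    Kmm = X.dot(X.T) + num_inducing * np.eye(num_inducing)
    Y = rng.randn(num_inducing, num_inducing)
    Psi2 = Y.dot(Y.T)

    Lm = np.linalg.cholesky(Kmm)
    A = beta * np.linalg.solve(Lm, np.linalg.solve(Lm, Psi2).T)   # beta * Lm^{-1} Psi2 Lm^{-T}
    B = np.eye(num_inducing) + A
    Bi = np.linalg.inv(B)

    # assumed behaviour of backsub_both_sides(Lm, I - Bi): Lm^{-T} (I - B^{-1}) Lm^{-1}
    tmp = np.linalg.solve(Lm.T, np.eye(num_inducing) - Bi)
    woodbury_inv = np.linalg.solve(Lm.T, tmp.T).T

    # equals the sparse-posterior Woodbury matrix Kmm^{-1} - (Kmm + beta*Psi2)^{-1}
    direct = np.linalg.inv(Kmm) - np.linalg.inv(Kmm + beta * Psi2)
    assert np.allclose(woodbury_inv, direct)
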
@@ -5,6 +5,7 @@ import pylab as pb
 import numpy as np
 import Tango
 from base_plots import gpplot, x_frame1D, x_frame2D
+from ...util.misc import param_to_array


 def plot_fit(model, plot_limits=None, which_data_rows='all',

@@ -95,8 +96,11 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',

         #add inducing inputs (if a sparse model is used)
         if hasattr(model,"Z"):
-            Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims]
-            ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12)
+            #Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims]
+            Zu = param_to_array(model.Z[:,free_dims])
+            z_height = ax.get_ylim()[0]
+            ax.plot(Zu, np.zeros_like(Zu) + z_height, 'r|', mew=1.5, markersize=12)
+

         #add error bars for uncertain (if input uncertainty is being modelled)
         if hasattr(model,"has_uncertain_inputs"):

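The added param_to_array call converts model.Z (a GPy parameter object) to a plain ndarray before plotting, and the 'r|' markers draw the inducing inputs as a rug along the bottom of the axes. A standalone matplotlib sketch of the same idea, with stand-in data rather than the GPy plotting code:

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(-3, 3, 200)
    Zu = np.linspace(-2.5, 2.5, 10)               # stand-in for the inducing inputs model.Z

    fig, ax = plt.subplots()
    ax.plot(x, np.sin(x), 'b-')                   # stand-in for the posterior mean curve
    z_height = ax.get_ylim()[0]                   # pin the markers to the bottom of the axes
    ax.plot(Zu, np.zeros_like(Zu) + z_height, 'r|', mew=1.5, markersize=12)
    plt.show()
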
@@ -144,7 +148,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',

         #add inducing inputs (if a sparse model is used)
         if hasattr(model,"Z"):
-            Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims]
+            #Zu = model.Z[:,free_dims] * model._Xscale[:,free_dims] + model._Xoffset[:,free_dims]
+            Zu = model.Z[:,free_dims]
             ax.plot(Zu[:,free_dims[0]], Zu[:,free_dims[1]], 'wo')

         else: