mirror of https://github.com/SheffieldML/GPy.git, synced 2026-04-27 22:06:22 +02:00

Worked out in terms of W, needs gradients implementing

This commit is contained in:
parent 46d59c94b2
commit a9d5555976

3 changed files with 57 additions and 40 deletions
@@ -15,13 +15,13 @@ def student_t_approx():
     Y = np.sin(X)
 
     #Add student t random noise to datapoints
-    deg_free = 2.5
+    deg_free = 3.5
     t_rv = t(deg_free, loc=0, scale=1)
     noise = t_rv.rvs(size=Y.shape)
     Y += noise
 
     #Add some extreme value noise to some of the datapoints
-    #percent_corrupted = 0.05
+    #percent_corrupted = 0.15
     #corrupted_datums = int(np.round(Y.shape[0] * percent_corrupted))
     #indices = np.arange(Y.shape[0])
     #np.random.shuffle(indices)
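
The heavy tails are the point of this demo: a Student-t with 3.5 degrees of freedom throws occasional large outliers that would wreck a Gaussian-noise GP. A minimal standalone sketch of the corruption step above (the seed and array sizes are illustrative, not the demo's):

    import numpy as np
    from scipy.stats import t

    np.random.seed(0)
    X = np.linspace(0, 2 * np.pi, 50)[:, None]     # toy inputs
    Y = np.sin(X)                                  # clean targets
    Y += t(3.5, loc=0, scale=1).rvs(size=Y.shape)  # heavy-tailed noise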
@@ -31,11 +31,11 @@ def student_t_approx():
     #Y[corrupted_indices] += noise
 
     # Kernel object
-    #print X.shape
-    #kernel = GPy.kern.rbf(X.shape[1])
+    print X.shape
+    kernel = GPy.kern.rbf(X.shape[1])
 
-    ##A GP should completely break down due to the points as they get a lot of weight
-    ## create simple GP model
+    #A GP should completely break down due to the points as they get a lot of weight
+    # create simple GP model
    #m = GPy.models.GP_regression(X, Y, kernel=kernel)
 
    ## optimize
@@ -46,27 +46,27 @@ def student_t_approx():
     #print m
 
     #with a student t distribution, since it has heavy tails it should work well
-    #likelihood_function = student_t(deg_free, sigma=1)
-    #lap = Laplace(Y, likelihood_function)
-    #cov = kernel.K(X)
-    #lap.fit_full(cov)
+    likelihood_function = student_t(deg_free, sigma=1)
+    lap = Laplace(Y, likelihood_function)
+    cov = kernel.K(X)
+    lap.fit_full(cov)
 
-    #test_range = np.arange(0, 10, 0.1)
-    #plt.plot(test_range, t_rv.pdf(test_range))
-    #for i in xrange(X.shape[0]):
-        #mode = lap.f_hat[i]
-        #covariance = lap.hess_hat_i[i,i]
-        #scaling = np.exp(lap.ln_z_hat)
-        #normalised_approx = norm(loc=mode, scale=covariance)
-        #print "Normal with mode %f, and variance %f" % (mode, covariance)
-        #plt.plot(test_range, scaling*normalised_approx.pdf(test_range))
-    #plt.show()
-    #import ipdb; ipdb.set_trace() ### XXX BREAKPOINT
+    test_range = np.arange(0, 10, 0.1)
+    plt.plot(test_range, t_rv.pdf(test_range))
+    for i in xrange(X.shape[0]):
+        mode = lap.f_hat[i]
+        covariance = lap.hess_hat_i[i,i]
+        scaling = np.exp(lap.ln_z_hat)
+        normalised_approx = norm(loc=mode, scale=covariance)
+        print "Normal with mode %f, and variance %f" % (mode, covariance)
+        plt.plot(test_range, scaling*normalised_approx.pdf(test_range))
+    plt.show()
+    import ipdb; ipdb.set_trace() ### XXX BREAKPOINT
 
     # Likelihood object
     t_distribution = student_t(deg_free, sigma=1)
     stu_t_likelihood = Laplace(Y, t_distribution)
-    kernel = GPy.kern.rbf(X.shape[1])
+    kernel = GPy.kern.rbf(X.shape[1]) + GPy.kern.bias(X.shape[1])
 
     m = GPy.models.GP(X, stu_t_likelihood, kernel)
     m.ensure_default_constraints()
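
The plotting loop above is visualising one-dimensional Laplace approximations: a Gaussian centred at each mode, with variance taken from the curvature of the negative log density there, rescaled so the curves match the target density's height at the mode. A hedged sketch of that idea applied directly to the t density (finite differences stand in for the analytic curvature; none of these names are the demo's):

    import numpy as np
    from scipy.stats import norm, t

    t_rv = t(3.5, loc=0, scale=1)
    mode, h = 0.0, 1e-5
    # curvature of -log pdf at the mode gives the Gaussian's precision
    curv = -(t_rv.logpdf(mode + h) - 2 * t_rv.logpdf(mode)
             + t_rv.logpdf(mode - h)) / h**2
    approx = norm(loc=mode, scale=1 / np.sqrt(curv))
    scaling = t_rv.pdf(mode) / approx.pdf(mode)  # match heights at the mode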

@@ -1,11 +1,11 @@
 import numpy as np
 import scipy as sp
 import GPy
 #from GPy.util.linalg import jitchol
 from scipy.linalg import cholesky, eig, inv
 from functools import partial
 from GPy.likelihoods.likelihood import likelihood
 from GPy.util.linalg import pdinv,mdot
-import numpy.testing.assert_array_equal
+#import numpy.testing.assert_array_equal
 
 class Laplace(likelihood):
     """Laplace approximation to a posterior"""
@@ -56,8 +56,8 @@ class Laplace(likelihood):
         pass # TODO: Laplace likelihood might want to take some parameters...
 
     def _gradients(self,partial):
-        return np.zeros(0) # TODO: Laplace likelihood might want to take some parameters...
+        raise NotImplementedError
+        #return np.zeros(0) # TODO: Laplace likelihood might want to take some parameters...
 
     def _compute_GP_variables(self):
         """
@@ -83,16 +83,23 @@ class Laplace(likelihood):
         and $$\ln \tilde{z} = \ln z + \frac{N}{2}\ln 2\pi + \frac{1}{2}\tilde{Y}^{T}\tilde{\Sigma}^{-1}\tilde{Y}$$
 
         """
-        self.Sigma_tilde_i = self.hess_hat + self.Ki
+        self.Sigma_tilde_i = self.hess_hat_i #self.W #self.hess_hat_i - self.Ki
         #Do we really need to inverse Sigma_tilde_i? :(
-        (self.Sigma_tilde, _, _, self.log_Sig_i_det) = pdinv(self.Sigma_tilde_i)
-        Y_tilde = mdot(self.Sigma_tilde, self.hess_hat, self.f_hat) #f_hat? should be f but we must have optimized for them I guess?
-        self.Z_tilde = np.exp(self.ln_z_hat - self.NORMAL_CONST + (0.5 * mdot(Y_tilde.T, (self.Sigma_tilde_i, Y_tilde))))
+        if self.likelihood_function.log_concave:
+            (self.Sigma_tilde, _, _, _) = pdinv(self.Sigma_tilde_i)
+        else:
+            self.Sigma_tilde = inv(self.Sigma_tilde_i)
+        #f_hat? should be f but we must have optimized for them I guess?
+        Y_tilde = mdot(self.Sigma_tilde, self.hess_hat, self.f_hat)
+        self.Z_tilde = np.exp(self.ln_z_hat - self.NORMAL_CONST
+                              - 0.5*mdot(self.f_hat, self.hess_hat, self.f_hat)
+                              + 0.5*mdot(Y_tilde.T, (self.Sigma_tilde_i, Y_tilde))
+                              )
 
         self.Z = self.Z_tilde
         self.Y = Y_tilde
         self.covariance_matrix = self.Sigma_tilde
-        self.precision = 1/np.diag(self.Sigma_tilde)[:, None]
+        self.precision = 1 / np.diag(self.Sigma_tilde)[:, None]
         self.YYT = np.dot(self.Y, self.Y.T)
         import ipdb; ipdb.set_trace() ### XXX BREAKPOINT
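
For orientation, a common way to view this step (the pseudo-data trick for Laplace approximations): with mode f_hat and negative log-likelihood Hessian W, exact GP regression on targets Y_tilde with noise covariance W^{-1} reproduces the Laplace posterior mean and covariance. A self-contained sketch under those assumptions, in plain numpy rather than GPy's API:

    import numpy as np

    def laplace_pseudo_data(K, W, f_hat):
        # At the mode, K^{-1} f_hat equals the likelihood gradient,
        # so the pseudo-targets need no extra differentiation:
        Ki_f = np.linalg.solve(K, f_hat)
        Y_tilde = f_hat + np.linalg.solve(W, Ki_f)
        # Laplace posterior covariance (K^{-1} + W)^{-1}:
        Sigma = np.linalg.inv(np.linalg.inv(K) + W)
        return Y_tilde, Sigma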
@@ -112,34 +119,41 @@ class Laplace(likelihood):
         #FIXME: Can we get rid of this horrible reshaping?
         def obj(f):
             #f = f[:, None]
-            res = -1 * (self.likelihood_function.link_function(self.data[:,0], f) - 0.5 * mdot(f.T, (self.Ki, f)) + OBJ_CONST)
+            res = -1 * (self.likelihood_function.link_function(self.data[:, 0], f) - 0.5 * mdot(f.T, (self.Ki, f)) + OBJ_CONST)
             return float(res)
 
         def obj_grad(f):
             #f = f[:, None]
-            res = -1 * (self.likelihood_function.link_grad(self.data[:,0], f) - mdot(self.Ki, f))
+            res = -1 * (self.likelihood_function.link_grad(self.data[:, 0], f) - mdot(self.Ki, f))
             return np.squeeze(res)
 
         def obj_hess(f):
-            res = -1 * (-np.diag(self.likelihood_function.link_hess(self.data[:,0], f)) - self.Ki)
+            res = -1 * (-np.diag(self.likelihood_function.link_hess(self.data[:, 0], f)) - self.Ki)
             return np.squeeze(res)
 
         self.f_hat = sp.optimize.fmin_ncg(obj, f, fprime=obj_grad, fhess=obj_hess)
 
         #At this point get the hessian matrix
-        self.hess_hat = np.diag(self.likelihood_function.link_hess(self.data[:,0], self.f_hat)) + self.Ki
+        self.W = -np.diag(self.likelihood_function.link_hess(self.data[:, 0], self.f_hat))
+        self.hess_hat = self.Ki + self.W
         (self.hess_hat_i, _, _, self.log_hess_hat_det) = pdinv(self.hess_hat)
+        (self.hess_hat, _, _, self.log_hess_hat_i_det) = pdinv(self.hess_hat_i)
 
-        np.testing.assert_array_equal(self.hess_hat, hess_hat_new)
         #Check hess_hat is positive definite
         try:
             cholesky(self.hess_hat)
         except:
             raise ValueError("Must be positive definite")
 
         #Check its eigenvalues are positive
         eigenvalues = eig(self.hess_hat)
         if not np.all(eigenvalues > 0):
             raise ValueError("Eigen values not positive")
 
         #Need to add the constant as we previously were trying to avoid computing it (seems like a small overhead though...)
         #self.height_unnormalised = -1*obj(self.f_hat) #FIXME: Is it - obj constant and *-1?
         #z_hat is how much we need to scale the normal distribution by to get the area of our approximation close to
         #the area of p(f)p(y|f) we do this by matching the height of the distributions at the mode
         #z_hat = -0.5*ln|H| - 0.5*ln|K| - 0.5*f_hat*K^{-1}*f_hat + \sum_{n} ln p(y_n|f_n)
         #Unsure whether its log_hess or log_hess_i
-        self.ln_z_hat = -0.5*np.log(self.log_hess_hat_det) - 0.5*self.log_Kdet + self.likelihood_function.link_function(self.data[:,0], self.f_hat) - mdot(f.T, (self.Ki, f))
+        self.ln_z_hat = -0.5*np.log(self.log_hess_hat_det) - 0.5*self.log_Kdet + -1*self.likelihood_function.link_function(self.data[:,0], self.f_hat) - mdot(self.f_hat.T, (self.Ki, self.f_hat))
+        import ipdb; ipdb.set_trace() ### XXX BREAKPOINT
 
         return self._compute_GP_variables()
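
The fmin_ncg call above is the standard Laplace mode-finding step: minimise psi(f) = -log p(y|f) + 0.5 f' K^{-1} f. A self-contained sanity check of that setup, using a Gaussian likelihood so the mode is known in closed form (the toy K, sigma2 and N are assumptions, not values from this code):

    import numpy as np
    import scipy.optimize

    np.random.seed(0)
    N, sigma2 = 5, 0.1
    y = np.random.randn(N)
    K = np.eye(N) + 0.5 * np.ones((N, N))  # any positive definite matrix
    Ki = np.linalg.inv(K)

    obj = lambda f: 0.5 * np.sum((y - f)**2) / sigma2 + 0.5 * f.dot(Ki).dot(f)
    grad = lambda f: -(y - f) / sigma2 + Ki.dot(f)
    hess = lambda f: np.eye(N) / sigma2 + Ki

    f_hat = scipy.optimize.fmin_ncg(obj, np.zeros(N), fprime=grad,
                                    fhess=hess, disp=False)
    # for Gaussian noise the mode is the usual GP posterior mean
    expected = K.dot(np.linalg.solve(K + sigma2 * np.eye(N), y))
    assert np.allclose(f_hat, expected, atol=1e-4)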

@@ -19,6 +19,9 @@ class student_t(likelihood_function):
         self.v = deg_free
         self.sigma = sigma
 
+        #FIXME: This should be in the superclass
+        self.log_concave = False
+
     def link_function(self, y, f):
         """link_function $\ln p(y|f)$
         $$\ln p(y_{i}|f_{i}) = \ln \Gamma\left(\frac{v+1}{2}\right) - \ln\left(\Gamma\left(\frac{v}{2}\right)\sqrt{v \pi}\sigma\right) - \frac{v+1}{2}\ln\left(1 + \frac{1}{v}\left(\frac{y_{i} - f_{i}}{\sigma}\right)^2\right)$$
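
The docstring density is just the Student-t log-pdf written out. A quick numeric check of the expression against scipy (the test values are arbitrary):

    import numpy as np
    from scipy.special import gammaln
    from scipy.stats import t

    v, sigma, y, f = 3.5, 1.0, 0.3, -0.2
    lnp = (gammaln((v + 1) / 2.) - gammaln(v / 2.)
           - 0.5 * np.log(v * np.pi) - np.log(sigma)
           - ((v + 1) / 2.) * np.log(1 + ((y - f) / sigma)**2 / v))
    assert np.isclose(lnp, t.logpdf(y, v, loc=f, scale=sigma))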
@@ -70,7 +73,7 @@ class student_t(likelihood_function):
         assert y.shape == f.shape
         e = y - f
         #hess = ((self.v + 1) * e) / ((((self.sigma**2) * self.v) + e**2)**2)
-        hess = ((self.v + 1) * (e**2 - self.v*(self.sigma**2))) / ((((self.sigma**2) * self.v) + e**2)**2)
+        hess = ((self.v + 1)*(e**2 - self.v*(self.sigma**2))) / ((((self.sigma**2)*self.v) + e**2)**2)
        return hess
 
     def predictive_values(self, mu, var):
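
The hess expression above should equal the second derivative of the Student-t log-pdf with respect to f, which can be confirmed by finite differences. A small sketch (test values are arbitrary, not from this code):

    import numpy as np
    from scipy.stats import t

    v, sigma, y = 3.5, 1.0, 0.7
    f = np.linspace(-3.0, 3.0, 7)
    e = y - f
    analytic = ((v + 1) * (e**2 - v * sigma**2)) / ((sigma**2 * v + e**2)**2)

    h = 1e-5
    logpdf = lambda m: t.logpdf(y, v, loc=m, scale=sigma)
    numeric = (logpdf(f + h) - 2 * logpdf(f) + logpdf(f - h)) / h**2
    assert np.allclose(analytic, numeric, atol=1e-4)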