Lots of renaming; went through all the likelihood gradients again

Alan Saul 2013-05-29 17:33:06 +01:00
parent 117c377d13
commit 23ed2a2d15
4 changed files with 103 additions and 57 deletions

View file

@@ -69,22 +69,21 @@ def debug_student_t_noise_approx():
print "Clean Gaussian"
#A GP should completely break down due to the points as they get a lot of weight
# create simple GP model
m = GPy.models.GP_regression(X, Y, kernel=kernel1)
# optimize
m.ensure_default_constraints()
m.optimize()
# plot
if plot:
plt.figure(1)
plt.suptitle('Gaussian likelihood')
plt.subplot(131)
m.plot()
plt.plot(X_full, Y_full)
print m
#m = GPy.models.GP_regression(X, Y, kernel=kernel1)
## optimize
#m.ensure_default_constraints()
#m.optimize()
## plot
#if plot:
#plt.figure(1)
#plt.suptitle('Gaussian likelihood')
#plt.subplot(131)
#m.plot()
#plt.plot(X_full, Y_full)
#print m
edited_real_sd = initial_var_guess #real_sd
import ipdb; ipdb.set_trace() ### XXX BREAKPOINT
print "Clean student t, rasm"
t_distribution = GPy.likelihoods.likelihood_functions.student_t(deg_free, sigma=edited_real_sd)
stu_t_likelihood = GPy.likelihoods.Laplace(Y.copy(), t_distribution, rasm=True)
@@ -95,10 +94,10 @@ def debug_student_t_noise_approx():
m.constrain_positive('t_noi')
#m.constrain_fixed('t_noise_variance', real_sd)
m.update_likelihood_approximation()
m.optimize('scg', messages=True)
print(m)
return m
#m.optimize('lbfgsb', messages=True, callback=m._update_params_callback)
m.optimize('scg', messages=True)
if plot:
plt.suptitle('Student-t likelihood')
plt.subplot(132)
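
For context, a minimal sketch of the Student-t / Laplace setup this debug script exercises, assuming the 2013-era GPy API shown in the hunks above. The toy data, the kernel constructor and the way the likelihood is handed to the model are assumptions (the actual model construction falls outside the visible hunk); the student_t, Laplace, constrain_positive, update_likelihood_approximation and optimize calls are taken from the diff itself.

    import numpy as np
    import GPy

    # toy data; the real debug script builds X, Y and Y_full elsewhere
    X = np.linspace(0, 10, 50)[:, None]
    Y = np.sin(X) + 0.1 * np.random.randn(*X.shape)

    deg_free, initial_var_guess = 5, 0.1   # hypothetical values
    kernel1 = GPy.kern.rbf(1)              # assumed kernel constructor

    # Student-t noise wrapped in the Laplace approximation, as in the hunk above
    t_distribution = GPy.likelihoods.likelihood_functions.student_t(deg_free, sigma=initial_var_guess)
    stu_t_likelihood = GPy.likelihoods.Laplace(Y.copy(), t_distribution, rasm=True)

    # Assumption: the GP model takes the Laplace likelihood directly; the real
    # constructor call is outside the hunk shown above.
    m = GPy.models.GP(X, stu_t_likelihood, kernel1)
    m.ensure_default_constraints()
    m.constrain_positive('t_noi')          # keep the Student-t noise scale positive
    m.update_likelihood_approximation()    # re-find the Laplace mode f_hat
    m.optimize('scg', messages=True)
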

View file

@@ -79,17 +79,40 @@ class Laplace(likelihood):
return (self._Kgradients(dL_d_K_Sigma, dK_dthetaK), self._gradients(dL_d_K_Sigma))
def _shared_gradients_components(self):
Ki, _, _, _ = pdinv(self.K)
Ki_W_i = inv(Ki + self.W) # (K^-1 + W)^-1, not numerically stable, do it directly for now
d3lik_d3fhat = self.likelihood_function.d3lik_d3f(self.data, self.f_hat)
dL_dfhat = -0.5*np.diag(Ki_W_i)*d3lik_d3fhat # elementwise: -0.5 * diag((K^-1 + W)^-1) * d3 log p(y|f)/df3
KW = np.dot(self.K, self.W)
I_KW_i = inv(np.eye(KW.shape[0]) + KW)
return dL_dfhat, Ki, I_KW_i
def _Kgradients(self, dL_d_K_Sigma, dK_dthetaK):
"""
Gradients with respect to prior kernel parameters
"""
dL_dfhat, Ki, I_KW_i = self._shared_gradients_components()
K_Wi_i = inv(self.K + inv(self.W))
dlp = self.likelihood_function.dlik_df(self.data, self.f_hat)
dL_dthetaK = np.zeros(dK_dthetaK.shape)
for thetaK_i, dK_dthetaK_i in enumerate(dK_dthetaK):
#Explicit
dL_dthetaK[thetaK_i] = 0.5*mdot(self.f_hat.T, Ki, dK_dthetaK_i, Ki, self.f_hat) - 0.5*np.trace(np.dot(K_Wi_i, dK_dthetaK_i))
#Implicit
df_hat_dthetaK = mdot(I_KW_i, dK_dthetaK_i, dlp)
dL_dthetaK[thetaK_i] += np.dot(dL_dfhat.T, df_hat_dthetaK)
return dL_dthetaK
def _gradients(self, partial):
"""
Gradients with respect to likelihood parameters
"""
dL_dfhat, Ki, I_KW_i = self._shared_gradients_components()
dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = self.likelihood_function._gradients(self.data, self.f_hat)
dL_dthetaL = np.zeros(len(dlik_dthetaL))
return dL_dthetaL # should be an array with one entry per parameter being optimised; the student t only optimises sigma, so this is (1,)
def _compute_GP_variables(self):
@@ -197,7 +220,7 @@ class Laplace(likelihood):
#At this point get the hessian matrix
#print "Data: ", self.data
#print "fhat: ", self.f_hat
self.W = -np.diag(self.likelihood_function.link_hess(self.data, self.f_hat, extra_data=self.extra_data))
self.W = -np.diag(self.likelihood_function.d2lik_d2f(self.data, self.f_hat, extra_data=self.extra_data))
if not self.likelihood_function.log_concave:
self.W[self.W < 0] = 1e-6 # FIXME-HACK: This is a hack since GPy can't handle negative variances which can occur
@@ -212,7 +235,7 @@ class Laplace(likelihood):
Ki_W_i = self.K - mdot(self.K, self.W_12, self.Bi, self.W_12, self.K)
self.ln_Ki_W_i_det = np.linalg.det(Ki_W_i)
b = np.dot(self.W, self.f_hat) + self.likelihood_function.link_grad(self.data, self.f_hat, extra_data=self.extra_data)[:, None]
b = np.dot(self.W, self.f_hat) + self.likelihood_function.dlik_df(self.data, self.f_hat, extra_data=self.extra_data)[:, None]
solve_chol = cho_solve((self.B_chol, True), mdot(self.W_12, (self.K, b)))
a = b - mdot(self.W_12, solve_chol)
self.Ki_f = a
@@ -259,11 +282,11 @@ class Laplace(likelihood):
return float(res)
def obj_grad(f):
res = -1 * (self.likelihood_function.link_grad(self.data[:, 0], f, extra_data=self.extra_data) - np.dot(self.Ki, f))
res = -1 * (self.likelihood_function.dlik_df(self.data[:, 0], f, extra_data=self.extra_data) - np.dot(self.Ki, f))
return np.squeeze(res)
def obj_hess(f):
res = -1 * (--np.diag(self.likelihood_function.link_hess(self.data[:, 0], f, extra_data=self.extra_data)) - self.Ki)
res = -1 * (--np.diag(self.likelihood_function.d2lik_d2f(self.data[:, 0], f, extra_data=self.extra_data)) - self.Ki)
return np.squeeze(res)
f_hat = sp.optimize.fmin_ncg(obj, f, fprime=obj_grad, fhess=obj_hess)
@@ -294,7 +317,7 @@ class Laplace(likelihood):
i = 0
while difference > epsilon and i < MAX_ITER and rs < MAX_RESTART:
#f_old = f.copy()
W = -np.diag(self.likelihood_function.link_hess(self.data, f, extra_data=self.extra_data))
W = -np.diag(self.likelihood_function.d2lik_d2f(self.data, f, extra_data=self.extra_data))
if not self.likelihood_function.log_concave:
W[W < 0] = 1e-6 # FIXME-HACK: This is a hack since GPy can't handle negative variances which can occur
# If the likelihood is non-log-concave, we want to say that there is a negative variance
@@ -303,7 +326,7 @@ class Laplace(likelihood):
B, L, W_12 = self._compute_B_statistics(K, W)
W_f = np.dot(W, f)
grad = self.likelihood_function.link_grad(self.data, f, extra_data=self.extra_data)[:, None]
grad = self.likelihood_function.dlik_df(self.data, f, extra_data=self.extra_data)[:, None]
#Find K_i_f
b = W_f + grad
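
As a reference for the _Kgradients and _shared_gradients_components hunks above, here is a standalone numpy sketch of the same quantity: the gradient of the Laplace-approximate log marginal with respect to kernel parameters, split into the explicit term and the implicit term that goes through the mode f_hat. All names are local to the sketch, and the per-parameter matrices dK_dthetaK are assumed to be supplied as raw (N, N) gradients, which is exactly what the FIXME in the model file below is about.

    import numpy as np
    from numpy.linalg import inv

    def laplace_dL_dthetaK(K, dK_dthetaK, f_hat, W, dlp, d3lp):
        """Gradient of the Laplace-approximate log marginal w.r.t. kernel parameters.

        K          : (N, N) prior covariance
        dK_dthetaK : list of raw (N, N) gradients dK/dtheta_k, one per kernel parameter
        f_hat      : (N,)   posterior mode
        W          : (N, N) diagonal matrix, -d2 log p(y|f)/df2 at f_hat
        dlp        : (N,)   d  log p(y|f)/df   at f_hat
        d3lp       : (N,)   d3 log p(y|f)/df3  at f_hat
        """
        Ki = inv(K)                                   # not numerically stable; fine for a sketch
        Ki_W_i = inv(Ki + W)                          # (K^-1 + W)^-1
        K_Wi_i = inv(K + inv(W))                      # (K + W^-1)^-1
        I_KW_i = inv(np.eye(K.shape[0]) + K.dot(W))   # (I + KW)^-1

        # dL/df_hat: how the objective moves when the mode moves
        dL_dfhat = -0.5 * np.diag(Ki_W_i) * d3lp

        grads = np.zeros(len(dK_dthetaK))
        for k, dK in enumerate(dK_dthetaK):
            # explicit dependence of the objective on K
            explicit = 0.5 * f_hat.dot(Ki).dot(dK).dot(Ki).dot(f_hat) \
                       - 0.5 * np.trace(K_Wi_i.dot(dK))
            # implicit dependence through the movement of the mode f_hat
            df_hat_dtheta = I_KW_i.dot(dK).dot(dlp)
            grads[k] = explicit + dL_dfhat.dot(df_hat_dtheta)
        return grads
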

View file

@@ -159,10 +159,10 @@ class student_t(likelihood_function):
d2ln p(yi|fi)_d2fifj
"""
def __init__(self, deg_free, sigma=2):
#super(student_t, self).__init__()
self.v = deg_free
self.sigma = sigma
self.log_concave = False
#super(student_t, self).__init__()
self._set_params(np.asarray(sigma))
@@ -174,8 +174,6 @@ class student_t(likelihood_function):
def _set_params(self, x):
self.sigma = float(x)
#self.covariance_matrix = np.eye(self.N)*self._variance
#self.precision = 1./self._variance
@property
def variance(self, extra_data=None):
@@ -185,6 +183,8 @@ class student_t(likelihood_function):
"""link_function $\ln p(y|f)$
$$\ln p(y_{i}|f_{i}) = \ln \Gamma(\frac{v+1}{2}) - \ln \Gamma(\frac{v}{2}) - \ln(\sqrt{v \pi}\sigma) - \frac{v+1}{2}\ln\left(1 + \frac{1}{v}\left(\frac{y_{i} - f_{i}}{\sigma}\right)^{2}\right)$$
For Wolfram Alpha, the parts relevant to the derivative w.r.t. sigma are -log(sqrt(v*pi)*s) - (1/2)*(v + 1)*log(1 + (1/v)*((y-f)/s)^2)
:y: data
:f: latent variables f
:extra_data: extra_data which is not used in student t distribution
@@ -198,17 +198,16 @@ class student_t(likelihood_function):
e = y - f
objective = (gammaln((self.v + 1) * 0.5)
- gammaln(self.v * 0.5)
+ np.log(self.sigma * np.sqrt(self.v * np.pi))
- (self.v + 1) * 0.5
* np.log(1 + ((e**2 / self.sigma**2) / self.v))
)
- np.log(self.sigma * np.sqrt(self.v * np.pi))
- (self.v + 1) * 0.5 * np.log(1 + ((e**2 / self.sigma**2) / self.v))
)
return np.sum(objective)
def link_grad(self, y, f, extra_data=None):
def dlik_df(self, y, f, extra_data=None):
"""
Gradient of the link function at y, given f w.r.t f
$$\frac{d}{df}p(y_{i}|f_{i}) = \frac{(v + 1)(y - f)}{v \sigma^{2} + (y_{i} - f_{i})^{2}}$$
$$\frac{d\ln p(y_{i}|f_{i})}{df} = \frac{-(v+1)(f_{i}-y_{i})}{(f_{i}-y_{i})^{2} + \sigma^{2}v}$$
:y: data
:f: latent variables f
@@ -220,17 +219,17 @@ class student_t(likelihood_function):
f = np.squeeze(f)
assert y.shape == f.shape
e = y - f
grad = ((self.v + 1) * e) / (self.v * (self.sigma**2) + (e**2))
grad = ((self.v + 1) * e) / (self.v * (self.sigma**2) + (e**2))
return np.squeeze(grad)
def link_hess(self, y, f, extra_data=None):
def d2lik_d2f(self, y, f, extra_data=None):
"""
Hessian at y, given f, w.r.t f (i.e. the second derivative of the link function).
If we only consider the link function and not the prior, the Hessian is 0 unless i == j,
so only the diagonal of the Hessian is returned.
$$\frac{d^{2}p(y_{i}|f_{i})}{df^{2}} = \frac{(v + 1)(y - f)}{v \sigma^{2} + (y_{i} - f_{i})^{2}}$$
$$\frac{d^{2}\ln p(y_{i}|f_{i})}{df^{2}} = \frac{(v+1)((f_{i}-y_{i})^{2} - \sigma^{2}v)}{((f_{i}-y_{i})^{2} + \sigma^{2}v)^{2}}$$
:y: data
:f: latent variables f
@@ -245,54 +244,79 @@ class student_t(likelihood_function):
hess = ((self.v + 1)*(e**2 - self.v*(self.sigma**2))) / ((((self.sigma**2)*self.v) + e**2)**2)
return np.squeeze(hess)
def d3link(self, y, f, extra_data=None):
def d3lik_d3f(self, y, f, extra_data=None):
"""
Third order derivative of the link function (log-likelihood) at y, given f, w.r.t f
$$\frac{2(v+1)((y-f)^{3} - 3\sigma^{2}v(y-f))}{((y-f)^{2} + \sigma^{2}v)^{3}}$$
$$\frac{d^{3}\ln p(y_{i}|f_{i})}{df^{3}} = \frac{-2(v+1)((f_{i} - y_{i})^{3} - 3(f_{i} - y_{i})\sigma^{2}v)}{((f_{i} - y_{i})^{2} + \sigma^{2}v)^{3}}$$
"""
y = np.squeeze(y)
f = np.squeeze(f)
assert y.shape == f.shape
e = y - f
d3link_d3f = ( (2*(self.v + 1)*(-1*e)*(e**2 - 3*(self.sigma**2)*self.v))
/ ((e**2 + (self.sigma**2)*self.v)**3)
)
return np.squeeze(d3link_d3f)
d3lik_d3f = ( (2*(self.v + 1)*(e**3 - 3*e*self.v*(self.sigma**2))) /
((e**2 + (self.sigma**2)*self.v)**3)
)
return np.squeeze(d3lik_d3f)
def link_hess_grad_std(self, y, f, extra_data=None):
def link_dstd(self, y, f, extra_data=None):
"""
Gradient of the hessian w.r.t sigma parameter (standard deviation)
Gradient of the likelihood (lik) w.r.t sigma parameter (standard deviation)
$$\frac{2\sigma v(v+1)(\sigma^{2}v - 3(f-y)^2)}{((f-y)^{2} + \sigma^{2}v)^{3}}
Terms relevant to the derivative w.r.t. sigma are:
-log(sqrt(v*pi)*s) - (1/2)*(v + 1)*log(1 + (1/v)*((y-f)/s)^2)
$$\frac{d\ln p(y_{i}|f_{i})}{d\sigma} = -\frac{1}{\sigma} + \frac{(1+v)(y_{i}-f_{i})^{2}}{\sigma^{3} v(1 + \frac{1}{v}\left(\frac{y_{i} - f_{i}}{\sigma}\right)^{2})}$$
"""
y = np.squeeze(y)
f = np.squeeze(f)
assert y.shape == f.shape
e = y - f
hess_grad_sigma = ( (2*self.sigma*self.v*(self.v + 1)*((self.sigma**2)*self.v - 3*(e**2)))
/ ((e**2 + (self.sigma**2)*self.v)**3)
)
return np.squeeze(hess_grad_sigma)
dlik_dsigma = ( -(1./self.sigma) +
((1+self.v)*(e**2))/((self.sigma**3)*self.v*(1 + (e**2) / ((self.sigma**2)*self.v) ) )
)
return np.squeeze(dlik_dsigma)
def link_grad_std(self, y, f, extra_data=None):
def dlik_df_dstd(self, y, f, extra_data=None):
"""
Gradient of the likelihood w.r.t sigma parameter (standard deviation)
Gradient of the dlik_df w.r.t sigma parameter (standard deviation)
$$\frac{-2\sigma(v+1)(y-f)}{(v\sigma^{2} + (y-f)^{2})^{2}}$$
$$\frac{d}{d\sigma}\left(\frac{d\ln p(y_{i}|f_{i})}{df}\right) = \frac{2\sigma v(v + 1)(f_{i}-y_{i})}{((f_{i}-y_{i})^{2} + \sigma^{2} v)^{2}}$$
"""
y = np.squeeze(y)
f = np.squeeze(f)
assert y.shape == f.shape
e = y - f
grad_sigma = ( (-2*self.sigma*self.v*(self.v + 1)*e)
/ ((self.v*(self.sigma**2) + e**2)**2)
)
return np.squeeze(grad_sigma)
dlik_grad_dsigma = (-(2*self.sigma*self.v*(self.v + 1)*e)
/ ((self.v*(self.sigma**2) + e**2)**2)
)
return np.squeeze(dlik_grad_dsigma)
def d2lik_d2f_dstd(self, y, f, extra_data=None):
"""
Gradient of the hessian (d2lik_d2f) w.r.t sigma parameter (standard deviation)
$$\frac{d}{d\sigma}\left(\frac{d^{2}\ln p(y_{i}|f_{i})}{df^{2}}\right) = \frac{2\sigma v(v+1)(\sigma^{2}v - 3(f_{i}-y_{i})^{2})}{((f_{i}-y_{i})^{2} + \sigma^{2}v)^{3}}$$
"""
y = np.squeeze(y)
f = np.squeeze(f)
assert y.shape == f.shape
e = y - f
dlik_hess_dsigma = ( (2*self.sigma*self.v*(self.v + 1)*((self.sigma**2)*self.v - 3*(e**2))) /
((e**2 + (self.sigma**2)*self.v)**3)
)
return np.squeeze(dlik_hess_dsigma)
def _gradients(self, y, f, extra_data=None):
return [self.link_grad_std(y, f, extra_data=extra_data),
self.link_hess_grad_std(y, f, extra_data=extra_data)] # list as we might learn many parameters
derivs = ([self.link_dstd(y, f, extra_data=extra_data)],
[self.dlik_df_dstd(y, f, extra_data=extra_data)],
[self.d2lik_d2f_dstd(y, f, extra_data=extra_data)]
) # lists as we might learn many parameters
# ensure we have gradients for every parameter we want to optimize
assert len(derivs[0]) == len(self._get_param_names())
assert len(derivs[1]) == len(self._get_param_names())
assert len(derivs[2]) == len(self._get_param_names())
return derivs
def predictive_values(self, mu, var):
"""
@@ -412,7 +436,7 @@ class weibull_survival(likelihood_function):
objective = v*(np.log(self.shape) + (self.shape - 1)*np.log(y) + f) - (y**self.shape)*np.exp(f) # FIXME: CHECK THIS WITH BOOK, where's the scale?
return np.sum(objective)
def link_grad(self, y, f, extra_data=None):
def dlik_df(self, y, f, extra_data=None):
"""
Gradient of the link function at y, given f w.r.t f
@@ -432,7 +456,7 @@ class weibull_survival(likelihood_function):
grad = v - (y**self.shape)*np.exp(f)
return np.squeeze(grad)
def link_hess(self, y, f, extra_data=None):
def d2lik_d2f(self, y, f, extra_data=None):
"""
Hessian at y, given f, w.r.t f (i.e. the second derivative of the link function).
If we only consider the link function and not the prior, the Hessian is 0 unless i == j.
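
Given the commit message ("went through all likelihood gradients again"), a small self-contained check of the Student-t expressions above may be worth keeping around. This sketch re-implements the docstring formulas outside GPy and compares them against central finite differences of the log-density; all names are local to the sketch.

    import numpy as np
    from scipy.special import gammaln

    def ln_t(y, f, v, s):
        """Student-t log-density ln p(y|f) with scale s and degrees of freedom v."""
        e = y - f
        return (gammaln((v + 1) * 0.5) - gammaln(v * 0.5)
                - np.log(s * np.sqrt(v * np.pi))
                - (v + 1) * 0.5 * np.log(1 + (e**2 / s**2) / v))

    def dln_df(y, f, v, s):      # first derivative w.r.t. f
        e = y - f
        return (v + 1) * e / (v * s**2 + e**2)

    def d2ln_df2(y, f, v, s):    # second derivative w.r.t. f
        e = y - f
        return (v + 1) * (e**2 - v * s**2) / (v * s**2 + e**2)**2

    def d3ln_df3(y, f, v, s):    # third derivative w.r.t. f
        e = y - f
        return 2 * (v + 1) * (e**3 - 3 * e * v * s**2) / (v * s**2 + e**2)**3

    def dln_ds(y, f, v, s):      # derivative w.r.t. the scale sigma
        e = y - f
        return -1.0 / s + (v + 1) * e**2 / (s**3 * v * (1 + e**2 / (s**2 * v)))

    if __name__ == "__main__":
        y, f, v, s, h = 0.3, -1.2, 4.0, 0.7, 1e-5
        fd = lambda g, x: (g(x + h) - g(x - h)) / (2 * h)   # central difference
        assert abs(fd(lambda x: ln_t(y, x, v, s), f) - dln_df(y, f, v, s)) < 1e-6
        assert abs(fd(lambda x: dln_df(y, x, v, s), f) - d2ln_df2(y, f, v, s)) < 1e-6
        assert abs(fd(lambda x: d2ln_df2(y, x, v, s), f) - d3ln_df3(y, f, v, s)) < 1e-6
        assert abs(fd(lambda x: ln_t(y, f, v, x), s) - dln_ds(y, f, v, s)) < 1e-6
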

View file

@@ -147,7 +147,7 @@ class GP(model):
if isinstance(self.likelihood, Laplace):
dL_dthetaK_explicit = dL_dthetaK
#Need to pass in a matrix of ones to get access to raw dK_dthetaK values without being chained
fake_dL_dKs = np.eye(self.dL_dK.shape[0]) #FIXME: Check this is right...
fake_dL_dKs = np.ones(self.dL_dK.shape) #FIXME: Check this is right...
dK_dthetaK = self.kern.dK_dtheta(dL_dK=fake_dL_dKs, X=self.X)
dL_dthetaK = self.likelihood._Kgradients(dL_d_K_Sigma=self.dL_dK, dK_dthetaK=dK_dthetaK)
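
On the FIXME above: if kern.dK_dtheta follows the usual GPy chaining convention of returning sum_ij dL_dK[i,j] * dK[i,j]/dtheta_k for each parameter, then passing a matrix of ones yields only the entry-wise sum of each dK/dtheta_k, not the raw (N, N) matrices that Laplace._Kgradients loops over. A tiny numpy illustration of that distinction, independent of GPy:

    import numpy as np

    N = 4
    dL_dK = np.random.randn(N, N)
    # two hypothetical per-parameter kernel gradient matrices dK/dtheta_k
    dK_dtheta = [np.random.randn(N, N) for _ in range(2)]

    # chained gradient: one scalar per parameter, sum_ij dL_dK[i,j] * dK[i,j]/dtheta_k
    chained = np.array([np.sum(dL_dK * dK) for dK in dK_dtheta])

    # with dL_dK set to all ones the chaining collapses each dK/dtheta_k to its
    # entry-wise sum, so the raw (N, N) structure needed by _Kgradients is lost
    ones_chained = np.array([np.sum(np.ones((N, N)) * dK) for dK in dK_dtheta])
    assert np.allclose(ones_chained, [dK.sum() for dK in dK_dtheta])
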