Have most of the likelihood testing working; the Laplace likelihood parameters still need fixing, some of the signs are wrong I believe

Alan Saul 2014-02-10 12:28:24 +00:00
parent 625943ef27
commit 0f263d2ff2
5 changed files with 122 additions and 71 deletions


@@ -256,6 +256,16 @@ class Parameterized(Constrainable, Pickleable, Observable):
cPickle.dump(self, f, protocol)
def copy(self):
"""Returns a (deep) copy of the current model """
#dc = dict()
#for k, v in self.__dict__.iteritems():
#if k not in ['_highest_parent_', '_direct_parent_']:
#dc[k] = copy.deepcopy(v)
#dc = copy.deepcopy(self.__dict__)
#dc['_highest_parent_'] = None
#dc['_direct_parent_'] = None
#s = self.__class__.new()
#s.__dict__ = dc
return copy.deepcopy(self)
def __getstate__(self):
if self._has_get_set_state():
@@ -419,6 +429,8 @@ class Parameterized(Constrainable, Pickleable, Observable):
#===========================================================================
# Convenience for fixed, tied checking of param:
#===========================================================================
def fixed_indices(self):
return np.array([x.is_fixed for x in self._parameters_])
def _is_fixed(self, param):
# returns if the whole param is fixed
if not self._has_fixes():
@@ -449,7 +461,6 @@ class Parameterized(Constrainable, Pickleable, Observable):
# if removing constraints before adding new is not wanted, just delete the above line!
self.constraints.add(transform, rav_i)
param = self._get_original(param)
#FIXME: Max, is this the right thing to do to handle fixed?
if not (transform == __fixed__):
param._set_params(transform.initialize(param._get_params()), update=False)
if warning and any(reconstrained):


@@ -32,6 +32,7 @@ class LaplaceInference(object):
self._mode_finding_tolerance = 1e-7
self._mode_finding_max_iter = 40
self.bad_fhat = True
self._previous_Ki_fhat = None
def inference(self, kern, X, likelihood, Y, Y_metadata=None):
"""
@@ -53,14 +54,13 @@ class LaplaceInference(object):
f_hat, Ki_fhat = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata)
#Compute hessian and other variables at mode
log_marginal, Ki_W_i, K_Wi_i, dL_dK, woodbury_vector = self.mode_computations(f_hat, Ki_fhat, K, Y, likelihood, Y_metadata)
log_marginal, woodbury_vector, woodbury_inv, dL_dK, dL_dthetaL = self.mode_computations(f_hat, Ki_fhat, K, Y, likelihood, kern, Y_metadata)
#likelihood.gradient = self.likelihood_gradients()
kern.update_gradients_full(dL_dK, X)
likelihood.update_gradients(np.ones(10))
likelihood.update_gradients(dL_dthetaL)
self._previous_Ki_fhat = Ki_fhat.copy()
return Posterior(woodbury_vector=woodbury_vector, woodbury_inv = K_Wi_i, K=K), log_marginal, {'dL_dK':dL_dK}
return Posterior(woodbury_vector=woodbury_vector, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK}
def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None):
"""
@@ -134,13 +134,15 @@ class LaplaceInference(object):
return f, Ki_f
def mode_computations(self, f_hat, Ki_f, K, Y, likelihood, Y_metadata):
def mode_computations(self, f_hat, Ki_f, K, Y, likelihood, kern, Y_metadata):
"""
At the mode, compute the hessian and effective covariance matrix.
returns: logZ : approximation to the marginal likelihood
Cov : the approximation to the covariance matrix
woodbury_vector : variable required for calculating the approximation to the covariance matrix
woodbury_inv : variable required for calculating the approximation to the covariance matrix
dL_dthetaL : array of derivatives (1 x num_kernel_params)
dL_dthetaL : array of derivatives (1 x num_likelihood_params)
"""
#At this point get the hessian matrix (or vector as W is diagonal)
W = -likelihood.d2logpdf_df2(f_hat, Y, extra_data=Y_metadata)
@@ -154,48 +156,75 @@ class LaplaceInference(object):
#compute the log marginal
log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + likelihood.logpdf(f_hat, Y, extra_data=Y_metadata) - np.sum(np.log(np.diag(L)))
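#for reference: this is the standard Laplace approximation to the log marginal (Rasmussen & Williams, eq. 3.32),
#log q(y|X) = -0.5*f_hat^T K^{-1} f_hat + log p(y|f_hat) - 0.5*log|B| with B = I + W^{1/2} K W^{1/2},
#assuming L above is the lower Cholesky factor of B, so that np.sum(np.log(np.diag(L))) equals 0.5*log|B|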
#compute dL_dK
explicit_part = 0.5*(np.dot(Ki_f, Ki_f.T) - K_Wi_i)
#Implicit
#Compute vital matrices for derivatives
dW_df = likelihood.d3logpdf_df3(f_hat, Y, extra_data=Y_metadata) # d3lik_d3fhat
woodbury_vector = likelihood.dlogpdf_df(f_hat, Y, extra_data=Y_metadata)
dL_dfhat = 0.5*(np.diag(Ki_W_i)[:, None]*dW_df) #why isn't this -0.5? s2 in R&W p126 line 9.
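#a possible answer to the question above: with W = -d2logpdf_df2 (as defined earlier), dW/df_hat = -d3logpdf_df3,
#so -0.5*diag((K^{-1}+W)^{-1})*dW/df_hat = +0.5*diag((K^{-1}+W)^{-1})*d3logpdf_df3; the +0.5 here is consistent
#provided Ki_W_i = (K^{-1} + W)^{-1} and dW_df holds d3logpdf_df3 (as the assignment above suggests)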
#implicit_part = np.dot(woodbury_vector, dL_dfhat.T).dot(np.eye(Y.shape[0]) - np.dot(K, K_Wi_i))
BiK, _ = dpotrs(L, K, lower=1)
#BiK, _ = dpotrs(L, K, lower=1)
#dL_dfhat = 0.5*np.diag(BiK)[:, None]*dW_df
implicit_part = np.dot(woodbury_vector, dL_dfhat.T).dot(np.eye(Y.shape[0]) - np.dot(K, K_Wi_i))
I_KW_i = np.eye(Y.shape[0]) - np.dot(K, K_Wi_i)
dL_dK = explicit_part + implicit_part
return log_marginal, Ki_W_i, K_Wi_i, dL_dK, woodbury_vector
def likelihood_gradients(self):
"""
Gradients with respect to likelihood parameters (dL_dthetaL)
:rtype: array of derivatives (1 x num_likelihood_params)
"""
dL_dfhat, I_KW_i = self._shared_gradients_components()
dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = likelihood._laplace_gradients(self.f_hat, self.data, extra_data=self.extra_data)
num_params = len(self._get_param_names())
# make space for one derivative for each likelihood parameter
dL_dthetaL = np.zeros(num_params)
for thetaL_i in range(num_params):
####################
#compute dL_dK#
####################
if kern.size > 0 and not kern.is_fixed:
#Explicit
dL_dthetaL_exp = ( np.sum(dlik_dthetaL[:, thetaL_i])
#- 0.5*np.trace(mdot(self.Ki_W_i, (self.K, np.diagflat(dlik_hess_dthetaL[thetaL_i]))))
+ np.dot(0.5*np.diag(self.Ki_W_i)[:,None].T, dlik_hess_dthetaL[:, thetaL_i])
)
explicit_part = 0.5*(np.dot(Ki_f, Ki_f.T) - K_Wi_i)
#Implicit
dfhat_dthetaL = mdot(I_KW_i, self.K, dlik_grad_dthetaL[:, thetaL_i])
dL_dthetaL_imp = np.dot(dL_dfhat, dfhat_dthetaL)
dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp
implicit_part = np.dot(woodbury_vector, dL_dfhat.T).dot(I_KW_i)
return dL_dthetaL
dL_dK = explicit_part + implicit_part
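#reference form (GPML sec. 5.5.1), assuming K_Wi_i = (K + W^{-1})^{-1} and Ki_f = K^{-1}*f_hat:
#explicit part: dlogZ/dK = 0.5*(K^{-1} f_hat f_hat^T K^{-1} - (K + W^{-1})^{-1});
#implicit part: dlogZ/df_hat chained through df_hat/dK, using dlogp/df_hat (woodbury_vector) and (I + KW)^{-1}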
else:
dL_dK = np.zeros(likelihood.size)
####################
#compute dL_dthetaL#
####################
if likelihood.size > 0 and not likelihood.is_fixed:
dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = likelihood._laplace_gradients(f_hat, Y, extra_data=Y_metadata)
num_params = likelihood.size
# make space for one derivative for each likelihood parameter
dL_dthetaL = np.zeros(num_params)
for thetaL_i in range(num_params):
#Explicit
dL_dthetaL_exp = ( + np.sum(dlik_dthetaL[thetaL_i])
+ 0.5*np.sum(np.diag(Ki_W_i).flatten()*dlik_hess_dthetaL[:, thetaL_i].flatten())
#- 0.5*np.trace(np.diag(Ki_W_i)[:,None]*dlik_hess_dthetaL[:, thetaL_i])
#+ 0.5*np.trace(np.dot(I_KW_i, K)*dlik_hess_dthetaL[:, thetaL_i])
)
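#reference form of the explicit term, assuming Ki_W_i = (K^{-1} + W)^{-1}:
#sum_i dlogp(y_i|f_hat_i)/dthetaL - 0.5*tr((K^{-1} + W)^{-1} dW/dthetaL); with dW/dthetaL = -d2logpdf_df2_dtheta,
#the trace term becomes +0.5*sum(diag(Ki_W_i)*dlik_hess_dthetaL[:, thetaL_i]), which is the sign used here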
#Implicit
dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[:, thetaL_i])
#dfhat_dthetaL = mdot(Wi_K_i, dlik_grad_dthetaL[:, thetaL_i])
dL_dthetaL_imp = np.dot(dL_dfhat.T, dfhat_dthetaL)
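#reference form of the implicit term: differentiating the stationarity condition f_hat = K*dlogp(y|f_hat)/df
#gives df_hat/dthetaL = (I + KW)^{-1} K * d(dlogp/df)/dthetaL; assuming K_Wi_i = (K + W^{-1})^{-1},
#I_KW_i = I - K*K_Wi_i = (I + KW)^{-1}, and the implicit contribution is (dlogq/df_hat)^T * df_hat/dthetaL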
#import pylab as pb
#pb.figure(1)
#pb.matshow(Ki_W_i)
#pb.title('I_KW_i approx')
#pb.colorbar()
#pb.figure(2)
#pb.matshow(np.linalg.inv(np.dot(np.eye(Y.shape[0]) + np.sqrt(W).T*K*np.sqrt(W), K)))
#pb.title('I_KW_i')
#pb.colorbar()
#print likelihood
#pb.show()
#import ipdb; ipdb.set_trace() # XXX BREAKPOINT
dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp
else:
dL_dthetaL = np.zeros(likelihood.size)
return log_marginal, woodbury_vector, K_Wi_i, dL_dK, dL_dthetaL
#def likelihood_gradients(self, f_hat, K, Y, Ki_W_i, dL_dfhat, I_KW_i, likelihood, Y_metadata):
#"""
#Gradients with respect to likelihood parameters (dL_dthetaL)
#:rtype: array of derivatives (1 x num_likelihood_params)
#"""
def _compute_B_statistics(self, K, W, log_concave):
"""

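Given that the commit message flags possible sign errors in dL_dthetaL, a central finite-difference comparison against the analytic gradient is one way to localise them. The sketch below is not part of the diff above; fd_check, f, df and theta0 are placeholder names, with f standing in for a callable that recomputes the Laplace log marginal for a given likelihood-parameter vector on fixed data, and df for the corresponding analytic dL_dthetaL.

import numpy as np

def fd_check(f, df, theta0, eps=1e-6):
    """Compare the analytic gradient df(theta0) with central finite differences of f."""
    theta0 = np.asarray(theta0, dtype=float)
    numerical = np.zeros_like(theta0)
    for i in range(theta0.size):
        up, down = theta0.copy(), theta0.copy()
        up[i] += eps
        down[i] -= eps
        # central difference approximation to dL/dthetaL_i
        numerical[i] = (f(up) - f(down)) / (2.0 * eps)
    analytic = np.asarray(df(theta0), dtype=float).ravel()
    return numerical, analytic

Entries that agree in magnitude but not in sign point at a sign error in that parameter's gradient.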

@@ -312,7 +312,7 @@ class Likelihood(Parameterized):
return self.dlogpdf_link_dtheta(link_f, y, extra_data=extra_data)
else:
#There are no parameters, so return an empty array for the derivatives
return np.empty([1, 0])
return np.zeros([1, 0])
def dlogpdf_df_dtheta(self, f, y, extra_data=None):
"""
@@ -325,7 +325,7 @@ class Likelihood(Parameterized):
return chain_1(dlogpdf_dlink_dtheta, dlink_df)
else:
#There are no parameters, so return an empty array for the derivatives
return np.empty([f.shape[0], 0])
return np.zeros([f.shape[0], 0])
def d2logpdf_df2_dtheta(self, f, y, extra_data=None):
"""
@@ -340,7 +340,7 @@ class Likelihood(Parameterized):
return chain_2(d2logpdf_dlink2_dtheta, dlink_df, dlogpdf_dlink_dtheta, d2link_df2)
else:
#There are no parameters, so return an empty array for the derivatives
return np.empty([f.shape[0], 0])
return np.zeros([f.shape[0], 0])
def _laplace_gradients(self, f, y, extra_data=None):
dlogpdf_dtheta = self.dlogpdf_dtheta(f, y, extra_data=extra_data)
@@ -349,9 +349,12 @@ class Likelihood(Parameterized):
#Parameters are stacked vertically. Must be listed in same order as 'get_param_names'
# ensure we have gradients for every parameter we want to optimize
assert dlogpdf_dtheta.shape[1] == self.size
assert dlogpdf_df_dtheta.shape[1] == self.size
assert d2logpdf_df2_dtheta.shape[1] == self.size
try:
assert len(dlogpdf_dtheta) == self.size #1 x num_param array
assert dlogpdf_df_dtheta.shape[1] == self.size #f x num_param matrix
assert d2logpdf_df2_dtheta.shape[1] == self.size #f x num_param matrix
except Exception as e:
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
return dlogpdf_dtheta, dlogpdf_df_dtheta, d2logpdf_df2_dtheta


@@ -30,6 +30,7 @@ class StudentT(Likelihood):
self.v = Param('deg_free', float(deg_free))
self.add_parameter(self.sigma2)
self.add_parameter(self.v)
self.v.constrain_fixed()
self.log_concave = False
@@ -226,15 +227,18 @@ class StudentT(Likelihood):
def dlogpdf_link_dtheta(self, f, y, extra_data=None):
dlogpdf_dvar = self.dlogpdf_link_dvar(f, y, extra_data=extra_data)
return np.asarray([[dlogpdf_dvar]])
dlogpdf_dv = np.zeros_like(dlogpdf_dvar) #FIXME: Not done yet
return np.hstack((dlogpdf_dvar, dlogpdf_dv))
def dlogpdf_dlink_dtheta(self, f, y, extra_data=None):
dlogpdf_dlink_dvar = self.dlogpdf_dlink_dvar(f, y, extra_data=extra_data)
return dlogpdf_dlink_dvar
dlogpdf_dlink_dv = np.zeros_like(dlogpdf_dlink_dvar) #FIXME: Not done yet
return np.hstack((dlogpdf_dlink_dvar, dlogpdf_dlink_dv))
def d2logpdf_dlink2_dtheta(self, f, y, extra_data=None):
d2logpdf_dlink2_dvar = self.d2logpdf_dlink2_dvar(f, y, extra_data=extra_data)
return d2logpdf_dlink2_dvar
d2logpdf_dlink2_dv = np.zeros_like(d2logpdf_dlink2_dvar) #FIXME: Not done yet
return np.hstack((d2logpdf_dlink2_dvar, d2logpdf_dlink2_dv))
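#note: columns are stacked in the order the parameters were added in __init__ (sigma2 first, then deg_free),
#matching the ordering assumed by Likelihood._laplace_gradients; the deg_free column is all zeros for now
#since that derivative is marked FIXME above and the parameter is constrained fixed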
def _predictive_variance_analytical(self, mu, sigma, predictive_mean=None):
"""


@@ -8,7 +8,7 @@ from GPy.likelihoods import link_functions
from ..core.parameterization import Param
from functools import partial
#np.random.seed(300)
np.random.seed(7)
#np.random.seed(7)
def dparam_partial(inst_func, *args):
"""
@@ -41,25 +41,27 @@ def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None,
The number of parameters and N is the number of data
Need to take a slice out from f and a slice out of df
"""
#print "\n{} likelihood: {} vs {}".format(func.im_self.__class__.__name__,
#func.__name__, dfunc.__name__)
print "\n{} likelihood: {} vs {}".format(func.im_self.__class__.__name__,
func.__name__, dfunc.__name__)
partial_f = dparam_partial(func, *args)
partial_df = dparam_partial(dfunc, *args)
gradchecking = True
zipped_params = zip(params, params_names)
for param_val, param_name in zipped_params:
fnum = np.atleast_1d(partial_f(param_val, param_name)).shape[0]
dfnum = np.atleast_1d(partial_df(param_val, param_name)).shape[0]
for param_ind, (param_val, param_name) in enumerate(zipped_params):
#Check one parameter at a time; make sure the result is 2d (some gradients only return 1d arrays), then slice out the column for this parameter
fnum = np.atleast_2d(partial_f(param_val, param_name))[:, param_ind].shape[0]
dfnum = np.atleast_2d(partial_df(param_val, param_name))[:, param_ind].shape[0]
for fixed_val in range(dfnum):
#dlik and dlik_dvar give back 1 value for each
f_ind = min(fnum, fixed_val+1) - 1
print "fnum: {} dfnum: {} f_ind: {} fixed_val: {}".format(fnum, dfnum, f_ind, fixed_val)
#Make grad checker with this param moving, note that set_params is NOT being called
#The parameter is being set directly with __setattr__
grad = GradientChecker(lambda p_val: np.atleast_1d(partial_f(p_val, param_name))[f_ind],
lambda p_val: np.atleast_1d(partial_df(p_val, param_name))[fixed_val],
#Check only the parameter and function value we wish to check at a time
grad = GradientChecker(lambda p_val: np.atleast_2d(partial_f(p_val, param_name))[f_ind, param_ind],
lambda p_val: np.atleast_2d(partial_df(p_val, param_name))[fixed_val, param_ind],
param_val, [param_name])
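#Each GradientChecker instance perturbs one named parameter and compares one output element at a time,
#so gradients that come back as (N x num_params) arrays are checked entrywise via the [f_ind, param_ind] slices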
#This is not general for more than one param...
if constraints is not None:
for constrain_param, constraint in constraints:
if grad.grep_param_names(constrain_param):
@@ -115,8 +117,8 @@ class TestNoiseModels(object):
####################################################
# Constraint wrappers so we can just list them off #
####################################################
def constrain_fixed(regex, model, value):
model[regex].constrain_fixed(value)
def constrain_fixed(regex, model):
model[regex].constrain_fixed()
def constrain_negative(regex, model):
model[regex].constrain_negative()
@@ -149,7 +151,7 @@ class TestNoiseModels(object):
"grad_params": {
"names": ["t_noise"],
"vals": [self.var],
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_positive)]
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_fixed)]
#"constraints": [("t_noise", constrain_positive), ("deg_free", partial(constrain_fixed, value=5))]
},
"laplace": True
@@ -159,7 +161,7 @@ class TestNoiseModels(object):
"grad_params": {
"names": ["t_noise"],
"vals": [1.0],
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_positive)]
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_fixed)]
},
"laplace": True
},
@@ -168,7 +170,7 @@ class TestNoiseModels(object):
"grad_params": {
"names": ["t_noise"],
"vals": [0.01],
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_positive)]
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_fixed)]
},
"laplace": True
},
@@ -177,7 +179,7 @@ class TestNoiseModels(object):
"grad_params": {
"names": ["t_noise"],
"vals": [10.0],
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_positive)]
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_fixed)]
},
"laplace": True
},
@@ -186,7 +188,7 @@ class TestNoiseModels(object):
"grad_params": {
"names": ["t_noise"],
"vals": [self.var],
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_positive)]
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_fixed)]
},
"laplace": True
},
@@ -195,7 +197,7 @@ class TestNoiseModels(object):
"grad_params": {
"names": ["t_noise"],
"vals": [self.var],
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_positive)]
"constraints": [("t_noise", constrain_positive), ("deg_free", constrain_fixed)]
},
"laplace": True
},
@@ -542,8 +544,8 @@ class TestNoiseModels(object):
Y = Y/Y.max()
white_var = 1e-6
kernel = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1])
ep_likelihood = GPy.likelihoods.EP(Y.copy(), model)
m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=ep_likelihood)
ep_inf = GPy.inference.latent_function_inference.EP()
m = GPy.core.GP(X.copy(), Y.copy(), kernel=kernel, likelihood=model, inference_method=ep_inf)
m.ensure_default_constraints()
m['white'].constrain_fixed(white_var)
@@ -622,7 +624,9 @@ class LaplaceTests(unittest.TestCase):
#Yc = Y.copy()
#Yc[75:80] += 1
kernel1 = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1])
kernel2 = kernel1.copy()
#FIXME: Make sure you can copy kernels when params is fixed
#kernel2 = kernel1.copy()
kernel2 = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1])
gauss_distr1 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
exact_inf = GPy.inference.latent_function_inference.ExactGaussianInference()
@@ -686,7 +690,7 @@ class LaplaceTests(unittest.TestCase):
#Check Y's are the same
np.testing.assert_almost_equal(Y, m2.likelihood.Y, decimal=5)
np.testing.assert_almost_equal(m1.Y, m2.Y, decimal=5)
#Check marginals are the same
np.testing.assert_almost_equal(m1.log_likelihood(), m2.log_likelihood(), decimal=2)
#Check marginals are the same with random