Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-04 01:02:39 +02:00)
[inference] changed gaussian variance to precision (which it really is)
parent 8132084de6
commit 69f6cfa6f7

5 changed files with 45 additions and 45 deletions
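For context, a minimal sketch (not part of the diff; the array value is assumed) of the relationship behind the rename: GPy's Gaussian likelihood reports the noise variance sigma^2, while these inference routines work with its inverse, the precision beta = 1/sigma^2.

import numpy as np

# Hypothetical stand-in for likelihood.gaussian_variance(Y_metadata),
# which returns the Gaussian noise variance sigma^2.
gaussian_variance = np.array([0.1])

# What the inference code below actually computes and uses is the
# precision beta = 1/sigma^2, hence the rename in this commit.
precision = 1. / gaussian_variance

assert np.allclose(precision * gaussian_variance, 1.)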
@@ -28,8 +28,8 @@ class DTC(LatentFunctionInference):
         num_data, output_dim = Y.shape

         #make sure the noise is not hetero
-        gaussian_variance = 1./likelihood.gaussian_variance(Y_metadata)
-        if gaussian_variance.size > 1:
+        precision = 1./likelihood.gaussian_variance(Y_metadata)
+        if precision.size > 1:
             raise NotImplementedError("no hetero noise with this implementation of DTC")

         Kmm = kern.K(Z)
@@ -42,7 +42,7 @@ class DTC(LatentFunctionInference):
         Kmmi, L, Li, _ = pdinv(Kmm)

         # Compute A
-        LiUTbeta = np.dot(Li, U.T)*np.sqrt(gaussian_variance)
+        LiUTbeta = np.dot(Li, U.T)*np.sqrt(precision)
         A = tdot(LiUTbeta) + np.eye(num_inducing)

         # factor A
@@ -50,7 +50,7 @@ class DTC(LatentFunctionInference):

         # back substutue to get b, P, v
         tmp, _ = dtrtrs(L, Uy, lower=1)
-        b, _ = dtrtrs(LA, tmp*gaussian_variance, lower=1)
+        b, _ = dtrtrs(LA, tmp*precision, lower=1)
         tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
         v, _ = dtrtrs(L, tmp, lower=1, trans=1)
         tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
@@ -59,8 +59,8 @@ class DTC(LatentFunctionInference):
         #compute log marginal
         log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
                        -np.sum(np.log(np.diag(LA)))*output_dim + \
-                       0.5*num_data*output_dim*np.log(gaussian_variance) + \
-                       -0.5*gaussian_variance*np.sum(np.square(Y)) + \
+                       0.5*num_data*output_dim*np.log(precision) + \
+                       -0.5*precision*np.sum(np.square(Y)) + \
                        0.5*np.sum(np.square(b))

         # Compute dL_dKmm
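Read off the code above (a transcription, not part of the diff; beta is the noise precision, N = num_data, D = output_dim, L_A the Cholesky factor of A, and b comes from the back-substitution hunk), the collapsed log marginal being accumulated is

\log p(Y) = -\frac{ND}{2}\log 2\pi - D\sum_i \log (L_A)_{ii} + \frac{ND}{2}\log\beta - \frac{\beta}{2}\|Y\|_F^2 + \frac{1}{2}\|b\|_F^2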
@@ -70,11 +70,11 @@ class DTC(LatentFunctionInference):
         # Compute dL_dU
         vY = np.dot(v.reshape(-1,1),Y.T)
         dL_dU = vY - np.dot(vvT_P, U.T)
-        dL_dU *= gaussian_variance
+        dL_dU *= precision

         #compute dL_dR
         Uv = np.dot(U, v)
-        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./gaussian_variance + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*gaussian_variance**2
+        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./precision + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*precision**2

         dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
@@ -97,8 +97,8 @@ class vDTC(object):
         num_data, output_dim = Y.shape

         #make sure the noise is not hetero
-        gaussian_variance = 1./likelihood.gaussian_variance(Y_metadata)
-        if gaussian_variance.size > 1:
+        precision = 1./likelihood.gaussian_variance(Y_metadata)
+        if precision.size > 1:
             raise NotImplementedError("no hetero noise with this implementation of DTC")

         Kmm = kern.K(Z)
@@ -111,9 +111,9 @@ class vDTC(object):
         Kmmi, L, Li, _ = pdinv(Kmm)

         # Compute A
-        LiUTbeta = np.dot(Li, U.T)*np.sqrt(gaussian_variance)
+        LiUTbeta = np.dot(Li, U.T)*np.sqrt(precision)
         A_ = tdot(LiUTbeta)
-        trace_term = -0.5*(np.sum(Knn)*gaussian_variance - np.trace(A_))
+        trace_term = -0.5*(np.sum(Knn)*precision - np.trace(A_))
         A = A_ + np.eye(num_inducing)

         # factor A
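Assuming Knn holds the diagonal of the training covariance and U = K_nm (the cross-covariance to the inducing points), the trace_term introduced here is the variational correction that separates vDTC from plain DTC (cf. Titsias, 2009). Since tr(A_) = beta tr(L^{-1} U^T U L^{-T}) = beta tr(K_nm K_mm^{-1} K_mn), it reads

\text{trace\_term} = -\frac{\beta}{2}\Big(\operatorname{tr}(K_{nn}) - \operatorname{tr}\big(K_{nm} K_{mm}^{-1} K_{mn}\big)\Big)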
@@ -121,7 +121,7 @@ class vDTC(object):

         # back substutue to get b, P, v
         tmp, _ = dtrtrs(L, Uy, lower=1)
-        b, _ = dtrtrs(LA, tmp*gaussian_variance, lower=1)
+        b, _ = dtrtrs(LA, tmp*precision, lower=1)
         tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
         v, _ = dtrtrs(L, tmp, lower=1, trans=1)
         tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
@@ -131,8 +131,8 @@ class vDTC(object):
         #compute log marginal
         log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
                        -np.sum(np.log(np.diag(LA)))*output_dim + \
-                       0.5*num_data*output_dim*np.log(gaussian_variance) + \
-                       -0.5*gaussian_variance*np.sum(np.square(Y)) + \
+                       0.5*num_data*output_dim*np.log(precision) + \
+                       -0.5*precision*np.sum(np.square(Y)) + \
                        0.5*np.sum(np.square(b)) + \
                        trace_term
@@ -145,15 +145,15 @@ class vDTC(object):
         vY = np.dot(v.reshape(-1,1),Y.T)
         #dL_dU = vY - np.dot(vvT_P, U.T)
         dL_dU = vY - np.dot(vvT_P - Kmmi, U.T)
-        dL_dU *= gaussian_variance
+        dL_dU *= precision

         #compute dL_dR
         Uv = np.dot(U, v)
-        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./gaussian_variance + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) )*gaussian_variance**2
-        dL_dR -=gaussian_variance*trace_term/num_data
+        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./precision + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) )*precision**2
+        dL_dR -=precision*trace_term/num_data

         dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)
-        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*gaussian_variance, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}
+        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn) + -0.5*precision, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}

         #construct a posterior object
         post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
@@ -22,7 +22,7 @@ class ExactGaussianInference(LatentFunctionInference):
     def __init__(self):
         pass#self._YYTfactor_cache = caching.cache()

-    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, gaussian_variance=None):
+    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, K=None, precision=None):
         """
         Returns a Posterior class containing essential quantities of the posterior
         """
@@ -32,8 +32,8 @@ class ExactGaussianInference(LatentFunctionInference):
         else:
             m = mean_function.f(X)

-        if gaussian_variance is None:
-            gaussian_variance = likelihood.gaussian_variance(Y_metadata)
+        if precision is None:
+            precision = likelihood.gaussian_variance(Y_metadata)

         YYT_factor = Y-m
@@ -41,7 +41,7 @@ class ExactGaussianInference(LatentFunctionInference):
             K = kern.K(X)

         Ky = K.copy()
-        diag.add(Ky, gaussian_variance+1e-8)
+        diag.add(Ky, precision+1e-8)

         Wi, LW, LWi, W_logdet = pdinv(Ky)
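As a minimal numpy sketch of what this exact-inference path computes (pdinv, dtrtrs and diag.add are GPy linear-algebra utilities; the function below is a hypothetical illustration, not the GPy API):

import numpy as np

def exact_gp_weights(K, Y, noise, jitter=1e-8):
    # Ky = K + (noise + jitter) * I, as in diag.add(Ky, ...) above
    Ky = K + (noise + jitter) * np.eye(K.shape[0])
    LW = np.linalg.cholesky(Ky)          # lower Cholesky factor, like pdinv's L
    tmp = np.linalg.solve(LW, Y)         # forward substitution
    alpha = np.linalg.solve(LW.T, tmp)   # back substitution: alpha = Ky^{-1} Y
    return alpha, LW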
@@ -35,7 +35,7 @@ class EP(ExactGaussianInference):
         # TODO: update approximation in the end as well? Maybe even with a switch?
         pass

-    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, gaussian_variance=None, K=None):
+    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, precision=None, K=None):
         num_data, output_dim = Y.shape
         assert output_dim ==1, "ep in 1D only (for now!)"
@@ -49,7 +49,7 @@ class EP(ExactGaussianInference):
             #if we've already run EP, just use the existing approximation stored in self._ep_approximation
             mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation

-        return super(EP, self).inference(kern, X, likelihood, mu_tilde[:,None], mean_function=mean_function, Y_metadata=Y_metadata, gaussian_variance=1./tau_tilde, K=K)
+        return super(EP, self).inference(kern, X, likelihood, mu_tilde[:,None], mean_function=mean_function, Y_metadata=Y_metadata, precision=1./tau_tilde, K=K)

     def expectation_propagation(self, K, Y, likelihood, Y_metadata):
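For orientation, the super() call above reuses exact Gaussian inference on the EP pseudo-observations: each site acts as a Gaussian observation of f_i with mean mu_tilde_i and variance 1/tau_tilde_i, the standard EP-for-GPs construction (cf. Rasmussen and Williams, ch. 3):

q(f) \propto p(f) \prod_{i=1}^{N} \mathcal{N}\big(\tilde\mu_i \mid f_i,\ 1/\tilde\tau_i\big)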
@@ -46,7 +46,7 @@ class EPDTC(VarDTC):
         return super(EPDTC, self).inference(kern, X, Z, likelihood, mu_tilde,
                                             mean_function=mean_function,
                                             Y_metadata=Y_metadata,
-                                            gaussian_variance=tau_tilde,
+                                            precision=tau_tilde,
                                             Lm=Lm, dL_dKmm=dL_dKmm,
                                             psi0=psi0, psi1=psi1, psi2=psi2)
@@ -64,7 +64,7 @@ class VarDTC(LatentFunctionInference):
     def get_VVTfactor(self, Y, prec):
         return Y * prec # TODO chache this, and make it effective

-    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, mean_function=None, gaussian_variance=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):
+    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, mean_function=None, precision=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):
         assert mean_function is None, "inference with a mean function not implemented"

         num_data, output_dim = Y.shape
@@ -72,16 +72,16 @@ class VarDTC(LatentFunctionInference):

         uncertain_inputs = isinstance(X, VariationalPosterior)

-        if gaussian_variance is None:
+        if precision is None:
             #assume Gaussian likelihood
-            gaussian_variance = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), self.const_jitter)
+            precision = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), self.const_jitter)

-        if gaussian_variance.ndim == 1:
-            gaussian_variance = gaussian_variance[:, None]
-        het_noise = gaussian_variance.size > 1
+        if precision.ndim == 1:
+            precision = precision[:, None]
+        het_noise = precision.size > 1

-        VVT_factor = gaussian_variance*Y
-        #VVT_factor = gaussian_variance*Y
+        VVT_factor = precision*Y
+        #VVT_factor = precision*Y
         trYYT = self.get_trYYT(Y)

         # kernel computations, using BGPLVM notation
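A small sketch (hypothetical values, not part of the diff) of the shape handling above: precision is reshaped to a column so it broadcasts per datum against Y, and heteroscedastic noise is detected by its size.

import numpy as np

precision = np.array([2.0, 0.5, 1.0])   # assumed per-datum precisions
if precision.ndim == 1:
    precision = precision[:, None]      # (N,) -> (N, 1)
het_noise = precision.size > 1          # True: more than one noise level

Y = np.ones((3, 2))                     # N=3 data, D=2 outputs
VVT_factor = precision * Y              # broadcasts to shape (3, 2)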
@@ -98,16 +98,16 @@ class VarDTC(LatentFunctionInference):
                 psi1 = kern.psi1(Z, X)
             if het_noise:
                 if psi2 is None:
-                    psi2_beta = (kern.psi2n(Z, X) * gaussian_variance[:, :, None]).sum(0)
+                    psi2_beta = (kern.psi2n(Z, X) * precision[:, :, None]).sum(0)
                 else:
-                    psi2_beta = (psi2 * gaussian_variance[:, :, None]).sum(0)
+                    psi2_beta = (psi2 * precision[:, :, None]).sum(0)
             else:
                 if psi2 is None:
-                    psi2_beta = kern.psi2(Z,X) * gaussian_variance
+                    psi2_beta = kern.psi2(Z,X) * precision
                 elif psi2.ndim == 3:
-                    psi2_beta = psi2.sum(0) * gaussian_variance
+                    psi2_beta = psi2.sum(0) * precision
                 else:
-                    psi2_beta = psi2 * gaussian_variance
+                    psi2_beta = psi2 * precision
             LmInv = dtrtri(Lm)
             A = LmInv.dot(psi2_beta.dot(LmInv.T))
         else:
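All five branches above compute the same precision-weighted second psi-statistic; writing Psi_2^(n) for the per-datum slice, they reduce to

\Psi_2^{\beta} = \sum_{n=1}^{N} \beta_n \, \Psi_2^{(n)}

which collapses to beta * Psi_2 when the noise is homoscedastic (beta_n identical for all n).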
@@ -116,9 +116,9 @@ class VarDTC(LatentFunctionInference):
             if psi1 is None:
                 psi1 = kern.K(X, Z)
             if het_noise:
-                tmp = psi1 * (np.sqrt(gaussian_variance))
+                tmp = psi1 * (np.sqrt(precision))
             else:
-                tmp = psi1 * (np.sqrt(gaussian_variance))
+                tmp = psi1 * (np.sqrt(precision))
             tmp, _ = dtrtrs(Lm, tmp.T, lower=1)
             A = tdot(tmp) #print A.sum()
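In this certain-inputs branch, psi1 is just the cross-covariance K(X, Z), and the sqrt/dtrtrs/tdot sequence assembles (for scalar beta; with heteroscedastic noise beta enters per row)

A = \beta \, L_m^{-1} \Psi_1^\top \Psi_1 L_m^{-\top}

where L_m is the Cholesky factor of K_mm.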
@@ -144,19 +144,19 @@ class VarDTC(LatentFunctionInference):
         dL_dKmm = backsub_both_sides(Lm, delit)

         # derivatives of L w.r.t. psi
-        dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, gaussian_variance, Lm,
+        dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, precision, Lm,
                                                         VVT_factor, Cpsi1Vf, DBi_plus_BiPBi,
                                                         psi1, het_noise, uncertain_inputs)

         # log marginal likelihood
-        log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, gaussian_variance, het_noise,
+        log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, precision, het_noise,
                                                         psi0, A, LB, trYYT, data_fit, Y)

         #noise derivatives
         dL_dR = _compute_dL_dR(likelihood,
                                het_noise, uncertain_inputs, LB,
                                _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
-                               psi0, psi1, gaussian_variance,
+                               psi0, psi1, precision,
                                data_fit, num_data, output_dim, trYYT, Y, VVT_factor)

         dL_dthetaL = likelihood.exact_inference_gradients(dL_dR,Y_metadata)
@@ -181,7 +181,7 @@ class VarDTC(LatentFunctionInference):
         else:
             print('foobar')
             import ipdb; ipdb.set_trace()
-            psi1V = np.dot(Y.T*gaussian_variance, psi1).T
+            psi1V = np.dot(Y.T*precision, psi1).T
         tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
         tmp, _ = dpotrs(LB, tmp, lower=1)
         woodbury_vector, _ = dtrtrs(Lm, tmp, lower=1, trans=1)