Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-13 14:03:20 +02:00
Remove the scale factor and move V = Y*beta into the likelihoods

parent 65ead17ff5 · commit f39afb9ea5
3 changed files with 140 additions and 122 deletions
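The change in this commit: both likelihoods now pre-compute and cache the precision-scaled targets V = beta * Y, so sparse_GP reads self.likelihood.V instead of forming (precision / scale_factor) * Y itself, and the ad-hoc scale_factor machinery is commented out. A minimal sketch of the cached quantity, using standalone arrays rather than the real classes (names here are illustrative, not GPy's API):

    import numpy as np

    N, D = 5, 2
    Y = np.random.randn(N, D)

    # Homoscedastic Gaussian likelihood: beta is a scalar precision.
    variance = 0.1
    precision = 1.0 / variance
    V_gaussian = precision * Y            # shape (N, D)

    # EP likelihood: one site precision per data point, broadcast over outputs.
    tau_tilde = np.ones(N)                # site precisions
    precision_ep = tau_tilde[:, None]     # (N, 1) column
    V_ep = precision_ep * Y               # shape (N, D)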
@@ -32,6 +32,7 @@ class EP(likelihood):
         self.precision = np.ones(self.N)[:,None]
         self.Z = 0
         self.YYT = None
+        self.V = self.precision * self.Y

     def restart(self):
         self.tau_tilde = np.zeros(self.N)

@@ -41,6 +42,7 @@ class EP(likelihood):
         self.precision = np.ones(self.N)[:,None]
         self.Z = 0
         self.YYT = None
+        self.V = self.precision * self.Y

     def predictive_values(self,mu,var,full_cov):
         if full_cov:

@@ -67,6 +69,7 @@ class EP(likelihood):
         self.YYT = np.dot(self.Y,self.Y.T)
         self.covariance_matrix = np.diag(1./self.tau_tilde)
         self.precision = self.tau_tilde[:,None]
+        self.V = self.precision * self.Y

     def fit_full(self,K):
         """

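All three EP hunks add the same line immediately after each point where self.precision is (re)assigned: at construction, on restart, and once the fitted site precisions tau_tilde are in place. That keeps V consistent with the current precisions. A hypothetical condensed sketch of that invariant (not the real EP class):

    import numpy as np

    class EPStateSketch:
        """Illustration only: V is refreshed wherever precision changes."""
        def __init__(self, Y):
            self.Y = Y
            self.N = Y.shape[0]
            self._update_precision(np.ones(self.N)[:, None])

        def _update_precision(self, precision):
            self.precision = precision
            self.V = self.precision * self.Y   # kept in lock-step

        def after_fit(self, tau_tilde):
            # once EP has converged, the site precisions define the new V
            self._update_precision(tau_tilde[:, None])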
@@ -29,6 +29,7 @@ class Gaussian(likelihood):

         self.set_data(data)

+        self._variance = np.asarray(variance) + 1.
         self._set_params(np.asarray(variance))

     def set_data(self, data):

@@ -50,9 +51,12 @@ class Gaussian(likelihood):
         return ["noise_variance"]

     def _set_params(self, x):
-        self._variance = float(x)
-        self.covariance_matrix = np.eye(self.N) * self._variance
-        self.precision = 1. / self._variance
+        x = float(x)
+        if self._variance != x:
+            self._variance = x
+            self.covariance_matrix = np.eye(self.N) * self._variance
+            self.precision = 1. / self._variance
+            self.V = (self.precision) * self.Y

     def predictive_values(self, mu, var, full_cov):
         """

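Two details in the Gaussian hunks work together: __init__ now seeds self._variance with variance + 1., and _set_params only rebuilds the derived quantities when the incoming value actually differs. The sentinel guarantees the first _set_params(variance) call never matches and therefore always runs; afterwards, repeated calls with an unchanged variance skip the O(N^2) rebuild of covariance_matrix. A minimal sketch of the pattern (hypothetical class, not GPy's):

    import numpy as np

    class GaussianParamsSketch:
        def __init__(self, Y, variance):
            self.Y = Y
            self.N = Y.shape[0]
            self._variance = np.asarray(variance) + 1.   # sentinel: forces first update
            self._set_params(np.asarray(variance))

        def _set_params(self, x):
            x = float(x)
            if self._variance != x:                      # rebuild only on real change
                self._variance = x
                self.covariance_matrix = np.eye(self.N) * self._variance
                self.precision = 1. / self._variance
                self.V = self.precision * self.Y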
@@ -35,8 +35,8 @@ class sparse_GP(GP):
     """

     def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False):
-        self.scale_factor = 100.0 # a scaling factor to help keep the algorithm stable
-        self.auto_scale_factor = False
+        # self.scale_factor = 100.0 # a scaling factor to help keep the algorithm stable
+        # self.auto_scale_factor = False
         self.Z = Z
         self.M = Z.shape[0]
         self.likelihood = likelihood

@@ -68,8 +68,8 @@ class sparse_GP(GP):
         self.psi2 = None

     def _computations(self):
-        sf = self.scale_factor
-        sf2 = sf ** 2
+        # sf = self.scale_factor
+        # sf2 = sf ** 2

         # factor Kmm
         self.Lm = jitchol(self.Kmm)

@@ -78,7 +78,8 @@ class sparse_GP(GP):
         if self.likelihood.is_heteroscedastic:
             assert self.likelihood.D == 1 # TODO: what if the likelihood is heteroscedastic and there are multiple independent outputs?
             if self.has_uncertain_inputs:
-                psi2_beta_scaled = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.N, 1, 1) / sf2)).sum(0)
+                # psi2_beta_scaled = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.N, 1, 1) / sf2)).sum(0)
+                psi2_beta_scaled = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.N, 1, 1))).sum(0)
                 evals, evecs = linalg.eigh(psi2_beta_scaled)
                 clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable
                 if not np.allclose(evals, clipped_evals):

@@ -87,12 +88,14 @@ class sparse_GP(GP):
                 tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                 self.A = tdot(tmp)
             else:
-                tmp = self.psi1 * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.N)) / sf)
+                # tmp = self.psi1 * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.N)) / sf)
+                tmp = self.psi1 * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.N)))
                 tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                 self.A = tdot(tmp)
         else:
             if self.has_uncertain_inputs:
-                psi2_beta_scaled = (self.psi2 * (self.likelihood.precision / sf2)).sum(0)
+                # psi2_beta_scaled = (self.psi2 * (self.likelihood.precision / sf2)).sum(0)
+                psi2_beta_scaled = (self.psi2 * (self.likelihood.precision)).sum(0)
                 evals, evecs = linalg.eigh(psi2_beta_scaled)
                 clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable
                 if not np.allclose(evals, clipped_evals):

@@ -101,16 +104,18 @@ class sparse_GP(GP):
                 tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                 self.A = tdot(tmp)
             else:
-                tmp = self.psi1 * (np.sqrt(self.likelihood.precision) / sf)
+                # tmp = self.psi1 * (np.sqrt(self.likelihood.precision) / sf)
+                tmp = self.psi1 * (np.sqrt(self.likelihood.precision))
                 tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
                 self.A = tdot(tmp)

         # factor B
-        self.B = np.eye(self.M) / sf2 + self.A
+        # self.B = np.eye(self.M) / sf2 + self.A
+        self.B = np.eye(self.M) + self.A
         self.LB = jitchol(self.B)

-        self.V = (self.likelihood.precision / self.scale_factor) * self.likelihood.Y
-        self.psi1V = np.dot(self.psi1, self.V)
+        # TODO: make a switch for either first compute psi1V, or VV.T
+        self.psi1V = np.dot(self.psi1, self.likelihood.V)

        # back substitute C into psi1V
        tmp, info1 = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)

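With the scale factor gone, the homoscedastic branch reduces to whitening the precision-scaled Psi1 against the Cholesky factor of Kmm and forming its Gram matrix. A rough standalone sketch of what the branch computes, with scipy.linalg.solve_triangular standing in for the raw LAPACK dtrtrs call and tdot(x) read as x @ x.T; shapes and data are assumptions for illustration:

    import numpy as np
    from scipy.linalg import cholesky, solve_triangular

    rng = np.random.default_rng(0)
    M, N = 3, 10
    psi1 = rng.standard_normal((M, N))           # K_mn, certain-inputs case
    W = rng.standard_normal((M, M))
    Kmm = W @ W.T + 1e-6 * np.eye(M)             # positive-definite inducing kernel
    beta = 4.0                                   # homoscedastic noise precision

    Lm = cholesky(Kmm, lower=True)               # Kmm = Lm Lm^T (jitchol analogue)
    tmp = psi1 * np.sqrt(beta)                   # no more division by sf
    tmp = solve_triangular(Lm, tmp, lower=True)  # Lm^{-1} psi1 sqrt(beta)
    A = tmp @ tmp.T                              # tdot(tmp)
    B = np.eye(M) + A                            # previously np.eye(M)/sf2 + A
    LB = cholesky(B, lower=True)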
@@ -121,14 +126,16 @@ class sparse_GP(GP):
         # Compute dL_dKmm
         tmp = tdot(self._LBi_Lmi_psi1V)
         self.DBi_plus_BiPBi = backsub_both_sides(self.LB, self.D * np.eye(self.M) + tmp)
-        tmp = -0.5 * self.DBi_plus_BiPBi / sf2
-        tmp += -0.5 * self.B * sf2 * self.D
+        # tmp = -0.5 * self.DBi_plus_BiPBi / sf2
+        # tmp += -0.5 * self.B * sf2 * self.D
+        tmp = -0.5 * self.DBi_plus_BiPBi
+        tmp += -0.5 * self.B * self.D
         tmp += self.D * np.eye(self.M)
         self.dL_dKmm = backsub_both_sides(self.Lm, tmp)

         # Compute dL_dpsi # FIXME: this is untested for the heteroscedastic + uncertain inputs case
         self.dL_dpsi0 = -0.5 * self.D * (self.likelihood.precision * np.ones([self.N, 1])).flatten()
-        self.dL_dpsi1 = np.dot(self.Cpsi1V, self.V.T)
+        self.dL_dpsi1 = np.dot(self.Cpsi1V, self.likelihood.V.T)
         dL_dpsi2_beta = 0.5 * backsub_both_sides(self.Lm, self.D * np.eye(self.M) - self.DBi_plus_BiPBi)
         if self.likelihood.is_heteroscedastic:
             if self.has_uncertain_inputs:

@@ -156,21 +163,25 @@ class sparse_GP(GP):
         else:
             # likelihood is not heteroscedastic
             self.partial_for_likelihood = -0.5 * self.N * self.D * self.likelihood.precision + 0.5 * self.likelihood.trYYT * self.likelihood.precision ** 2
-            self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision * sf2)
+            # self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision * sf2)
+            self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self.A) * self.likelihood.precision)
             self.partial_for_likelihood += self.likelihood.precision * (0.5 * np.sum(self.A * self.DBi_plus_BiPBi) - np.sum(np.square(self._LBi_Lmi_psi1V)))


     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
-        sf2 = self.scale_factor ** 2
+        # sf2 = self.scale_factor ** 2
         if self.likelihood.is_heteroscedastic:
-            A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.V * self.likelihood.Y)
-            B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A) * sf2)
+            A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
+            # B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A) * sf2)
+            B = -0.5 * self.D * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
         else:
             A = -0.5 * self.N * self.D * (np.log(2.*np.pi) + np.log(self.likelihood._variance)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
-            B = -0.5 * self.D * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A) * sf2)
-        C = -self.D * (np.sum(np.log(np.diag(self.LB))) + 0.5 * self.M * np.log(sf2))
+            # B = -0.5 * self.D * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A) * sf2)
+            B = -0.5 * self.D * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
+        # C = -self.D * (np.sum(np.log(np.diag(self.LB))) + 0.5 * self.M * np.log(sf2))
+        C = -self.D * (np.sum(np.log(np.diag(self.LB))))  # + 0.5 * self.M * np.log(sf2))
         D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
         return A + B + C + D

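The rewritten C term is still just the stable log-determinant of B: since B = LB LB^T, log det(B) = 2 * sum(log(diag(LB))), so -self.D * np.sum(np.log(np.diag(self.LB))) equals -0.5 * D * log det(B). The commented-out 0.5 * self.M * np.log(sf2) correction is identically zero once scale_factor is fixed at 1 (as the old code below sets it), and disappears entirely with the scale factor removed. A small standalone check of the Cholesky identity, with illustrative values:

    import numpy as np
    from scipy.linalg import cholesky

    rng = np.random.default_rng(1)
    M, D = 4, 2
    W = rng.standard_normal((M, M))
    B = np.eye(M) + W @ W.T                  # positive definite, like I + A
    LB = cholesky(B, lower=True)

    C_chol = -D * np.sum(np.log(np.diag(LB)))
    C_det = -0.5 * D * np.log(np.linalg.det(B))
    assert np.isclose(C_chol, C_det)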
@@ -186,7 +197,7 @@ class sparse_GP(GP):
         # self.scale_factor = max(100,np.sqrt(self.psi2_beta_scaled.sum(0).mean()))
         # else:
         # self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
-        self.scale_factor = 1.
+        # self.scale_factor = 100.
         self._computations()

     def _get_params(self):