mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-10 20:42:39 +02:00
commit 527586a012
parent 3fbd7e4943
lots of bugfixes after refactoring

12 changed files with 53 additions and 53 deletions
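Every hunk below makes the same fix: call sites that still referenced the old attribute self.N after the refactoring are updated to the new name self.num_data (plus one docstring touch-up in FITCClassification). As a minimal sketch of how such a rename could keep old external call sites working during a transition, the old name could be exposed as a deprecated read-only alias; this alias is purely illustrative and is not part of this commit:

import warnings

class Model:
    def __init__(self, num_data):
        self.num_data = num_data  # new canonical attribute name

    @property
    def N(self):
        # hypothetical deprecated alias, kept only for backward compatibility
        warnings.warn("'N' is deprecated; use 'num_data'", DeprecationWarning)
        return self.num_data

m = Model(100)
assert m.N == m.num_data == 100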
@@ -73,8 +73,8 @@ class BayesianGPLVM(SparseGP, GPLVM):
             self._oldps.insert(0, p.copy())

     def _get_param_names(self):
-        X_names = sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.N)], [])
-        S_names = sum([['X_variance_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.N)], [])
+        X_names = sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], [])
+        S_names = sum([['X_variance_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], [])
         return (X_names + S_names + SparseGP._get_param_names(self))

     def _get_params(self):
@@ -96,7 +96,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
     def _set_params(self, x, save_old=True, save_count=0):
         # try:
         x = self._clipped(x)
-        N, input_dim = self.N, self.input_dim
+        N, input_dim = self.num_data, self.input_dim
         self.X = x[:self.X.size].reshape(N, input_dim).copy()
         self.X_variance = x[(N * input_dim):(2 * N * input_dim)].reshape(N, input_dim).copy()
         SparseGP._set_params(self, x[(2 * N * input_dim):])
@@ -126,7 +126,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
     def KL_divergence(self):
         var_mean = np.square(self.X).sum()
         var_S = np.sum(self.X_variance - np.log(self.X_variance))
-        return 0.5 * (var_mean + var_S) - 0.5 * self.input_dim * self.N
+        return 0.5 * (var_mean + var_S) - 0.5 * self.input_dim * self.num_data

     def log_likelihood(self):
         ll = SparseGP.log_likelihood(self)
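For reference, KL_divergence computes the KL divergence between the factorized Gaussian variational posterior q(X), with means mu_nq = X and variances S_nq = X_variance, and the standard-normal prior. With N = num_data and Q = input_dim this is the standard closed form

    \mathrm{KL}\big(q(X)\,\|\,\mathcal{N}(0, I)\big) = \frac{1}{2}\sum_{n,q}\big(\mu_{nq}^2 + S_{nq} - \log S_{nq}\big) - \frac{NQ}{2},

which matches 0.5 * (var_mean + var_S) - 0.5 * self.input_dim * self.num_data above, with var_mean = sum of mu^2 and var_S = sum of (S - log S).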
@@ -146,11 +146,11 @@ class BayesianGPLVM(SparseGP, GPLVM):
             self._savedpsiKmm.append([self.f_call, [self.Kmm, self.dL_dKmm]])
         # sf2 = self.scale_factor ** 2
         if self.likelihood.is_heteroscedastic:
-            A = -0.5 * self.N * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.V * self.likelihood.Y)
+            A = -0.5 * self.num_data * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.V * self.likelihood.Y)
             # B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A) * sf2)
             B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
         else:
-            A = -0.5 * self.N * self.input_dim * (np.log(2.*np.pi) + np.log(self.likelihood._variance)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
+            A = -0.5 * self.num_data * self.input_dim * (np.log(2.*np.pi) + np.log(self.likelihood._variance)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
             # B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A) * sf2)
             B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
         C = -self.input_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
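A note on the A term edited in both branches above: it is the Gaussian normalization/data part of the collapsed variational bound. In the heteroscedastic branch, writing beta_n for likelihood.precision, D for self.input_dim, and V for the precision-scaled targets, the code computes

    A = -\frac{1}{2} N D \log 2\pi + \frac{1}{2}\sum_n \log \beta_n - \frac{1}{2}\sum_{n,d} V_{nd} Y_{nd},

with N = num_data, which is exactly where the renamed attribute enters. This reading of the code is a gloss, not a derivation of the bound.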
@@ -266,9 +266,9 @@ class BayesianGPLVM(SparseGP, GPLVM):

     def _debug_filter_params(self, x):
         start, end = 0, self.X.size,
-        X = x[start:end].reshape(self.N, self.input_dim)
+        X = x[start:end].reshape(self.num_data, self.input_dim)
         start, end = end, end + self.X_variance.size
-        X_v = x[start:end].reshape(self.N, self.input_dim)
+        X_v = x[start:end].reshape(self.num_data, self.input_dim)
         start, end = end, end + (self.num_inducing * self.input_dim)
         Z = x[start:end].reshape(self.num_inducing, self.input_dim)
         start, end = end, end + self.input_dim
@@ -52,7 +52,7 @@ class FITC(SparseGP):
         else:
             if self.likelihood.is_heteroscedastic:
                 assert self.likelihood.input_dim == 1
-            tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.N)))
+            tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.num_data)))
             tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
             self.A = tdot(tmp)

@@ -108,7 +108,7 @@ class FITC(SparseGP):
         self._dpsi1_dX_jkj = 0
         self._dpsi1_dtheta_jkj = 0

-        for i, V_n, alpha_n, gamma_n, gamma_k in zip(range(self.N), self.V_star, alpha, gamma_2, gamma_3):
+        for i, V_n, alpha_n, gamma_n, gamma_k in zip(range(self.num_data), self.V_star, alpha, gamma_2, gamma_3):
             K_pp_K = np.dot(Kmmipsi1[:, i:(i + 1)], Kmmipsi1[:, i:(i + 1)].T)

             # Diag_dpsi1 = Diag_dA_dpsi1: yT*beta_star*y + Diag_dC_dpsi1 +Diag_dD_dpsi1
@@ -155,7 +155,7 @@ class FITC(SparseGP):

     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
-        A = -0.5 * self.N * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
+        A = -0.5 * self.num_data * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
         C = -self.input_dim * (np.sum(np.log(np.diag(self.LB))))
         D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
         return A + C + D
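Reading off the log_likelihood body above (a gloss under the same caveat as before): with d = self.input_dim, the returned lower bound is

    \mathcal{L} = A + C + D, \qquad C = -d \sum_m \log (L_B)_{mm} = -\tfrac{d}{2} \log\det B, \qquad D = \tfrac{1}{2} \big\lVert L_B^{-1} L_m^{-1} \Psi_1^{\top} V \big\rVert_F^2,

where A is the same Gaussian normalizer as before with beta_star in place of beta, L_B and L_m are the Cholesky factors held in self.LB and self.Lm, and _LBi_Lmi_psi1V caches the vector inside the norm.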
@@ -16,7 +16,7 @@ class FITCClassification(FITC):

     :param X: input observations
     :param Y: observed values
-    :param likelihood: a GPy likelihood, defaults to binomial with probit link_function
+    :param likelihood: a GPy likelihood, defaults to Binomial with probit link_function
     :param kernel: a GPy kernel, defaults to rbf+white
     :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
     :type normalize_X: False|True
@@ -112,9 +112,9 @@ class GeneralizedFITC(SparseGP):
         self.mu = self.w + np.dot(self.P,self.Gamma)

         # Remove extra term from dL_dpsi1
-        self.dL_dpsi1 -= mdot(self.Lmi.T,Lmipsi1*self.likelihood.precision.flatten().reshape(1,self.N))
+        self.dL_dpsi1 -= mdot(self.Lmi.T,Lmipsi1*self.likelihood.precision.flatten().reshape(1,self.num_data))
         #self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm)
-        #self.dL_dpsi1 -= mdot(self.Kmmi,self.psi1*self.likelihood.precision.flatten().reshape(1,self.N)) #dB
+        #self.dL_dpsi1 -= mdot(self.Kmmi,self.psi1*self.likelihood.precision.flatten().reshape(1,self.num_data)) #dB

         #########333333
         #self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B)
@@ -142,7 +142,7 @@ class GeneralizedFITC(SparseGP):
         else:
             raise NotImplementedError, "homoscedastic derivatives not implemented"
             #likelihood is not heterscedatic
-            #self.partial_for_likelihood = - 0.5 * self.N*self.input_dim*self.likelihood.precision + 0.5 * np.sum(np.square(self.likelihood.Y))*self.likelihood.precision**2
+            #self.partial_for_likelihood = - 0.5 * self.num_data*self.input_dim*self.likelihood.precision + 0.5 * np.sum(np.square(self.likelihood.Y))*self.likelihood.precision**2
             #self.partial_for_likelihood += 0.5 * self.input_dim * trace_dot(self.Bi,self.A)*self.likelihood.precision
             #self.partial_for_likelihood += self.likelihood.precision*(0.5*trace_dot(self.psi2_beta_scaled,self.E*sf2) - np.trace(self.Cpsi1VVpsi1))
             #TODO partial derivative vector for the likelihood not implemented
@@ -164,9 +164,9 @@ class GeneralizedFITC(SparseGP):
         """ Compute the (lower bound on the) log marginal likelihood """
         sf2 = self.scale_factor**2
         if self.likelihood.is_heteroscedastic:
-            A = -0.5*self.N*self.input_dim*np.log(2.*np.pi) +0.5*np.sum(np.log(self.likelihood.precision)) -0.5*np.sum(self.V*self.likelihood.Y)
+            A = -0.5*self.num_data*self.input_dim*np.log(2.*np.pi) +0.5*np.sum(np.log(self.likelihood.precision)) -0.5*np.sum(self.V*self.likelihood.Y)
         else:
-            A = -0.5*self.N*self.input_dim*(np.log(2.*np.pi) + np.log(self.likelihood._variance)) -0.5*self.likelihood.precision*self.likelihood.trYYT
+            A = -0.5*self.num_data*self.input_dim*(np.log(2.*np.pi) + np.log(self.likelihood._variance)) -0.5*self.likelihood.precision*self.likelihood.trYYT
         C = -self.input_dim * (np.sum(np.log(np.diag(self.LB))) + 0.5*self.num_inducing*np.log(sf2))
         #C = -0.5*self.input_dim * (self.B_logdet + self.num_inducing*np.log(sf2))
         D = 0.5*np.sum(np.square(self._LBi_Lmi_psi1V))
@@ -42,13 +42,13 @@ class GPLVM(GP):
         return np.random.randn(Y.shape[0], input_dim)

     def _get_param_names(self):
-        return sum([['X_%i_%i'%(n,q) for q in range(self.input_dim)] for n in range(self.N)],[]) + GP._get_param_names(self)
+        return sum([['X_%i_%i'%(n,q) for q in range(self.input_dim)] for n in range(self.num_data)],[]) + GP._get_param_names(self)

     def _get_params(self):
         return np.hstack((self.X.flatten(), GP._get_params(self)))

     def _set_params(self,x):
-        self.X = x[:self.N*self.input_dim].reshape(self.N,self.input_dim).copy()
+        self.X = x[:self.num_data*self.input_dim].reshape(self.num_data,self.input_dim).copy()
         GP._set_params(self, x[self.X.size:])

     def _log_likelihood_gradients(self):
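_get_params and _set_params above round-trip the latent X through a flat parameter vector; the first num_data*input_dim entries are X. A small self-contained sketch of that convention, with toy shapes and a stand-in theta that are purely illustrative:

import numpy as np

num_data, input_dim = 4, 2
X = np.random.randn(num_data, input_dim)
theta = np.array([1.0, 0.1])               # stand-in for the GP hyperparameters

x = np.hstack((X.flatten(), theta))        # the layout _get_params produces
X_back = x[:num_data * input_dim].reshape(num_data, input_dim)

assert np.allclose(X_back, X)              # _set_params recovers X exactly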
@@ -74,8 +74,8 @@ class MRD(model):
         nparams = numpy.array([0] + [SparseGP._get_params(g).size - g.Z.size for g in self.bgplvms])
         self.nparams = nparams.cumsum()

-        self.N = self.gref.N
-        self.NQ = self.N * self.input_dim
+        self.num_data = self.gref.num_data
+        self.NQ = self.num_data * self.input_dim
         self.MQ = self.num_inducing * self.input_dim

         model.__init__(self) # @UndefinedVariable
@@ -142,8 +142,8 @@ class MRD(model):
             self._init_Z(initz, self.X)

     def _get_param_names(self):
-        # X_names = sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.N)], [])
-        # S_names = sum([['X_variance_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.N)], [])
+        # X_names = sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], [])
+        # S_names = sum([['X_variance_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], [])
         n1 = self.gref._get_param_names()
         n1var = n1[:self.NQ * 2 + self.MQ]
         map_names = lambda ns, name: map(lambda x: "{1}_{0}".format(*x),
@@ -169,8 +169,8 @@ class MRD(model):
         return params

#     def _set_var_params(self, g, X, X_var, Z):
-#         g.X = X.reshape(self.N, self.input_dim)
-#         g.X_variance = X_var.reshape(self.N, self.input_dim)
+#         g.X = X.reshape(self.num_data, self.input_dim)
+#         g.X_variance = X_var.reshape(self.num_data, self.input_dim)
#         g.Z = Z.reshape(self.num_inducing, self.input_dim)
#
#     def _set_kern_params(self, g, p):
@@ -28,14 +28,14 @@ class SparseGPLVM(SparseGPRegression, GPLVM):
         SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)

     def _get_param_names(self):
-        return (sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.N)], [])
+        return (sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], [])
                 + SparseGPRegression._get_param_names(self))

     def _get_params(self):
         return np.hstack((self.X.flatten(), SparseGPRegression._get_params(self)))

     def _set_params(self, x):
-        self.X = x[:self.X.size].reshape(self.N, self.input_dim).copy()
+        self.X = x[:self.X.size].reshape(self.num_data, self.input_dim).copy()
         SparseGPRegression._set_params(self, x[self.X.size:])

     def log_likelihood(self):