Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-09 12:02:38 +02:00

commit 129985998c (parent 2c88528ebd)

    fix ss_mrd and fix white and bias kernel

4 changed files, 21 insertions(+), 17 deletions(-)

@@ -88,17 +88,18 @@ class Add(CombinationKernel):
             # rbf X bias
             #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)):
             elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
-                tmp = p2.psi1(Z, variational_posterior)
-                psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
+                tmp = p2.psi1(Z, variational_posterior).sum(axis=0)
+                psi2 += p1.variance * (tmp[:,None]+tmp[None,:]) #(tmp[:, :, None] + tmp[:, None, :])
             #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)):
             elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
-                tmp = p1.psi1(Z, variational_posterior)
-                psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
+                tmp = p1.psi1(Z, variational_posterior).sum(axis=0)
+                psi2 += p2.variance * (tmp[:,None]+tmp[None,:]) #(tmp[:, :, None] + tmp[:, None, :])
             elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
                 assert np.intersect1d(p1.active_dims, p2.active_dims).size == 0, "only non overlapping kernel dimensions allowed so far"
                 tmp1 = p1.psi1(Z, variational_posterior)
                 tmp2 = p2.psi1(Z, variational_posterior)
-                psi2 += (tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
+                psi2 += np.einsum('nm,no->mo',tmp1,tmp2)+np.einsum('nm,no->mo',tmp2,tmp1)
+                #(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return psi2
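Throughout this hunk the psi2 contribution switches from a per-data-point (N, M, M) tensor to its sum over the N data points, an (M, M) matrix: the psi1 terms are collapsed with .sum(axis=0) and the cross term is rewritten with np.einsum. A quick check that the einsum form equals the old tensor summed over data points (a minimal sketch; the random arrays stand in for the psi1 statistics, and N, M are illustrative):

    import numpy as np

    N, M = 5, 3                  # illustrative data / inducing-point counts
    tmp1 = np.random.rand(N, M)  # stands in for p1.psi1(Z, variational_posterior)
    tmp2 = np.random.rand(N, M)  # stands in for p2.psi1(Z, variational_posterior)

    # old form: one (M, M) slice per data point
    old = (tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
    # new form: the same cross terms, pre-summed over the data points
    new = np.einsum('nm,no->mo', tmp1, tmp2) + np.einsum('nm,no->mo', tmp2, tmp1)

    assert np.allclose(old.sum(axis=0), new)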
@@ -39,7 +39,7 @@ class Static(Kern):

     def psi2(self, Z, variational_posterior):
         K = self.K(variational_posterior.mean, Z)
-        return K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inherriting classes
+        return np.einsum('ij,ik->jk',K,K) #K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inherriting classes


 class White(Static):
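The fallback psi2 in Static adopts the same summed convention: np.einsum('ij,ik->jk', K, K) is exactly (K[:,:,None]*K[:,None,:]).sum(axis=0), so the return value shrinks from (N, M, M) to (M, M). A sketch with a random stand-in for K (sizes are illustrative):

    import numpy as np

    N, M = 4, 3                                # illustrative sizes
    K = np.random.rand(N, M)                   # stands in for self.K(variational_posterior.mean, Z)
    per_point = K[:, :, None] * K[:, None, :]  # old convention, shape (N, M, M)
    summed = np.einsum('ij,ik->jk', K, K)      # new convention, shape (M, M)
    assert np.allclose(per_point.sum(axis=0), summed)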
@@ -53,7 +53,7 @@ class White(Static):
         return np.zeros((X.shape[0], X2.shape[0]))

     def psi2(self, Z, variational_posterior):
-        return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
+        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)

     def update_gradients_full(self, dL_dK, X, X2=None):
         self.variance.gradient = np.trace(dL_dK)
@@ -82,12 +82,12 @@ class Bias(Static):
         self.variance.gradient = dL_dKdiag.sum()

     def psi2(self, Z, variational_posterior):
-        ret = np.empty((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
-        ret[:] = self.variance**2
+        ret = np.empty((Z.shape[0], Z.shape[0]), dtype=np.float64)
+        ret[:] = self.variance*self.variance*variational_posterior.shape[0]
         return ret

     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()
+        self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0]

 class Fixed(Static):
     def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
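For the Bias kernel, K(mu, Z) is constant at the variance, so every entry of the old (N, M, M) psi2 tensor was variance**2; summing over the N data points gives the new fill value variance*variance*N, and the same factor of N appears in the psi2 term of update_gradients_expectations (d(N*variance**2)/d(variance) = 2*variance*N per entry). A sketch of that arithmetic (sizes and the variance value are illustrative):

    import numpy as np

    N, M, variance = 4, 3, 0.5                      # illustrative sizes and bias variance
    K = np.full((N, M), variance)                   # Bias kernel: K(mu, Z) is constant
    old = K[:, :, None] * K[:, None, :]             # old per-point psi2, shape (N, M, M)
    new = np.full((M, M), variance * variance * N)  # new summed psi2, shape (M, M)
    assert np.allclose(old.sum(axis=0), new)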
@@ -97,7 +97,7 @@ class Fixed(Static):
         :param variance: the variance of the kernel
         :type variance: float
         """
-        super(Bias, self).__init__(input_dim, variance, active_dims, name)
+        super(Fixed, self).__init__(input_dim, variance, active_dims, name)
         self.fixed_K = covariance_matrix
     def K(self, X, X2):
         return self.variance * self.fixed_K
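The super(Bias, ...) call was a real bug: in Python 2, super(type, obj) requires obj to be an instance (or subtype) of type, and a Fixed instance is not a Bias, so constructing a Fixed kernel raised TypeError. A minimal reproduction of the failure mode (stand-in classes, not GPy's):

    class Static(object):
        def __init__(self, variance):
            self.variance = variance

    class Bias(Static):
        pass

    class Fixed(Static):
        def __init__(self, variance):
            # super(Bias, self) would raise "TypeError: super(type, obj):
            # obj must be an instance or subtype of type" here, since self
            # is not a Bias; super() must name the class it appears in.
            super(Fixed, self).__init__(variance)

    Fixed(1.0)  # works only with the corrected super() call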
@@ -112,7 +112,7 @@ class Fixed(Static):
         self.variance.gradient = np.einsum('i,i', dL_dKdiag, self.fixed_K)

     def psi2(self, Z, variational_posterior):
-        return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
+        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)

     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         self.variance.gradient = dL_dpsi0.sum()
@@ -15,13 +15,16 @@ class SSMRD(Model):

         self.updates = False
         self.models = [SSGPLVM(y, input_dim, X=X, X_variance=X_variance, num_inducing=num_inducing,Z=Z,init=initx,
-                               kernel=kernel if kernel else None,inference_method=inference_method,likelihood=likelihoods,
+                               kernel=kernel.copy() if kernel else None,inference_method=inference_method,likelihood=likelihoods,
                                name='model_'+str(i)) for i,y in enumerate(Ylist)]
         self.add_parameters(*(self.models))
-        self.updates = True

-        [[self.models[j].X.mean.flat[i:i+1].tie('mean_'+str(i)) for j in xrange(len(self.models))] for i in xrange(self.models[0].X.mean.size)]
-        [[self.models[j].X.variance.flat[i:i+1].tie('var_'+str(i)) for j in xrange(len(self.models))] for i in xrange(self.models[0].X.variance.size)]
+        [[[self.models[m].X.mean[i,j:j+1].tie('mean_'+str(i)+'_'+str(j)) for m in xrange(len(self.models))] for j in xrange(self.models[0].X.mean.shape[1])]
+         for i in xrange(self.models[0].X.mean.shape[0])]
+        [[[self.models[m].X.variance[i,j:j+1].tie('var_'+str(i)+'_'+str(j)) for m in xrange(len(self.models))] for j in xrange(self.models[0].X.variance.shape[1])]
+         for i in xrange(self.models[0].X.variance.shape[0])]
+
+        self.updates = True

     def parameters_changed(self):
         super(SSMRD, self).parameters_changed()
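Two things change in SSMRD.__init__: the latent X.mean and X.variance parameters are now tied across models per (row, column) entry rather than per flat index, with self.updates = True deferred until the ties are in place, and each SSGPLVM receives kernel.copy() instead of the same kernel object. The copy presumably matters because passing one shared object would let every model read and write the same kernel parameters. A minimal sketch of that difference (the Kern class below is a hypothetical stand-in, not GPy's):

    import copy

    class Kern(object):                    # hypothetical stand-in for a GPy kernel
        def __init__(self, variance=1.0):
            self.variance = variance
        def copy(self):
            return copy.deepcopy(self)

    base = Kern()
    shared = [base, base]                  # old behaviour: one shared object
    owned = [base.copy(), base.copy()]     # new behaviour: independent copies

    shared[0].variance = 5.0
    assert shared[1].variance == 5.0       # a change leaks into the other model
    owned[0].variance = 5.0
    assert owned[1].variance == 1.0        # each model keeps its own parameters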