mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-12 13:32:39 +02:00)
all kernels working fine with the psi statistics now
This commit is contained in:
parent 16d9536027
commit de665361a7
5 changed files with 14 additions and 18 deletions
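For context on the commit message: the psi statistics are the kernel expectations under the variational posterior q(X) = prod_n N(x_n; mu_n, diag(S_n)) used by the Bayesian GPLVM and uncertain-input sparse GPs. In the notation of the hunks below (mu, S, inducing inputs Z), they are, per data point n and inducing points m, m':

\psi_0^{(n)} = \mathbb{E}_{q(x_n)}\!\left[k(x_n, x_n)\right]
\psi_1^{(n,m)} = \mathbb{E}_{q(x_n)}\!\left[k(x_n, z_m)\right]
\psi_2^{(n,m,m')} = \mathbb{E}_{q(x_n)}\!\left[k(x_n, z_m)\, k(x_n, z_{m'})\right]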
@@ -6,8 +6,8 @@ import pylab as pb
 import GPy
 np.random.seed(123344)

-N = 3
-M = 2
+N = 10
+M = 3
 Q = 2
 D = 4
 #generate GPLVM-like data
@@ -16,7 +16,7 @@ k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
 K = k.K(X)
 Y = np.random.multivariate_normal(np.zeros(N),K,D).T

-k = GPy.kern.bias(Q) #+ GPy.kern.white(Q)
+k = GPy.kern.linear(Q, ARD = True) + GPy.kern.white(Q)
 # k = GPy.kern.rbf(Q) + GPy.kern.rbf(Q) + GPy.kern.white(Q)
 # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
 # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
@@ -32,7 +32,7 @@ Y -= Y.mean(axis=0)
 # Y /= Y.std(axis=0)

 Q = 5
-k = GPy.kern.linear(Q, ARD = False) + GPy.kern.white(Q)
+k = GPy.kern.linear(Q, ARD = True) + GPy.kern.white(Q)
 m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M = 20)
 m.constrain_positive('(rbf|bias|S|linear|white|noise)')
@@ -41,7 +41,7 @@ m.constrain_positive('(rbf|bias|S|linear|white|noise)')
 # m.unconstrain('white')
 # m.constrain_bounded('white', 1e-6, 10.0)
 # plot_oil(m.X, np.array([1,1]), labels, 'PCA initialization')
-# m.optimize(messages = True)
+m.optimize(messages = True)
 # m.optimize('tnc', messages = True)
 # plot_oil(m.X, m.kern.parts[0].lengthscale, labels, 'B-GPLVM')
 # # pb.figure()
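For readers without GPy at hand, the demo's "generate GPLVM-like data" step can be reproduced with plain NumPy. This is a minimal sketch assuming the rbf + white covariance from the hunk context above; the latent X is drawn at random here because its construction is outside the diff.

# Minimal NumPy sketch of the demo's data generation, assuming
# rbf(Q) + white(Q, 0.00001) as in the hunk context; X is a random
# stand-in for the latent points (not shown in the diff).
import numpy as np

np.random.seed(123344)
N, Q, D = 10, 2, 4                              # sizes after this commit

X = np.random.randn(N, Q)                       # latent points (assumption)
sqdist = np.square(X[:, None, :] - X[None, :, :]).sum(-1)
K = np.exp(-0.5 * sqdist) + 1e-5 * np.eye(N)    # rbf kernel + white jitter
Y = np.random.multivariate_normal(np.zeros(N), K, D).T  # N x D outputs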
@@ -90,22 +90,19 @@ class linear(kernpart):

     def psi0(self,Z,mu,S,target):
         self._psi_computations(Z,mu,S)
-        target += np.sum(self.variances*self.mu2_S)
+        target += np.sum(self.variances*self.mu2_S,1)

     def dpsi0_dtheta(self,partial,Z,mu,S,target):
         self._psi_computations(Z,mu,S)
-        tmp = (partial[:, None] * (np.sum(self.mu2_S,0)))
+        tmp = partial[:, None] * self.mu2_S
         if self.ARD:
             target += tmp.sum(0)
         else:
             target += tmp.sum()

     def dpsi0_dmuS(self,partial, Z,mu,S,target_mu,target_S):
-        target_mu += np.sum(partial[:, None],0) * (2.0*mu*self.variances)
-        target_S += np.sum(partial[:, None] * self.variances, 0)
+        target_mu += partial[:, None] * (2.0*mu*self.variances)
+        target_S += partial[:, None] * self.variances

-    def dpsi0_dZ(self,Z,mu,S,target):
-        pass

     def psi1(self,Z,mu,S,target):
         """the variance, it does nothing"""
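The psi0 fix above follows from the definition: for the linear kernel k(x, x') = sum_q sigma2_q x_q x'_q, the expectation E_{q(x_n)}[k(x_n, x_n)] is sum_q sigma2_q (mu_nq^2 + S_nq), one value per data point, so the old scalar np.sum collapsed an axis that the model needs. A hedged standalone sketch, assuming mu2_S = mu**2 + S as in the kernel's precomputations:

# Sketch of the corrected linear-kernel psi0: one entry per data point.
# Assumes variances (Q,), mu (N, Q), S (N, Q), and mu2_S = mu**2 + S
# as in _psi_computations above.
import numpy as np

def linear_psi0(variances, mu, S):
    mu2_S = np.square(mu) + S            # E[x_nq^2] under q(X)
    return np.sum(variances * mu2_S, 1)  # shape (N,)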
@@ -149,7 +146,7 @@ class linear(kernpart):
     def dpsi2_dZ(self,partial,Z,mu,S,target):
         self._psi_computations(Z,mu,S)
         mu2_S = np.sum(self.mu2_S,0)# Q,
-        target += (partial[:,:,:,None]* (Z * mu2_S * np.square(self.variances))).sum(0).sum(1)
+        target += (partial[:,:,:,None] * (self.mu2_S[:,None,None,:]*(Z*np.square(self.variances)[None,:])[None,None,:,:])).sum(0).sum(1)

    #---------------------------------------#
    #            Precomputations            #
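The old dpsi2_dZ line pre-summed mu2_S over the N axis, mixing contributions across data points before they were weighted by partial; the new expression keeps the N axis so broadcasting pairs each mu2_S[n] with partial[n]. If partial has shape (N, M, M), the new line is equivalent to the einsum below (a restatement under those shape assumptions, not the committed code):

# Einsum restatement of the new dpsi2_dZ contraction, assuming
# partial (N, M, M), mu2_S (N, Q), Z (M, Q), variances (Q,).
import numpy as np

def dpsi2_dZ_contrib(partial, Z, mu2_S, variances):
    Zv = Z * np.square(variances)                          # (M, Q)
    # sums over n and the second M axis, matching .sum(0).sum(1)
    return np.einsum('nab,nq,bq->aq', partial, mu2_S, Zv)  # (M, Q)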
@@ -155,21 +155,20 @@ class rbf(kernpart):
         self._psi_computations(Z,mu,S)
         d_var = 2.*self._psi2/self.variance
         d_length = self._psi2[:,:,:,None]*(0.5*self._psi2_Zdist_sq*self._psi2_denom + 2.*self._psi2_mudist_sq + 2.*S[:,None,None,:]/self.lengthscale2)/(self.lengthscale*self._psi2_denom)
-        d_length = d_length.sum(0)
         target[0] += np.sum(partial*d_var)
         dpsi2_dlength = d_length*partial[:,:,:,None]
         if not self.ARD:
             target[1] += dpsi2_dlength.sum()
         else:
             target[1:] += dpsi2_dlength.sum(0).sum(0).sum(0)

     def dpsi2_dZ(self,partial,Z,mu,S,target):
         self._psi_computations(Z,mu,S)
         term1 = 0.5*self._psi2_Zdist/self.lengthscale2 # M, M, Q
         term2 = self._psi2_mudist/self._psi2_denom/self.lengthscale2 # N, M, M, Q
         dZ = self._psi2[:,:,:,None] * (term1[None] + term2)
-        target += (partial[:,:,:,None]*dZ).sum(0).sum(0) # <----------------- TODO not sure about the first ':' here, should be a None (WAS a none in the debug branch)
+        target += (partial[:,:,:,None]*dZ).sum(0).sum(0)

     def dpsi2_dmuS(self,partial,Z,mu,S,target_mu,target_S):
         """Think N,M,M,Q """
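The removed d_length.sum(0) in the rbf hunk was reducing the N axis before d_length was multiplied by partial, so each data point's lengthscale gradient was paired with the sum of everyone's weights rather than its own. A tiny numeric check, with hypothetical stand-in shapes, of why the reduction must come after the product:

# Numeric check that the N axis must survive until after the product
# with `partial`; shapes are hypothetical stand-ins (N=3, M=2, Q=1).
import numpy as np

rng = np.random.default_rng(0)
partial = rng.standard_normal((3, 2, 2))        # (N, M, M)
d_length = rng.standard_normal((3, 2, 2, 1))    # (N, M, M, Q)

correct = (d_length * partial[:, :, :, None]).sum()
buggy = (d_length.sum(0) * partial[:, :, :, None]).sum()
print(correct, buggy)  # differ: sum_n a_n*b_n != (sum_n a_n)*(sum_n b_n) in general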
@@ -34,7 +34,7 @@ class sparse_GP(GP):
     """

     def __init__(self, X, likelihood, kernel, Z, X_uncertainty=None, Xslices=None,Zslices=None, normalize_X=False):
-        self.scale_factor = 1.0# a scaling factor to help keep the algorithm stable
+        self.scale_factor = 100.0# a scaling factor to help keep the algorithm stable

         self.Z = Z
         self.Zslices = Zslices