Psi statistics working for linear ARD kernel

This commit is contained in:
Nicolò Fusi 2013-02-07 10:01:05 +00:00
parent 2e948d888a
commit baa06181e8
3 changed files with 16 additions and 8 deletions

View file

@ -17,7 +17,7 @@ K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N),K,D).T Y = np.random.multivariate_normal(np.zeros(N),K,D).T
# k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001) # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
k = GPy.kern.linear(Q, ARD = False) + GPy.kern.white(Q, 0.00001) k = GPy.kern.linear(Q, ARD = True) + GPy.kern.white(Q, 0.00001)
m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M=M) m = GPy.models.Bayesian_GPLVM(Y, Q, kernel = k, M=M)
m.constrain_positive('(rbf|bias|noise|white|S)') m.constrain_positive('(rbf|bias|noise|white|S)')
# m.constrain_fixed('S', 1) # m.constrain_fixed('S', 1)

View file

@ -205,7 +205,6 @@ class opt_SGD(Optimizer):
self.x_opt -= step + momentum_term self.x_opt -= step + momentum_term
if self.messages == 2: if self.messages == 2:
if count == last_printed_count + 20 or count == 0:
status = "evaluating {feature: 5d}/{tot: 5d} \t f: {f: 2.3f} \t non-missing: {nm: 4d}\r".format(feature = count, tot = len(features), f = f, nm = Nj) status = "evaluating {feature: 5d}/{tot: 5d} \t f: {f: 2.3f} \t non-missing: {nm: 4d}\r".format(feature = count, tot = len(features), f = f, nm = Nj)
sys.stdout.write(status) sys.stdout.write(status)
sys.stdout.flush() sys.stdout.flush()

View file

@ -95,7 +95,11 @@ class linear(kernpart):
def dpsi0_dtheta(self,partial,Z,mu,S,target): def dpsi0_dtheta(self,partial,Z,mu,S,target):
self._psi_computations(Z,mu,S) self._psi_computations(Z,mu,S)
target += (partial[:, None] * (np.sum(self.mu2_S,0))).sum() tmp = (partial[:, None] * (np.sum(self.mu2_S,0)))
if self.ARD:
target += tmp.sum(0)
else:
target += tmp.sum()
def dpsi0_dmuS(self,partial, Z,mu,S,target_mu,target_S): def dpsi0_dmuS(self,partial, Z,mu,S,target_mu,target_S):
target_mu += partial[:, None] * (2.0*mu*self.variances) * mu.shape[0] target_mu += partial[:, None] * (2.0*mu*self.variances) * mu.shape[0]
@ -130,7 +134,12 @@ class linear(kernpart):
def dpsi2_dtheta(self,partial,Z,mu,S,target): def dpsi2_dtheta(self,partial,Z,mu,S,target):
self._psi_computations(Z,mu,S) self._psi_computations(Z,mu,S)
target += (partial[:,:,:,None]*(2.*self.ZZ*self.mu2_S[:,None,None,:]*self.variances)).sum() tmp = (partial[:,:,:,None]*(2.*self.ZZ*self.mu2_S[:,None,None,:]*self.variances))
if self.ARD:
target += tmp.sum(0).sum(0).sum(0)
else:
target += tmp.sum()
def dpsi2_dmuS(self,partial,Z,mu,S,target_mu,target_S): def dpsi2_dmuS(self,partial,Z,mu,S,target_mu,target_S):
"""Think N,M,M,Q """ """Think N,M,M,Q """