DeprecationWarning: Substituted all (!) flapack occ. with lapack (scipy said so)

This commit is contained in:
Max Zwiessele 2013-05-30 10:37:03 +01:00
parent 32034872af
commit a53690ab7f
15 changed files with 101 additions and 96 deletions

View file

@ -62,7 +62,7 @@ class model(parameterised):
if len(tie_matches) > 1:
raise ValueError, "cannot place prior across multiple ties"
elif len(tie_matches) == 1:
which = which[:1] # just place a prior object on the first parameter
which = which[:1] # just place a prior object on the first parameter
# check constraints are okay
@ -89,18 +89,18 @@ class model(parameterised):
for w in which:
self.priors[w] = what
def get_gradient(self, name, return_names=False):
def get_gradient(self, name, return_names=False):
"""
Get model gradient(s) by name. The name is applied as a regular expression and all parameters that match that regular expression are returned.
Get model gradient(s) by name. The name is applied as a regular expression and all parameters that match that regular expression are returned.
"""
matches = self.grep_param_names(name)
matches = self.grep_param_names(name)
if len(matches):
if return_names:
return self._log_likelihood_gradients()[matches], np.asarray(self._get_param_names())[matches].tolist()
else:
return self._log_likelihood_gradients()[matches]
else:
raise AttributeError, "no parameter matches %s" % name
raise AttributeError, "no parameter matches %s" % name
def log_prior(self):
"""evaluate the prior"""
@ -137,7 +137,7 @@ class model(parameterised):
x = self._get_params()
[np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
self._set_params(x)
self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
def optimize_restarts(self, Nrestarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
@ -174,8 +174,8 @@ class model(parameterised):
job = pool.apply_async(opt_wrapper, args=(self,), kwds=kwargs)
jobs.append(job)
pool.close() # signal that no more data coming in
pool.join() # wait for all the tasks to complete
pool.close() # signal that no more data coming in
pool.join() # wait for all the tasks to complete
except KeyboardInterrupt:
print "Ctrl+c received, terminating and joining pool."
pool.terminate()
@ -214,10 +214,10 @@ class model(parameterised):
for s in positive_strings:
for i in self.grep_param_names(s):
if not (i in currently_constrained):
#to_make_positive.append(re.escape(param_names[i]))
# to_make_positive.append(re.escape(param_names[i]))
to_make_positive.append(i)
if len(to_make_positive):
#self.constrain_positive('(' + '|'.join(to_make_positive) + ')')
# self.constrain_positive('(' + '|'.join(to_make_positive) + ')')
self.constrain_positive(np.asarray(to_make_positive))
@ -430,7 +430,7 @@ class model(parameterised):
return 1. / k.variances
def pseudo_EM(self, epsilon=.1, **kwargs):
def pseudo_EM(self, epsilon=.1, max_EM_iterations=np.inf, **kwargs):
"""
TODO: Should this not bein the GP class?
EM - like algorithm for Expectation Propagation and Laplace approximation
@ -453,7 +453,7 @@ class model(parameterised):
alpha = 0
stop = False
while not stop:
while not stop and iteration < max_EM_iterations:
last_approximation = self.likelihood.copy()
last_params = self._get_params()
@ -464,8 +464,8 @@ class model(parameterised):
ll_change = new_ll - last_ll
if ll_change < 0:
self.likelihood = last_approximation # restore previous likelihood approximation
self._set_params(last_params) # restore model parameters
self.likelihood = last_approximation # restore previous likelihood approximation
self._set_params(last_params) # restore model parameters
print "Log-likelihood decrement: %s \nLast likelihood update discarded." % ll_change
stop = True
else:

View file

@ -64,36 +64,36 @@ class parameterised(object):
m['var'] = 2. # > sets all parameters matching 'var' to 2.
m['var'] = <array-like> # > sets parameters matching 'var' to <array-like>
"""
def get(self, name):
def get(self, name):
warnings.warn(self._get_set_deprecation, FutureWarning, stacklevel=2)
return self[name]
return self[name]
def set(self, name, val):
def set(self, name, val):
warnings.warn(self._get_set_deprecation, FutureWarning, stacklevel=2)
self[name] = val
self[name] = val
def __getitem__(self, name, return_names=False):
def __getitem__(self, name, return_names=False):
"""
Get a model parameter by name. The name is applied as a regular
Get a model parameter by name. The name is applied as a regular
expression and all parameters that match that regular expression are
returned.
"""
matches = self.grep_param_names(name)
matches = self.grep_param_names(name)
if len(matches):
if return_names:
return self._get_params()[matches], np.asarray(self._get_param_names())[matches].tolist()
else:
return self._get_params()[matches]
else:
raise AttributeError, "no parameter matches %s" % name
raise AttributeError, "no parameter matches %s" % name
def __setitem__(self, name, val):
def __setitem__(self, name, val):
"""
Set model parameter(s) by name. The name is provided as a regular
Set model parameter(s) by name. The name is provided as a regular
expression. All parameters matching that regular expression are set to
the given value.
"""
matches = self.grep_param_names(name)
matches = self.grep_param_names(name)
if len(matches):
val = np.array(val)
assert (val.size == 1) or val.size == len(matches), "Shape mismatch: {}:({},)".format(val.size, len(matches))
@ -101,7 +101,7 @@ class parameterised(object):
x[matches] = val
self.params = x
else:
raise AttributeError, "no parameter matches %s" % name
raise AttributeError, "no parameter matches %s" % name
def tie_params(self, which):
matches = self.grep_param_names(which)
@ -136,9 +136,9 @@ class parameterised(object):
if type(expr) in [str, np.string_, np.str]:
expr = re.compile(expr)
return np.nonzero([expr.search(name) for name in self._get_param_names()])[0]
return np.nonzero([expr.search(name) for name in self._get_param_names()])[0]
elif type(expr) is re._pattern_type:
return np.nonzero([expr.search(name) for name in self._get_param_names()])[0]
return np.nonzero([expr.search(name) for name in self._get_param_names()])[0]
else:
return expr

View file

@ -17,11 +17,11 @@ def BGPLVM(seed=default_seed):
D = 4
# generate GPLVM-like data
X = np.random.rand(N, Q)
k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, D).T
k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q)
k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q)
# k = GPy.kern.rbf(Q) + GPy.kern.rbf(Q) + GPy.kern.white(Q)
# k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
# k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
@ -48,9 +48,10 @@ def GPLVM_oil_100(optimize=True):
Y = data['X']
# create simple GP model
kernel = GPy.kern.rbf(6, ARD=True) + GPy.kern.bias(6)
kernel = GPy.kern.rbf(6, ARD=True) + GPy.kern.bias(6) + GPy.kern.white(6)
m = GPy.models.GPLVM(Y, 6, kernel=kernel)
m.data_labels = data['Y'].argmax(axis=1)
m['noise'] = .01
# optimize
m.ensure_default_constraints()
@ -293,7 +294,7 @@ def mrd_simulation(optimize=True, plot_sim=False, **kw):
for i, Y in enumerate(Ylist):
m['{}_noise'.format(i + 1)] = Y.var() / 100.
# m.constrain('variance|noise', logexp_clipped(1e-6))
m.constrain('variance|noise', logexp_clipped(1e-6))
m.ensure_default_constraints()
# DEBUG
@ -323,7 +324,7 @@ def brendan_faces():
m.ensure_default_constraints()
m.optimize('scg', messages=1, max_f_eval=10000)
ax = m.plot_latent(which_indices=(0,1))
ax = m.plot_latent(which_indices=(0, 1))
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.image_show(y[None, :], dimensions=(20, 28), transpose=True, invert=False, scale=False)
lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)

View file

@ -199,13 +199,13 @@ class kern(parameterised):
[p._set_params(x[s]) for p, s in zip(self.parts, self.param_slices)]
def _get_param_names(self):
# this is a bit nasty: we wat to distinguish between parts with the same name by appending a count
# this is a bit nasty: we wat to distinguish between parts with the same name by appending a count
part_names = np.array([k.name for k in self.parts], dtype=np.str)
counts = [np.sum(part_names == ni) for i, ni in enumerate(part_names)]
cum_counts = [np.sum(part_names[i:] == ni) for i, ni in enumerate(part_names)]
names = [name + '_' + str(cum_count) if count > 1 else name for name, count, cum_count in zip(part_names, counts, cum_counts)]
names = [name + '_' + str(cum_count) if count > 1 else name for name, count, cum_count in zip(part_names, counts, cum_counts)]
return sum([[name + '_' + n for n in k._get_param_names()] for name, k in zip(names, self.parts)], [])
return sum([[name + '_' + n for n in k._get_param_names()] for name, k in zip(names, self.parts)], [])
def K(self, X, X2=None, which_parts='all'):
if which_parts == 'all':

View file

@ -98,7 +98,11 @@ class linear(kernpart):
target += tmp.sum()
def dK_dX(self, dL_dK, X, X2, target):
target += (((X2[:, None, :] * self.variances)) * dL_dK[:, :, None]).sum(0)
# dim = np.where(np.array(X2.shape)[:, None] == dL_dK.shape)[0].flat[0]
# X2_ = np.expand_dims(X2, dim)
# target += ((X2_ * self.variances) * dL_dK[:, :, None]).sum(int(not dim))
target += (((X2[None, :, :] * self.variances)) * dL_dK[:, :, None]).sum(1)
# target += (((X2[None, :, :] * self.variances)) * dL_dK[:, :, None]).sum(0)
#---------------------------------------#
# PSI statistics #
@ -134,7 +138,7 @@ class linear(kernpart):
target_mu += (dL_dpsi1.T[:, :, None] * (Z * self.variances)).sum(1)
def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, target):
self.dK_dX(dL_dpsi1.T, Z, mu, target)
self.dK_dX(dL_dpsi1, Z, mu, target)
def psi2(self, Z, mu, S, target):
"""

View file

@ -123,7 +123,7 @@ class EP(likelihood):
Sroot_tilde_K = np.sqrt(self.tau_tilde)[:,None]*K
B = np.eye(self.N) + np.sqrt(self.tau_tilde)[None,:]*Sroot_tilde_K
L = jitchol(B)
V,info = linalg.lapack.flapack.dtrtrs(L,Sroot_tilde_K,lower=1)
V, info = linalg.lapack.dtrtrs(L, Sroot_tilde_K, lower=1)
Sigma = K - np.dot(V.T,V)
mu = np.dot(Sigma,self.v_tilde)
epsilon_np1 = sum((self.tau_tilde-self.np1[-1])**2)/self.N
@ -209,7 +209,7 @@ class EP(likelihood):
DSYR(LLT,Kmn[:,i].copy(),Delta_tau) #LLT = LLT + np.outer(Kmn[:,i],Kmn[:,i])*Delta_tau
L = jitchol(LLT)
#cholUpdate(L,Kmn[:,i]*np.sqrt(Delta_tau))
V,info = linalg.lapack.flapack.dtrtrs(L,Kmn,lower=1)
V, info = linalg.lapack.dtrtrs(L, Kmn, lower=1)
Sigma_diag = np.sum(V*V,-2)
si = np.sum(V.T*V[:,i],-1)
mu += (Delta_v-Delta_tau*mu[i])*si
@ -217,8 +217,8 @@ class EP(likelihood):
#Sigma recomputation with Cholesky decompositon
LLT = LLT0 + np.dot(Kmn*self.tau_tilde[None,:],Kmn.T)
L = jitchol(LLT)
V,info = linalg.lapack.flapack.dtrtrs(L,Kmn,lower=1)
V2,info = linalg.lapack.flapack.dtrtrs(L.T,V,lower=0)
V, info = linalg.lapack.dtrtrs(L, Kmn, lower=1)
V2, info = linalg.lapack.dtrtrs(L.T, V, lower=0)
Sigma_diag = np.sum(V*V,-2)
Knmv_tilde = np.dot(Kmn,self.v_tilde)
mu = np.dot(V2.T,Knmv_tilde)
@ -320,7 +320,7 @@ class EP(likelihood):
Diag = Diag0 * Iplus_Dprod_i
P = Iplus_Dprod_i[:,None] * P0
L = jitchol(np.eye(M) + np.dot(RPT0,((1. - Iplus_Dprod_i)/Diag0)[:,None]*RPT0.T))
R,info = linalg.lapack.flapack.dtrtrs(L,R0,lower=1)
R, info = linalg.lapack.dtrtrs(L, R0, lower=1)
RPT = np.dot(R,P.T)
Sigma_diag = Diag + np.sum(RPT.T*RPT.T,-1)
self.w = Diag * self.v_tilde

View file

@ -95,7 +95,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
return x
def _clipped(self, x):
return x # np.clip(x, -1e300, 1e300)
return np.clip(x, -1e300, 1e300)
def _set_params(self, x, save_old=True, save_count=0):
# try:

View file

@ -11,8 +11,8 @@ from sparse_GP import sparse_GP
def backsub_both_sides(L,X):
""" Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1)
return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T
tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
return linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
class FITC(sparse_GP):
@ -36,7 +36,7 @@ class FITC(sparse_GP):
#factor Kmm
self.Lm = jitchol(self.Kmm)
self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
self.Lmi, info = linalg.lapack.dtrtrs(self.Lm, np.eye(self.M), lower=1)
Lmipsi1 = np.dot(self.Lmi,self.psi1)
self.Qnn = np.dot(Lmipsi1.T,Lmipsi1).copy()
self.Diag0 = self.psi0 - np.diag(self.Qnn)
@ -50,7 +50,7 @@ class FITC(sparse_GP):
if self.likelihood.is_heteroscedastic:
assert self.likelihood.D == 1
tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.N)))
tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
tmp, _ = linalg.lapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
self.A = tdot(tmp)
# factor B
@ -59,8 +59,8 @@ class FITC(sparse_GP):
self.LBi = chol_inv(self.LB)
self.psi1V = np.dot(self.psi1, self.V_star)
Lmi_psi1V, info = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(Lmi_psi1V), lower=1, trans=0)
Lmi_psi1V, info = linalg.lapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
self._LBi_Lmi_psi1V, _ = linalg.lapack.dtrtrs(self.LB, np.asfortranarray(Lmi_psi1V), lower=1, trans=0)
Kmmipsi1 = np.dot(self.Lmi.T,Lmipsi1)
b_psi1_Ki = self.beta_star * Kmmipsi1.T
@ -190,7 +190,7 @@ class FITC(sparse_GP):
self.P = Iplus_Dprod_i[:,None] * self.psi1.T
self.RPT0 = np.dot(self.Lmi,self.psi1)
self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T))
self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1)
self.R, info = linalg.dtrtrs(self.L, self.Lmi, lower=1)
self.RPT = np.dot(self.R,self.P.T)
self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
self.w = self.Diag * self.likelihood.v_tilde
@ -212,7 +212,7 @@ class FITC(sparse_GP):
# = I - [RPT0] * (U*U.T)^-1 * [RPT0].T
# = I - V.T * V
U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn)
V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1)
V, info = linalg.dtrtrs(U, self.RPT0.T, lower=1)
C = np.eye(self.M) - np.dot(V.T,V)
mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:])
#self.C = C

View file

@ -74,13 +74,13 @@ class GP(model):
# the gradient of the likelihood wrt the covariance matrix
if self.likelihood.YYT is None:
#alpha = np.dot(self.Ki, self.likelihood.Y)
alpha,_ = linalg.lapack.flapack.dpotrs(self.L, self.likelihood.Y,lower=1)
alpha, _ = linalg.lapack.dpotrs(self.L, self.likelihood.Y, lower=1)
self.dL_dK = 0.5 * (tdot(alpha) - self.D * self.Ki)
else:
#tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
tmp, _ = linalg.lapack.dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
tmp, _ = linalg.lapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
self.dL_dK = 0.5 * (tmp - self.D * self.Ki)
def _get_params(self):
@ -104,7 +104,7 @@ class GP(model):
Computes the model fit using YYT if it's available
"""
if self.likelihood.YYT is None:
tmp, _ = linalg.lapack.flapack.dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)
tmp, _ = linalg.lapack.dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)
return -0.5 * np.sum(np.square(tmp))
#return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
else:
@ -136,7 +136,7 @@ class GP(model):
"""
Kx = self.kern.K(_Xnew,self.X,which_parts=which_parts).T
#KiKx = np.dot(self.Ki, Kx)
KiKx, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(Kx), lower=1)
KiKx, _ = linalg.lapack.dpotrs(self.L, np.asfortranarray(Kx), lower=1)
mu = np.dot(KiKx.T, self.likelihood.Y)
if full_cov:
Kxx = self.kern.K(_Xnew, which_parts=which_parts)

View file

@ -11,8 +11,8 @@ from sparse_GP import sparse_GP
def backsub_both_sides(L,X):
""" Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1)
return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T
tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
return linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
class generalized_FITC(sparse_GP):
@ -82,7 +82,7 @@ class generalized_FITC(sparse_GP):
if self.likelihood.is_heteroscedastic:
# Compute generalized FITC's diagonal term of the covariance
self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
self.Lmi, info = linalg.lapack.dtrtrs(self.Lm, np.eye(self.M), lower=1)
Lmipsi1 = np.dot(self.Lmi,self.psi1)
self.Qnn = np.dot(Lmipsi1.T,Lmipsi1)
#self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm)
@ -95,7 +95,7 @@ class generalized_FITC(sparse_GP):
self.P = Iplus_Dprod_i[:,None] * self.psi1.T
self.RPT0 = np.dot(self.Lmi,self.psi1)
self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T))
self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1)
self.R, info = linalg.dtrtrs(self.L, self.Lmi, lower=1)
self.RPT = np.dot(self.R,self.P.T)
self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
self.w = self.Diag * self.likelihood.v_tilde
@ -182,7 +182,7 @@ class generalized_FITC(sparse_GP):
# = I - [RPT0] * (U*U.T)^-1 * [RPT0].T
# = I - V.T * V
U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn)
V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1)
V, info = linalg.dtrtrs(U, self.RPT0.T, lower=1)
C = np.eye(self.M) - np.dot(V.T,V)
mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:])
#self.C = C

View file

@ -65,7 +65,7 @@ class MRD(model):
self._init = True
X = self._init_X(initx, likelihood_or_Y_list)
Z = self._init_Z(initz, X)
self.bgplvms = [Bayesian_GPLVM(l, Q=Q, kernel=k, X=X, Z=Z, M=self.M, **kw) for l, k in zip(likelihood_or_Y_list, kernels)]
self.bgplvms = [Bayesian_GPLVM(l, Q=Q, kernel=k, X=X, Z=Z, M=self.M, _debug=_debug, ** kw) for l, k in zip(likelihood_or_Y_list, kernels)]
del self._init
self.gref = self.bgplvms[0]
@ -143,9 +143,9 @@ class MRD(model):
# S_names = sum([['X_variance_%i_%i' % (n, q) for q in range(self.Q)] for n in range(self.N)], [])
n1 = self.gref._get_param_names()
n1var = n1[:self.NQ * 2 + self.MQ]
map_names = lambda ns, name: map(lambda x: "{1}_{0}".format(*x),
map_names = lambda ns, name: map(lambda x: "{1}_{0}".format(*x),
itertools.izip(ns,
itertools.repeat(name)))
itertools.repeat(name)))
return list(itertools.chain(n1var, *(map_names(\
sparse_GP._get_param_names(g)[self.MQ:], n) \
for g, n in zip(self.bgplvms, self.names))))
@ -213,12 +213,12 @@ class MRD(model):
dLdmuS = numpy.hstack((dLdmu.flatten(), dLdS.flatten())).flatten()
dldzt1 = reduce(lambda a, b: a + b, (sparse_GP._log_likelihood_gradients(g)[:self.MQ] for g in self.bgplvms))
return numpy.hstack((dLdmuS,
return self.gref._clipped(numpy.hstack((dLdmuS,
dldzt1,
numpy.hstack([numpy.hstack([g.dL_dtheta(),
g.likelihood._gradients(\
partial=g.partial_for_likelihood)]) \
for g in self.bgplvms])))
for g in self.bgplvms]))))
def _init_X(self, init='PCA', likelihood_list=None):
if likelihood_list is None:

View file

@ -80,7 +80,7 @@ class sparse_GP(GP):
tmp = self.psi1 * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.N)))
else:
tmp = self.psi1 * (np.sqrt(self.likelihood.precision))
tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
tmp, _ = linalg.lapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
self.A = tdot(tmp)
@ -92,10 +92,10 @@ class sparse_GP(GP):
self.psi1V = np.dot(self.psi1, self.likelihood.V)
# back substutue C into psi1V
tmp, info1 = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)
tmp, info2 = linalg.lapack.flapack.dpotrs(self.LB, tmp, lower=1)
self.Cpsi1V, info3 = linalg.lapack.flapack.dtrtrs(self.Lm, tmp, lower=1, trans=1)
tmp, info1 = linalg.lapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
self._LBi_Lmi_psi1V, _ = linalg.lapack.dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)
tmp, info2 = linalg.lapack.dpotrs(self.LB, tmp, lower=1)
self.Cpsi1V, info3 = linalg.lapack.dtrtrs(self.Lm, tmp, lower=1, trans=1)
# Compute dL_dKmm
tmp = tdot(self._LBi_Lmi_psi1V)
@ -220,7 +220,7 @@ class sparse_GP(GP):
def _raw_predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=False):
"""Internal helper function for making predictions, does not account for normalization"""
Bi, _ = linalg.lapack.flapack.dpotri(self.LB, lower=0) # WTH? this lower switch should be 1, but that doesn't work!
Bi, _ = linalg.lapack.dpotri(self.LB, lower=0) # WTH? this lower switch should be 1, but that doesn't work!
symmetrify(Bi)
Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.M) - Bi)

View file

@ -23,7 +23,7 @@ class MRDTests(unittest.TestCase):
Ylist = [np.random.multivariate_normal(np.zeros(N), K, D).T for _ in range(num_m)]
likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist]
m = GPy.models.MRD(*likelihood_list, Q=Q, kernel=k, M=M)
m = GPy.models.MRD(likelihood_list, Q=Q, kernels=k, M=M)
m.ensure_default_constraints()
self.assertTrue(m.checkgrad())

View file

@ -63,7 +63,7 @@ def _mdot_r(a,b):
def jitchol(A,maxtries=5):
A = np.asfortranarray(A)
L,info = linalg.lapack.flapack.dpotrf(A,lower=1)
L, info = linalg.lapack.dpotrf(A, lower=1)
if info ==0:
return L
else:
@ -124,7 +124,7 @@ def pdinv(A, *args):
L = jitchol(A, *args)
logdet = 2.*np.sum(np.log(np.diag(L)))
Li = chol_inv(L)
Ai, _ = linalg.lapack.flapack.dpotri(L)
Ai, _ = linalg.lapack.dpotri(L)
#Ai = np.tril(Ai) + np.tril(Ai,-1).T
symmetrify(Ai)
@ -140,7 +140,7 @@ def chol_inv(L):
"""
return linalg.lapack.flapack.dtrtri(L, lower = True)[0]
return linalg.lapack.dtrtri(L, lower=True)[0]
def multiple_pdinv(A):
@ -157,7 +157,7 @@ def multiple_pdinv(A):
N = A.shape[-1]
chols = [jitchol(A[:,:,i]) for i in range(N)]
halflogdets = [np.sum(np.log(np.diag(L[0]))) for L in chols]
invs = [linalg.lapack.flapack.dpotri(L[0],True)[0] for L in chols]
invs = [linalg.lapack.dpotri(L[0], True)[0] for L in chols]
invs = [np.triu(I)+np.triu(I,1).T for I in invs]
return np.dstack(invs),np.array(halflogdets)
@ -355,9 +355,9 @@ def cholupdate(L,x):
def backsub_both_sides(L, X,transpose='left'):
""" Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
if transpose=='left':
tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
return linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
else:
tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=0)
return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=0)[0].T
tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=0)
return linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=0)[0].T

View file

@ -5,8 +5,8 @@ from GPy.util import datasets as dat
import urllib2
class vertex:
def __init__(self, name, id, parents=[], children=[], meta = {}):
self.name = name
def __init__(self, name, id, parents=[], children=[], meta = {}):
self.name = name
self.id = id
self.parents = parents
self.children = children
@ -18,7 +18,7 @@ class vertex:
class tree:
def __init__(self):
self.vertices = []
self.vertices.append(vertex(name='root', id=0))
self.vertices.append(vertex(name='root', id=0))
def __str__(self):
index = self.find_root()
@ -69,12 +69,12 @@ class tree:
return i
raise Error, 'Reverse look up of id failed.'
def get_index_by_name(self, name):
"""Give the index associated with a given vertex name."""
def get_index_by_name(self, name):
"""Give the index associated with a given vertex name."""
for i in range(len(self.vertices)):
if self.vertices[i].name == name:
if self.vertices[i].name == name:
return i
raise Error, 'Reverse look up of name failed.'
raise Error, 'Reverse look up of name failed.'
def order_vertices(self):
"""Order vertices in the graph such that parents always have a lower index than children."""
@ -203,7 +203,7 @@ class acclaim_skeleton(skeleton):
self.length = 1.0
self.mass = 1.0
self.type = 'acclaim'
self.vertices[0] = vertex(name='root', id=0,
self.vertices[0] = vertex(name='root', id=0,
parents = [0], children=[],
meta = {'orientation': [],
'axis': [0., 0., 0.],
@ -303,7 +303,7 @@ class acclaim_skeleton(skeleton):
"""Loads an ASF file into a skeleton structure.
loads skeleton structure from an acclaim skeleton file.
ARG file_name : the file name to load in.
ARG file_name : the file name to load in.
RETURN skel : the skeleton for the file."""
fid = open(file_name, 'r')
@ -321,8 +321,8 @@ class acclaim_skeleton(skeleton):
parts = lin.split()
if parts[0] == 'begin':
bone_count += 1
self.vertices.append(vertex(name = '', id=np.NaN,
meta={'name': [],
self.vertices.append(vertex(name = '', id=np.NaN,
meta={'name': [],
'id': [],
'offset': [],
'orientation': [],
@ -348,7 +348,7 @@ class acclaim_skeleton(skeleton):
self.vertices[bone_count].children = []
elif parts[0]=='name':
elif parts[0]=='name':
self.vertices[bone_count].name = parts[1]
lin = self.read_line(fid)
@ -436,7 +436,7 @@ class acclaim_skeleton(skeleton):
if counter != frame_no:
raise Error, 'Unexpected frame number.'
else:
raise Error, 'Single bone name ...'
raise Error, 'Single bone name ...'
else:
ind = self.get_index_by_name(parts[0])
bones[ind].append(np.array([float(channel) for channel in parts[1:]]))
@ -542,7 +542,7 @@ class acclaim_skeleton(skeleton):
lin = self.read_line(fid)
while lin:
if lin[0]==':':
if lin[1:]== 'name':
if lin[1:]== 'name':
lin = self.read_line(fid)
self.name = lin
elif lin[1:]=='units':