Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-04 17:22:39 +02:00)
REVERT a53690ab7f, flapack back substituted in
commit 4eebf99bfb (parent 43c2f8af87)
15 changed files with 96 additions and 101 deletions
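The substance of the revert is mechanical: every LAPACK wrapper call in the hunks below (dtrtrs, dpotrs, dpotrf, dpotri, dtrtri) moves back from SciPy's linalg.lapack namespace to the older linalg.lapack.flapack module, which exposed the Fortran wrappers directly on the SciPy versions GPy targeted at the time. A minimal sketch of the call being reverted throughout, written here against the newer spelling (the flapack path is assumed to be the older location of the same routine):

    import numpy as np
    from scipy import linalg

    L = np.array([[2., 0.],
                  [1., 3.]])                       # lower-triangular factor
    b = np.array([[2.], [7.]])
    x, info = linalg.lapack.dtrtrs(L, b, lower=1)  # solve L x = b
    assert info == 0 and np.allclose(np.dot(L, x), b)
    # On the older SciPy this commit targets, the same routine is reached as
    # linalg.lapack.flapack.dtrtrs(L, b, lower=1), which is the spelling the
    # revert restores at every call site below.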
@@ -62,7 +62,7 @@ class model(parameterised):
         if len(tie_matches) > 1:
             raise ValueError, "cannot place prior across multiple ties"
         elif len(tie_matches) == 1:
             which = which[:1] # just place a prior object on the first parameter


         # check constraints are okay
@@ -89,18 +89,18 @@ class model(parameterised):
         for w in which:
             self.priors[w] = what

     def get_gradient(self, name, return_names=False):
         """
         Get model gradient(s) by name. The name is applied as a regular expression and all parameters that match that regular expression are returned.
         """
         matches = self.grep_param_names(name)
         if len(matches):
             if return_names:
                 return self._log_likelihood_gradients()[matches], np.asarray(self._get_param_names())[matches].tolist()
             else:
                 return self._log_likelihood_gradients()[matches]
         else:
             raise AttributeError, "no parameter matches %s" % name

     def log_prior(self):
         """evaluate the prior"""
@@ -137,7 +137,7 @@ class model(parameterised):
         x = self._get_params()
         [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
         self._set_params(x)
         self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)


     def optimize_restarts(self, Nrestarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
@@ -174,8 +174,8 @@ class model(parameterised):
                 job = pool.apply_async(opt_wrapper, args=(self,), kwds=kwargs)
                 jobs.append(job)

             pool.close() # signal that no more data coming in
             pool.join() # wait for all the tasks to complete
         except KeyboardInterrupt:
             print "Ctrl+c received, terminating and joining pool."
             pool.terminate()
@@ -214,10 +214,10 @@ class model(parameterised):
         for s in positive_strings:
             for i in self.grep_param_names(s):
                 if not (i in currently_constrained):
-                    # to_make_positive.append(re.escape(param_names[i]))
+                    #to_make_positive.append(re.escape(param_names[i]))
                     to_make_positive.append(i)
         if len(to_make_positive):
-            # self.constrain_positive('(' + '|'.join(to_make_positive) + ')')
+            #self.constrain_positive('(' + '|'.join(to_make_positive) + ')')
             self.constrain_positive(np.asarray(to_make_positive))


@@ -430,7 +430,7 @@ class model(parameterised):
         return 1. / k.variances


-    def pseudo_EM(self, epsilon=.1, max_EM_iterations=np.inf, **kwargs):
+    def pseudo_EM(self, epsilon=.1, **kwargs):
         """
         TODO: Should this not be in the GP class?
         EM-like algorithm for Expectation Propagation and Laplace approximation
@@ -453,7 +453,7 @@ class model(parameterised):
         alpha = 0
         stop = False

-        while not stop and iteration < max_EM_iterations:
+        while not stop:
             last_approximation = self.likelihood.copy()
             last_params = self._get_params()

@@ -464,8 +464,8 @@ class model(parameterised):
             ll_change = new_ll - last_ll

             if ll_change < 0:
                 self.likelihood = last_approximation # restore previous likelihood approximation
                 self._set_params(last_params) # restore model parameters
                 print "Log-likelihood decrement: %s \nLast likelihood update discarded." % ll_change
                 stop = True
             else:
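Read together, the three hunks above drop the max_EM_iterations cap that a53690ab7f had added, so the loop again runs until the log-likelihood stops improving. A hedged sketch of the loop's overall shape (the update_likelihood_approximation and optimize calls are assumed from the surrounding class, not shown in this diff):

    import numpy as np

    def pseudo_EM_sketch(model, epsilon=.1, **kwargs):
        last_ll, stop = -np.inf, False
        while not stop:
            last_approximation = model.likelihood.copy()
            last_params = model._get_params()
            model.update_likelihood_approximation()   # E-like step (assumed name)
            model.optimize(**kwargs)                  # M-like step
            new_ll = model.log_likelihood()
            ll_change = new_ll - last_ll
            if ll_change < 0:
                model.likelihood = last_approximation # roll back the bad update
                model._set_params(last_params)
                stop = True
            elif ll_change < epsilon:
                stop = True                           # converged
            last_ll = new_ll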
@@ -64,36 +64,36 @@ class parameterised(object):
     m['var'] = 2. # > sets all parameters matching 'var' to 2.
     m['var'] = <array-like> # > sets parameters matching 'var' to <array-like>
     """
     def get(self, name):
         warnings.warn(self._get_set_deprecation, FutureWarning, stacklevel=2)
         return self[name]

     def set(self, name, val):
         warnings.warn(self._get_set_deprecation, FutureWarning, stacklevel=2)
         self[name] = val

     def __getitem__(self, name, return_names=False):
         """
         Get a model parameter by name. The name is applied as a regular
         expression and all parameters that match that regular expression are
         returned.
         """
         matches = self.grep_param_names(name)
         if len(matches):
             if return_names:
                 return self._get_params()[matches], np.asarray(self._get_param_names())[matches].tolist()
             else:
                 return self._get_params()[matches]
         else:
             raise AttributeError, "no parameter matches %s" % name

     def __setitem__(self, name, val):
         """
         Set model parameter(s) by name. The name is provided as a regular
         expression. All parameters matching that regular expression are set to
         the given value.
         """
         matches = self.grep_param_names(name)
         if len(matches):
             val = np.array(val)
             assert (val.size == 1) or val.size == len(matches), "Shape mismatch: {}:({},)".format(val.size, len(matches))
@@ -101,7 +101,7 @@ class parameterised(object):
             x[matches] = val
             self.params = x
         else:
             raise AttributeError, "no parameter matches %s" % name

     def tie_params(self, which):
         matches = self.grep_param_names(which)
@@ -136,9 +136,9 @@ class parameterised(object):

         if type(expr) in [str, np.string_, np.str]:
             expr = re.compile(expr)
             return np.nonzero([expr.search(name) for name in self._get_param_names()])[0]
         elif type(expr) is re._pattern_type:
             return np.nonzero([expr.search(name) for name in self._get_param_names()])[0]
         else:
             return expr

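All the name-based accessors above funnel through grep_param_names, which turns a string or compiled regular expression into integer indices into the flat parameter vector. A standalone sketch of that mechanism, with parameter names invented for illustration:

    import re
    import numpy as np

    param_names = ['rbf_variance', 'rbf_lengthscale', 'noise_variance']
    expr = re.compile('var')
    matches = np.nonzero([expr.search(name) for name in param_names])[0]
    print(matches)  # [0 2] -- indices used to slice _get_params()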
@@ -17,11 +17,11 @@ def BGPLVM(seed=default_seed):
     D = 4
     # generate GPLVM-like data
     X = np.random.rand(N, Q)
     k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
     K = k.K(X)
     Y = np.random.multivariate_normal(np.zeros(N), K, D).T

     k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q)
     # k = GPy.kern.rbf(Q) + GPy.kern.rbf(Q) + GPy.kern.white(Q)
     # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
     # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
@@ -48,10 +48,9 @@ def GPLVM_oil_100(optimize=True):
     Y = data['X']

     # create simple GP model
-    kernel = GPy.kern.rbf(6, ARD=True) + GPy.kern.bias(6) + GPy.kern.white(6)
+    kernel = GPy.kern.rbf(6, ARD=True) + GPy.kern.bias(6)
     m = GPy.models.GPLVM(Y, 6, kernel=kernel)
     m.data_labels = data['Y'].argmax(axis=1)
-    m['noise'] = .01

     # optimize
     m.ensure_default_constraints()
@@ -294,7 +293,7 @@ def mrd_simulation(optimize=True, plot_sim=False, **kw):
     for i, Y in enumerate(Ylist):
         m['{}_noise'.format(i + 1)] = Y.var() / 100.

-    m.constrain('variance|noise', logexp_clipped(1e-6))
+    # m.constrain('variance|noise', logexp_clipped(1e-6))
     m.ensure_default_constraints()

     # DEBUG
@@ -324,7 +323,7 @@ def brendan_faces():
     m.ensure_default_constraints()
     m.optimize('scg', messages=1, max_f_eval=10000)

-    ax = m.plot_latent(which_indices=(0, 1))
+    ax = m.plot_latent(which_indices=(0,1))
     y = m.likelihood.Y[0, :]
     data_show = GPy.util.visualize.image_show(y[None, :], dimensions=(20, 28), transpose=True, invert=False, scale=False)
     lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
@@ -199,13 +199,13 @@ class kern(parameterised):
         [p._set_params(x[s]) for p, s in zip(self.parts, self.param_slices)]

     def _get_param_names(self):
         # this is a bit nasty: we want to distinguish between parts with the same name by appending a count
         part_names = np.array([k.name for k in self.parts], dtype=np.str)
         counts = [np.sum(part_names == ni) for i, ni in enumerate(part_names)]
         cum_counts = [np.sum(part_names[i:] == ni) for i, ni in enumerate(part_names)]
         names = [name + '_' + str(cum_count) if count > 1 else name for name, count, cum_count in zip(part_names, counts, cum_counts)]

         return sum([[name + '_' + n for n in k._get_param_names()] for name, k in zip(names, self.parts)], [])

     def K(self, X, X2=None, which_parts='all'):
         if which_parts == 'all':
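The counting scheme in _get_param_names is easier to see on a concrete input. A small sketch with assumed part names (pure NumPy, outside the class): duplicated parts get a reversed running count appended, unique parts are left alone.

    import numpy as np

    part_names = np.array(['rbf', 'linear', 'rbf', 'white'])
    counts = [np.sum(part_names == ni) for i, ni in enumerate(part_names)]
    cum_counts = [np.sum(part_names[i:] == ni) for i, ni in enumerate(part_names)]
    names = [n + '_' + str(c) if count > 1 else n
             for n, count, c in zip(part_names, counts, cum_counts)]
    print(names)  # ['rbf_2', 'linear', 'rbf_1', 'white']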
@@ -98,11 +98,7 @@ class linear(kernpart):
         target += tmp.sum()

     def dK_dX(self, dL_dK, X, X2, target):
-        # dim = np.where(np.array(X2.shape)[:, None] == dL_dK.shape)[0].flat[0]
-        # X2_ = np.expand_dims(X2, dim)
-        # target += ((X2_ * self.variances) * dL_dK[:, :, None]).sum(int(not dim))
-        target += (((X2[None, :, :] * self.variances)) * dL_dK[:, :, None]).sum(1)
-        # target += (((X2[None, :, :] * self.variances)) * dL_dK[:, :, None]).sum(0)
+        target += (((X2[:, None, :] * self.variances)) * dL_dK[:, :, None]).sum(0)

     #---------------------------------------#
     #           PSI statistics              #
@@ -138,7 +134,7 @@ class linear(kernpart):
         target_mu += (dL_dpsi1.T[:, :, None] * (Z * self.variances)).sum(1)

     def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, target):
-        self.dK_dX(dL_dpsi1, Z, mu, target)
+        self.dK_dX(dL_dpsi1.T, Z, mu, target)

     def psi2(self, Z, mu, S, target):
         """
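The two linear-kernel hunks above are coupled: the rewritten dK_dX contracts over the first axis and therefore expects the transposed gradient, which is exactly what the new dpsi1_dZ passes. A small check of that equivalence (toy shapes assumed; X2 is (M, Q), dL_dK is (N, M)):

    import numpy as np

    M, N, Q = 3, 5, 2
    rng = np.random.RandomState(1)
    X2, variances, dL_dK = rng.rand(M, Q), rng.rand(Q), rng.rand(N, M)
    old = (((X2[None, :, :] * variances)) * dL_dK[:, :, None]).sum(1)    # (N, Q)
    new = (((X2[:, None, :] * variances)) * dL_dK.T[:, :, None]).sum(0)  # (N, Q)
    assert np.allclose(old, new)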
@@ -123,7 +123,7 @@ class EP(likelihood):
         Sroot_tilde_K = np.sqrt(self.tau_tilde)[:,None]*K
         B = np.eye(self.N) + np.sqrt(self.tau_tilde)[None,:]*Sroot_tilde_K
         L = jitchol(B)
-        V, info = linalg.lapack.dtrtrs(L, Sroot_tilde_K, lower=1)
+        V,info = linalg.lapack.flapack.dtrtrs(L,Sroot_tilde_K,lower=1)
         Sigma = K - np.dot(V.T,V)
         mu = np.dot(Sigma,self.v_tilde)
         epsilon_np1 = sum((self.tau_tilde-self.np1[-1])**2)/self.N
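The lines above implement the standard EP posterior update (as in Rasmussen and Williams, Algorithm 3.5): with B = I + S^(1/2) K S^(1/2), where S is the diagonal of site precisions tau_tilde, and V = L^-1 S^(1/2) K, the matrix K - V.T V equals (K^-1 + S)^-1. A small numeric check of that identity with toy values:

    import numpy as np

    tau = np.array([0.5, 2.0, 1.0])      # site precisions (tau_tilde)
    K = np.array([[2.0, 0.5, 0.2],
                  [0.5, 1.5, 0.3],
                  [0.2, 0.3, 1.0]])      # a small positive-definite covariance
    sq = np.sqrt(tau)
    B = np.eye(3) + sq[None, :] * (sq[:, None] * K)
    L = np.linalg.cholesky(B)
    V = np.linalg.solve(L, sq[:, None] * K)
    Sigma = K - np.dot(V.T, V)
    assert np.allclose(Sigma, np.linalg.inv(np.linalg.inv(K) + np.diag(tau)))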
@@ -209,7 +209,7 @@ class EP(likelihood):
             DSYR(LLT,Kmn[:,i].copy(),Delta_tau) #LLT = LLT + np.outer(Kmn[:,i],Kmn[:,i])*Delta_tau
             L = jitchol(LLT)
             #cholUpdate(L,Kmn[:,i]*np.sqrt(Delta_tau))
-            V, info = linalg.lapack.dtrtrs(L, Kmn, lower=1)
+            V,info = linalg.lapack.flapack.dtrtrs(L,Kmn,lower=1)
             Sigma_diag = np.sum(V*V,-2)
             si = np.sum(V.T*V[:,i],-1)
             mu += (Delta_v-Delta_tau*mu[i])*si
@@ -217,8 +217,8 @@ class EP(likelihood):
         #Sigma recomputation with Cholesky decomposition
         LLT = LLT0 + np.dot(Kmn*self.tau_tilde[None,:],Kmn.T)
         L = jitchol(LLT)
-        V, info = linalg.lapack.dtrtrs(L, Kmn, lower=1)
-        V2, info = linalg.lapack.dtrtrs(L.T, V, lower=0)
+        V,info = linalg.lapack.flapack.dtrtrs(L,Kmn,lower=1)
+        V2,info = linalg.lapack.flapack.dtrtrs(L.T,V,lower=0)
         Sigma_diag = np.sum(V*V,-2)
         Knmv_tilde = np.dot(Kmn,self.v_tilde)
         mu = np.dot(V2.T,Knmv_tilde)
@@ -320,7 +320,7 @@ class EP(likelihood):
         Diag = Diag0 * Iplus_Dprod_i
         P = Iplus_Dprod_i[:,None] * P0
         L = jitchol(np.eye(M) + np.dot(RPT0,((1. - Iplus_Dprod_i)/Diag0)[:,None]*RPT0.T))
-        R, info = linalg.lapack.dtrtrs(L, R0, lower=1)
+        R,info = linalg.lapack.flapack.dtrtrs(L,R0,lower=1)
         RPT = np.dot(R,P.T)
         Sigma_diag = Diag + np.sum(RPT.T*RPT.T,-1)
         self.w = Diag * self.v_tilde
@@ -95,7 +95,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         return x

     def _clipped(self, x):
-        return np.clip(x, -1e300, 1e300)
+        return x # np.clip(x, -1e300, 1e300)

     def _set_params(self, x, save_old=True, save_count=0):
         # try:
@@ -11,8 +11,8 @@ from sparse_GP import sparse_GP

 def backsub_both_sides(L,X):
     """ Return L^-T * X * L^-1, assuming X is symmetrical and L is lower cholesky"""
-    tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
-    return linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
+    tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1)
+    return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T

 class FITC(sparse_GP):

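Independent of the lapack/flapack spelling, backsub_both_sides computes L^-T X L^-1 with two triangular solves instead of ever forming L^-1. A toy numeric check of that identity (modern dtrtrs spelling assumed):

    import numpy as np
    from scipy import linalg

    rng = np.random.RandomState(0)
    A = rng.randn(4, 4)
    X = A + A.T                                    # symmetric X
    L = np.linalg.cholesky(np.dot(A, A.T) + 4 * np.eye(4))
    tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
    res = linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
    Li = np.linalg.inv(L)
    assert np.allclose(res, np.dot(np.dot(Li.T, X), Li))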
@@ -36,7 +36,7 @@ class FITC(sparse_GP):

         #factor Kmm
         self.Lm = jitchol(self.Kmm)
-        self.Lmi, info = linalg.lapack.dtrtrs(self.Lm, np.eye(self.M), lower=1)
+        self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
         Lmipsi1 = np.dot(self.Lmi,self.psi1)
         self.Qnn = np.dot(Lmipsi1.T,Lmipsi1).copy()
         self.Diag0 = self.psi0 - np.diag(self.Qnn)
@@ -50,7 +50,7 @@ class FITC(sparse_GP):
         if self.likelihood.is_heteroscedastic:
             assert self.likelihood.D == 1
             tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.N)))
-            tmp, _ = linalg.lapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
+            tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
             self.A = tdot(tmp)

         # factor B
@@ -59,8 +59,8 @@ class FITC(sparse_GP):
         self.LBi = chol_inv(self.LB)
         self.psi1V = np.dot(self.psi1, self.V_star)

-        Lmi_psi1V, info = linalg.lapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
-        self._LBi_Lmi_psi1V, _ = linalg.lapack.dtrtrs(self.LB, np.asfortranarray(Lmi_psi1V), lower=1, trans=0)
+        Lmi_psi1V, info = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
+        self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(Lmi_psi1V), lower=1, trans=0)

         Kmmipsi1 = np.dot(self.Lmi.T,Lmipsi1)
         b_psi1_Ki = self.beta_star * Kmmipsi1.T
@@ -190,7 +190,7 @@ class FITC(sparse_GP):
         self.P = Iplus_Dprod_i[:,None] * self.psi1.T
         self.RPT0 = np.dot(self.Lmi,self.psi1)
         self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T))
-        self.R, info = linalg.dtrtrs(self.L, self.Lmi, lower=1)
+        self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1)
         self.RPT = np.dot(self.R,self.P.T)
         self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
         self.w = self.Diag * self.likelihood.v_tilde
@@ -212,7 +212,7 @@ class FITC(sparse_GP):
         # = I - [RPT0] * (U*U.T)^-1 * [RPT0].T
         # = I - V.T * V
         U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn)
-        V, info = linalg.dtrtrs(U, self.RPT0.T, lower=1)
+        V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1)
         C = np.eye(self.M) - np.dot(V.T,V)
         mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:])
         #self.C = C
@@ -74,13 +74,13 @@ class GP(model):
         # the gradient of the likelihood wrt the covariance matrix
         if self.likelihood.YYT is None:
             #alpha = np.dot(self.Ki, self.likelihood.Y)
-            alpha, _ = linalg.lapack.dpotrs(self.L, self.likelihood.Y, lower=1)
+            alpha,_ = linalg.lapack.flapack.dpotrs(self.L, self.likelihood.Y,lower=1)

             self.dL_dK = 0.5 * (tdot(alpha) - self.D * self.Ki)
         else:
             #tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
-            tmp, _ = linalg.lapack.dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
-            tmp, _ = linalg.lapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
+            tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
+            tmp, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
             self.dL_dK = 0.5 * (tmp - self.D * self.Ki)

     def _get_params(self):
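The dpotrs calls in this hunk solve K alpha = Y by reusing the Cholesky factor self.L rather than forming Ki = K^-1 explicitly, which is both cheaper and numerically safer. The pattern in isolation, on a toy K and Y:

    import numpy as np
    from scipy import linalg

    K = np.array([[4., 1.],
                  [1., 3.]])
    Y = np.array([[1.], [2.]])
    L, info = linalg.lapack.dpotrf(K, lower=1)      # K = L L^T
    alpha, _ = linalg.lapack.dpotrs(L, Y, lower=1)  # solve K alpha = Y
    assert info == 0 and np.allclose(np.dot(K, alpha), Y)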
@@ -104,7 +104,7 @@ class GP(model):
         Computes the model fit using YYT if it's available
         """
         if self.likelihood.YYT is None:
-            tmp, _ = linalg.lapack.dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)
+            tmp, _ = linalg.lapack.flapack.dtrtrs(self.L, np.asfortranarray(self.likelihood.Y), lower=1)
             return -0.5 * np.sum(np.square(tmp))
             #return -0.5 * np.sum(np.square(np.dot(self.Li, self.likelihood.Y)))
         else:
@@ -136,7 +136,7 @@ class GP(model):
         """
         Kx = self.kern.K(_Xnew,self.X,which_parts=which_parts).T
         #KiKx = np.dot(self.Ki, Kx)
-        KiKx, _ = linalg.lapack.dpotrs(self.L, np.asfortranarray(Kx), lower=1)
+        KiKx, _ = linalg.lapack.flapack.dpotrs(self.L, np.asfortranarray(Kx), lower=1)
         mu = np.dot(KiKx.T, self.likelihood.Y)
         if full_cov:
             Kxx = self.kern.K(_Xnew, which_parts=which_parts)
@@ -11,8 +11,8 @@ from sparse_GP import sparse_GP

 def backsub_both_sides(L,X):
     """ Return L^-T * X * L^-1, assuming X is symmetrical and L is lower cholesky"""
-    tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
-    return linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
+    tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1)
+    return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T


 class generalized_FITC(sparse_GP):
@@ -82,7 +82,7 @@ class generalized_FITC(sparse_GP):
         if self.likelihood.is_heteroscedastic:

             # Compute generalized FITC's diagonal term of the covariance
-            self.Lmi, info = linalg.lapack.dtrtrs(self.Lm, np.eye(self.M), lower=1)
+            self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
             Lmipsi1 = np.dot(self.Lmi,self.psi1)
             self.Qnn = np.dot(Lmipsi1.T,Lmipsi1)
             #self.Kmmi, Lm, Lmi, Kmm_logdet = pdinv(self.Kmm)
@@ -95,7 +95,7 @@ class generalized_FITC(sparse_GP):
         self.P = Iplus_Dprod_i[:,None] * self.psi1.T
         self.RPT0 = np.dot(self.Lmi,self.psi1)
         self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T))
-        self.R, info = linalg.dtrtrs(self.L, self.Lmi, lower=1)
+        self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1)
         self.RPT = np.dot(self.R,self.P.T)
         self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
         self.w = self.Diag * self.likelihood.v_tilde
@@ -182,7 +182,7 @@ class generalized_FITC(sparse_GP):
         # = I - [RPT0] * (U*U.T)^-1 * [RPT0].T
         # = I - V.T * V
         U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn)
-        V, info = linalg.dtrtrs(U, self.RPT0.T, lower=1)
+        V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1)
         C = np.eye(self.M) - np.dot(V.T,V)
         mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:])
         #self.C = C
@@ -65,7 +65,7 @@ class MRD(model):
         self._init = True
         X = self._init_X(initx, likelihood_or_Y_list)
         Z = self._init_Z(initz, X)
-        self.bgplvms = [Bayesian_GPLVM(l, Q=Q, kernel=k, X=X, Z=Z, M=self.M, _debug=_debug, ** kw) for l, k in zip(likelihood_or_Y_list, kernels)]
+        self.bgplvms = [Bayesian_GPLVM(l, Q=Q, kernel=k, X=X, Z=Z, M=self.M, **kw) for l, k in zip(likelihood_or_Y_list, kernels)]
         del self._init

         self.gref = self.bgplvms[0]
@@ -143,9 +143,9 @@ class MRD(model):
         # S_names = sum([['X_variance_%i_%i' % (n, q) for q in range(self.Q)] for n in range(self.N)], [])
         n1 = self.gref._get_param_names()
         n1var = n1[:self.NQ * 2 + self.MQ]
         map_names = lambda ns, name: map(lambda x: "{1}_{0}".format(*x),
                                          itertools.izip(ns,
                                                         itertools.repeat(name)))
         return list(itertools.chain(n1var, *(map_names(\
             sparse_GP._get_param_names(g)[self.MQ:], n) \
             for g, n in zip(self.bgplvms, self.names))))
@@ -213,12 +213,12 @@ class MRD(model):
         dLdmuS = numpy.hstack((dLdmu.flatten(), dLdS.flatten())).flatten()
         dldzt1 = reduce(lambda a, b: a + b, (sparse_GP._log_likelihood_gradients(g)[:self.MQ] for g in self.bgplvms))

-        return self.gref._clipped(numpy.hstack((dLdmuS,
+        return numpy.hstack((dLdmuS,
                              dldzt1,
                              numpy.hstack([numpy.hstack([g.dL_dtheta(),
                                                          g.likelihood._gradients(\
                                                              partial=g.partial_for_likelihood)]) \
-                                           for g in self.bgplvms]))))
+                                           for g in self.bgplvms])))

     def _init_X(self, init='PCA', likelihood_list=None):
         if likelihood_list is None:
@@ -80,7 +80,7 @@ class sparse_GP(GP):
             tmp = self.psi1 * (np.sqrt(self.likelihood.precision.flatten().reshape(1, self.N)))
         else:
             tmp = self.psi1 * (np.sqrt(self.likelihood.precision))
-        tmp, _ = linalg.lapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
+        tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
         self.A = tdot(tmp)


@@ -92,10 +92,10 @@ class sparse_GP(GP):
         self.psi1V = np.dot(self.psi1, self.likelihood.V)

         # back substitute C into psi1V
-        tmp, info1 = linalg.lapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
-        self._LBi_Lmi_psi1V, _ = linalg.lapack.dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)
-        tmp, info2 = linalg.lapack.dpotrs(self.LB, tmp, lower=1)
-        self.Cpsi1V, info3 = linalg.lapack.dtrtrs(self.Lm, tmp, lower=1, trans=1)
+        tmp, info1 = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
+        self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)
+        tmp, info2 = linalg.lapack.flapack.dpotrs(self.LB, tmp, lower=1)
+        self.Cpsi1V, info3 = linalg.lapack.flapack.dtrtrs(self.Lm, tmp, lower=1, trans=1)

         # Compute dL_dKmm
         tmp = tdot(self._LBi_Lmi_psi1V)
@@ -220,7 +220,7 @@ class sparse_GP(GP):
     def _raw_predict(self, Xnew, X_variance_new=None, which_parts='all', full_cov=False):
         """Internal helper function for making predictions, does not account for normalization"""

-        Bi, _ = linalg.lapack.dpotri(self.LB, lower=0) # WTH? this lower switch should be 1, but that doesn't work!
+        Bi, _ = linalg.lapack.flapack.dpotri(self.LB, lower=0) # WTH? this lower switch should be 1, but that doesn't work!
         symmetrify(Bi)
         Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.M) - Bi)

@@ -23,7 +23,7 @@ class MRDTests(unittest.TestCase):
         Ylist = [np.random.multivariate_normal(np.zeros(N), K, D).T for _ in range(num_m)]
         likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist]

-        m = GPy.models.MRD(likelihood_list, Q=Q, kernels=k, M=M)
+        m = GPy.models.MRD(*likelihood_list, Q=Q, kernel=k, M=M)
         m.ensure_default_constraints()

         self.assertTrue(m.checkgrad())
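The test change above is about argument unpacking: MRD apparently takes its likelihoods as positional *args, so the list has to be splatted. A toy model of the difference (the MRD here is a stand-in, not the real class):

    def MRD(*likelihoods, **kw):
        return len(likelihoods)

    lst = ['lik1', 'lik2', 'lik3']
    assert MRD(lst) == 1   # one positional argument: the list itself
    assert MRD(*lst) == 3  # unpacked: one argument per likelihood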
@@ -63,7 +63,7 @@ def _mdot_r(a,b):

 def jitchol(A,maxtries=5):
     A = np.asfortranarray(A)
-    L, info = linalg.lapack.dpotrf(A, lower=1)
+    L,info = linalg.lapack.flapack.dpotrf(A,lower=1)
     if info ==0:
         return L
     else:
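jitchol's contract: dpotrf reports failure through info rather than raising, and a nonzero info means the matrix was not numerically positive definite. The else branch (not shown in this hunk) retries with diagonal jitter; a hedged sketch of that retry logic:

    import numpy as np
    from scipy import linalg

    def jitchol_sketch(A, maxtries=5):
        A = np.asfortranarray(A)
        L, info = linalg.lapack.dpotrf(A, lower=1)
        if info == 0:
            return L
        # not positive definite: add growing diagonal jitter and retry
        jitter = np.diag(A).mean() * 1e-6
        for _ in range(maxtries):
            L, info = linalg.lapack.dpotrf(A + np.eye(A.shape[0]) * jitter, lower=1)
            if info == 0:
                return L
            jitter *= 10.
        raise np.linalg.LinAlgError("matrix not positive definite, even with jitter")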
@@ -124,7 +124,7 @@ def pdinv(A, *args):
     L = jitchol(A, *args)
     logdet = 2.*np.sum(np.log(np.diag(L)))
     Li = chol_inv(L)
-    Ai, _ = linalg.lapack.dpotri(L)
+    Ai, _ = linalg.lapack.flapack.dpotri(L)
     #Ai = np.tril(Ai) + np.tril(Ai,-1).T
     symmetrify(Ai)

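dpotri inverts a matrix from its Cholesky factor but writes only one triangle of the result, which is why the commented tril reconstruction and the symmetrify call follow it. The behaviour in isolation:

    import numpy as np
    from scipy import linalg

    A = np.array([[4., 1.],
                  [1., 3.]])
    L, _ = linalg.lapack.dpotrf(A, lower=1)
    Ai, _ = linalg.lapack.dpotri(L, lower=1)  # inverse, only one triangle filled
    Ai = np.tril(Ai) + np.tril(Ai, -1).T      # symmetrify by hand
    assert np.allclose(np.dot(Ai, A), np.eye(2))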
@@ -140,7 +140,7 @@ def chol_inv(L):

     """

-    return linalg.lapack.dtrtri(L, lower=True)[0]
+    return linalg.lapack.flapack.dtrtri(L, lower = True)[0]


 def multiple_pdinv(A):
@@ -157,7 +157,7 @@ def multiple_pdinv(A):
     N = A.shape[-1]
     chols = [jitchol(A[:,:,i]) for i in range(N)]
     halflogdets = [np.sum(np.log(np.diag(L[0]))) for L in chols]
-    invs = [linalg.lapack.dpotri(L[0], True)[0] for L in chols]
+    invs = [linalg.lapack.flapack.dpotri(L[0],True)[0] for L in chols]
     invs = [np.triu(I)+np.triu(I,1).T for I in invs]
     return np.dstack(invs),np.array(halflogdets)

@@ -358,9 +358,9 @@ def cholupdate(L,x):
 def backsub_both_sides(L, X,transpose='left'):
     """ Return L^-T * X * L^-1, assuming X is symmetrical and L is lower cholesky"""
     if transpose=='left':
-        tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
-        return linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
+        tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
+        return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T
     else:
-        tmp, _ = linalg.lapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=0)
-        return linalg.lapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=0)[0].T
+        tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=0)
+        return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=0)[0].T

@@ -5,8 +5,8 @@ from GPy.util import datasets as dat
 import urllib2

 class vertex:
     def __init__(self, name, id, parents=[], children=[], meta = {}):
         self.name = name
         self.id = id
         self.parents = parents
         self.children = children
@@ -18,7 +18,7 @@ class vertex:
 class tree:
     def __init__(self):
         self.vertices = []
         self.vertices.append(vertex(name='root', id=0))

     def __str__(self):
         index = self.find_root()
@@ -69,12 +69,12 @@ class tree:
                 return i
         raise Error, 'Reverse look up of id failed.'

     def get_index_by_name(self, name):
         """Give the index associated with a given vertex name."""
         for i in range(len(self.vertices)):
             if self.vertices[i].name == name:
                 return i
         raise Error, 'Reverse look up of name failed.'

     def order_vertices(self):
         """Order vertices in the graph such that parents always have a lower index than children."""
@@ -203,7 +203,7 @@ class acclaim_skeleton(skeleton):
         self.length = 1.0
         self.mass = 1.0
         self.type = 'acclaim'
         self.vertices[0] = vertex(name='root', id=0,
                                   parents = [0], children=[],
                                   meta = {'orientation': [],
                                           'axis': [0., 0., 0.],
@@ -303,7 +303,7 @@ class acclaim_skeleton(skeleton):

         """Loads an ASF file into a skeleton structure.
         loads skeleton structure from an acclaim skeleton file.
         ARG file_name : the file name to load in.
         RETURN skel : the skeleton for the file."""

         fid = open(file_name, 'r')
@@ -321,8 +321,8 @@ class acclaim_skeleton(skeleton):
             parts = lin.split()
             if parts[0] == 'begin':
                 bone_count += 1
                 self.vertices.append(vertex(name = '', id=np.NaN,
                                             meta={'name': [],
                                                   'id': [],
                                                   'offset': [],
                                                   'orientation': [],
@@ -348,7 +348,7 @@ class acclaim_skeleton(skeleton):

                 self.vertices[bone_count].children = []

             elif parts[0]=='name':
                 self.vertices[bone_count].name = parts[1]
                 lin = self.read_line(fid)

@@ -436,7 +436,7 @@ class acclaim_skeleton(skeleton):
                     if counter != frame_no:
                         raise Error, 'Unexpected frame number.'
                 else:
                     raise Error, 'Single bone name ...'
             else:
                 ind = self.get_index_by_name(parts[0])
                 bones[ind].append(np.array([float(channel) for channel in parts[1:]]))
@@ -542,7 +542,7 @@ class acclaim_skeleton(skeleton):
         lin = self.read_line(fid)
         while lin:
             if lin[0]==':':
                 if lin[1:]== 'name':
                     lin = self.read_line(fid)
                     self.name = lin
                 elif lin[1:]=='units':