bug fix (kernel copy) in mrd

This commit is contained in:
Max Zwiessele 2013-05-22 17:39:27 +01:00
parent 3e7b833d0f
commit 8f5d577bbb
4 changed files with 12 additions and 12 deletions

View file

@@ -39,8 +39,8 @@ class logexp(transformation):
return '(+ve)'
class logexp_clipped(transformation):
max_bound = 1e250
min_bound = 1e-9
max_bound = 1e300
min_bound = 1e-10
log_max_bound = np.log(max_bound)
log_min_bound = np.log(min_bound)
def __init__(self, lower=1e-6):
@@ -51,7 +51,7 @@ class logexp_clipped(transformation):
f = np.log(1. + exp)
# if np.isnan(f).any():
# import ipdb;ipdb.set_trace()
return f
return np.clip(f, self.min_bound, self.max_bound)
def finv(self, f):
return np.log(np.exp(np.clip(f, self.min_bound, self.max_bound)) - 1.)
def gradfactor(self, f):

View file

@@ -278,7 +278,7 @@ def bgplvm_simulation(optimize='scg',
return m
def mrd_simulation(optimize=True, plot_sim=False, **kw):
D1, D2, D3, N, M, Q = 15, 8, 8, 100, 3, 7
D1, D2, D3, N, M, Q = 150, 200, 400, 700, 3, 7
slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim)
from GPy.models import mrd
@@ -288,12 +288,12 @@ def mrd_simulation(optimize=True, plot_sim=False, **kw):
reload(mrd); reload(kern)
k = kern.linear(Q, [0.01] * Q, True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
m = mrd.MRD(Ylist, Q=Q, M=M, kernel=k, initx="concat", initz='permute', **kw)
m = mrd.MRD(Ylist, Q=Q, M=M, kernels=k, initx="concat", initz='permute', **kw)
for i, Y in enumerate(Ylist):
m['{}_noise'.format(i + 1)] = Y.var() / 100.
m.constrain('variance|noise', logexp_clipped())
# m.constrain('variance|noise', logexp_clipped(1e-6))
m.ensure_default_constraints()
# DEBUG

View file

@@ -95,7 +95,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
return x
def _clipped(self, x):
return x # np.clip(x, -1e100, 1e100)
return x # np.clip(x, -1e300, 1e300)
def _set_params(self, x, save_old=True, save_count=0):
# try:

View file

@@ -44,7 +44,7 @@ class MRD(model):
"""
def __init__(self, likelihood_or_Y_list, Q, M=10, names=None,
kernels=None, initx='PCA',
initz='permute', _debug=False, **kwargs):
initz='permute', _debug=False, **kw):
if names is None:
self.names = ["{}".format(i + 1) for i in range(len(likelihood_or_Y_list))]
@@ -64,7 +64,7 @@ class MRD(model):
self._init = True
X = self._init_X(initx, likelihood_or_Y_list)
Z = self._init_Z(initz, X)
self.bgplvms = [Bayesian_GPLVM(l, k, X=X, Z=Z, M=self.M, **kwargs) for l, k in zip(likelihood_or_Y_list, kernels)]
self.bgplvms = [Bayesian_GPLVM(l, Q=Q, kernel=k, X=X, Z=Z, M=self.M, **kw) for l, k in zip(likelihood_or_Y_list, kernels)]
del self._init
self.gref = self.bgplvms[0]
@@ -229,12 +229,12 @@ class MRD(model):
else:
Ylist.append(likelihood_or_Y.Y)
del likelihood_list
if init in "PCA_single":
if init in "PCA_concat":
X = PCA(numpy.hstack(Ylist), self.Q)[0]
elif init in "PCA_single":
X = numpy.zeros((Ylist[0].shape[0], self.Q))
for qs, Y in itertools.izip(numpy.array_split(numpy.arange(self.Q), len(Ylist)), Ylist):
X[:, qs] = PCA(Y, len(qs))[0]
elif init in "PCA_concat":
X = PCA(numpy.hstack(Ylist), self.Q)[0]
else: # init == 'random':
X = numpy.random.randn(Ylist[0].shape[0], self.Q)
self.X = X