Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-12 05:22:38 +02:00
mrd and bgplvm updates to conform to the new vardtc
This commit is contained in:
parent 3db095338d
commit 1294c24a28

3 changed files with 38 additions and 21 deletions

@@ -277,7 +277,9 @@ def bgplvm_simulation(optimize=True, verbose=1,
     k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
     #k = kern.RBF(Q, ARD=True, lengthscale=10.)
     m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
+    m.X.variance[:] = _np.random.uniform(0,.01,m.X.shape)
+    m.likelihood.variance = .1

     if optimize:
         print "Optimizing model:"
         m.optimize('bfgs', messages=verbose, max_iters=max_iters,
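
Note: the two added lines seed the variational input variances with small
random values and start the Gaussian noise at 0.1 before optimizing. A
minimal standalone sketch of the same pattern (synthetic Y; the shapes are
placeholders, not part of this commit):

    import numpy as np
    import GPy

    N, D, Q, num_inducing = 100, 13, 5, 10
    Y = np.random.randn(N, D)  # stand-in for the simulated data
    k = GPy.kern.Linear(Q, ARD=True)
    m = GPy.models.BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
    m.X.variance[:] = np.random.uniform(0, .01, m.X.shape)  # small variational variances
    m.likelihood.variance = .1  # modest starting noise
    m.optimize('bfgs', max_iters=100)
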
@@ -299,15 +301,16 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1,
     _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
     Y = Ylist[0]
     k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)

     inan = _np.random.binomial(1, .6, size=Y.shape).astype(bool)
     m = BayesianGPLVM(Y.copy(), Q, init="random", num_inducing=num_inducing, kernel=k)
     m.inference_method = VarDTCMissingData()
     m.Y[inan] = _np.nan
-    m.X.variance *= .1
+    m.X.variance[:] = _np.random.uniform(0,.01,m.X.shape)
+    m.likelihood.variance = .01
     m.parameters_changed()
     m.Yreal = Y

     if optimize:
         print "Optimizing model:"
         m.optimize('bfgs', messages=verbose, max_iters=max_iters,
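
Note: the missing-data demo masks roughly 60% of the entries with NaN after
construction and installs VarDTCMissingData by hand; with the constructor
change further down, NaNs present in Y at construction time select that
inference method automatically. A hedged sketch of the NaN-masking pattern
(synthetic data, not part of this commit):

    import numpy as np
    import GPy

    N, D, Q = 100, 13, 5
    Y = np.random.randn(N, D)
    inan = np.random.binomial(1, .6, size=Y.shape).astype(bool)  # ~60% missing
    Y[inan] = np.nan  # a NaN cell marks a missing observation
    m = GPy.models.BayesianGPLVM(Y, Q, init="random", num_inducing=10)
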
@@ -325,11 +328,11 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):

     D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
     _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)

     #Ylist = [Ylist[0]]
-    k = [kern.Linear(Q, ARD=True) + kern.White(Q, 1e-4) for _ in range(len(Ylist))]
+    k = [kern.Linear(Q, ARD=True) for _ in range(len(Ylist))]
     m = MRD(Ylist, input_dim=Q, num_inducing=num_inducing, kernel=k, initx="", initz='permute', **kw)

     m['.*noise'] = [Y.var()/500. for Y in Ylist]
     #for i, Y in enumerate(Ylist):
     #    m['.*Y_{}.*Gaussian.*noise'.format(i)] = Y.var(1) / 500.

@@ -50,6 +50,14 @@ class BayesianGPLVM(SparseGP):
         self.variational_prior = NormalPrior()
         X = NormalPosterior(X, X_variance)

+        if inference_method is None:
+            if np.any(np.isnan(Y)):
+                from ..inference.latent_function_inference.var_dtc import VarDTCMissingData
+                inference_method = VarDTCMissingData()
+            else:
+                from ..inference.latent_function_inference.var_dtc import VarDTC
+                inference_method = VarDTC()
+
         SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, name, **kwargs)
         self.add_parameter(self.X, index=0)

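
Note: with this hunk callers no longer need to pass inference_method; the
constructor inspects Y once and picks the matching VarDTC variant. A minimal
sketch of the selection logic in isolation (the helper name is hypothetical):

    import numpy as np

    def pick_inference_method(Y, VarDTC, VarDTCMissingData):
        # Mirrors the new constructor default: missing-data inference iff Y has NaNs.
        return VarDTCMissingData() if np.any(np.isnan(Y)) else VarDTC()
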
@@ -51,24 +51,25 @@ class MRD(Model):
                  inference_method=None, likelihood=None, name='mrd', Ynames=None):
         super(MRD, self).__init__(name)

+        self.input_dim = input_dim
+        self.num_inducing = num_inducing
+
+        self.Ylist = Ylist
+        self._in_init_ = True
+        X, fracs = self._init_X(initx, Ylist)
+        self.Z = Param('inducing inputs', self._init_Z(initz, X))
+        self.num_inducing = self.Z.shape[0] # ensure M==N if M>N
+
         # sort out the kernels
         if kernel is None:
             from ..kern import RBF
-            self.kern = [RBF(input_dim, ARD=1, name='rbf'.format(i)) for i in range(len(Ylist))]
+            self.kern = [RBF(input_dim, ARD=1, lengthscale=fracs[i], name='rbf'.format(i)) for i in range(len(Ylist))]
         elif isinstance(kernel, Kern):
             self.kern = [kernel.copy(name='{}'.format(kernel.name, i)) for i in range(len(Ylist))]
         else:
             assert len(kernel) == len(Ylist), "need one kernel per output"
             assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!"
             self.kern = kernel
-        self.input_dim = input_dim
-        self.num_inducing = num_inducing
-
-        self.Ylist = Ylist
-        self._in_init_ = True
-        X = self._init_X(initx, Ylist)
-        self.Z = Param('inducing inputs', self._init_Z(initz, X))
-        self.num_inducing = self.Z.shape[0] # ensure M==N if M>N

         if X_variance is None:
             X_variance = np.random.uniform(0, .1, X.shape)
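
Note: _init_X now returns per-view variance fractions (fracs) alongside X,
and the default RBF kernels use them as initial ARD lengthscales. A hedged
sketch of that seeding step in isolation (synthetic views and stand-in
fractions; only the RBF call reflects the diff):

    import numpy as np
    import GPy

    Q = 5
    Ylist = [np.random.randn(60, D) for D in (60, 20, 36)]  # three synthetic views
    fracs = [np.random.uniform(.5, 2., Q) for _ in Ylist]   # stand-in variance fractions
    # one RBF per view, ARD lengthscales seeded from that view's fractions
    kerns = [GPy.kern.RBF(Q, ARD=True, lengthscale=fracs[i]) for i in range(len(Ylist))]
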
@@ -108,8 +109,7 @@ class MRD(Model):
         self._log_marginal_likelihood = 0
         self.posteriors = []
         self.Z.gradient = 0.
-        self.X.mean.gradient = 0.
-        self.X.variance.gradient = 0.
+        self.X.gradient = 0.

         for y, k, l, i in itertools.izip(self.Ylist, self.kern, self.likelihood, self.inference_method):
             posterior, lml, grad_dict = i.inference(k, self.X, self.Z, l, y)
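
Note: X is the NormalPosterior parameter registered in bayesian_gplvm above,
so a single assignment resets the gradients of both its mean and its
variance. A small sketch, assuming paramz-style gradient broadcasting on
parameterized objects:

    import numpy as np
    from GPy.core.parameterization.variational import NormalPosterior

    X = NormalPosterior(np.zeros((4, 2)), np.ones((4, 2)))
    X.gradient = 0.  # broadcasts across the gradients of X.mean and X.variance
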
@@ -147,14 +147,20 @@ class MRD(Model):
         if Ylist is None:
             Ylist = self.Ylist
         if init in "PCA_concat":
-            X = initialize_latent('PCA', np.hstack(Ylist), self.input_dim)
+            X, fracs = initialize_latent('PCA', self.input_dim, np.hstack(Ylist))
+            fracs = [fracs]*self.input_dim
         elif init in "PCA_single":
             X = np.zeros((Ylist[0].shape[0], self.input_dim))
+            fracs = []
             for qs, Y in itertools.izip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist):
-                X[:, qs] = initialize_latent('PCA', Y, len(qs))
+                x, frcs = initialize_latent('PCA', len(qs), Y)
+                X[:, qs] = x
+                fracs.append(frcs)
         else: # init == 'random':
             X = np.random.randn(Ylist[0].shape[0], self.input_dim)
-        return X
+            fracs = X.var(0)
+            fracs = [fracs]*self.input_dim
+        return X, fracs

     def _init_Z(self, init="permute", X=None):
         if X is None:
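
Note: every branch of _init_X now returns (X, fracs), and initialize_latent's
argument order has changed to (method, latent_dim, Y). A hedged sketch of the
new contract for the 'random' branch (standalone, synthetic data):

    import numpy as np

    def init_random(Ylist, input_dim):
        # Mirrors the 'random' branch: latent means plus repeated variance fractions.
        X = np.random.randn(Ylist[0].shape[0], input_dim)
        fracs = [X.var(0)] * input_dim
        return X, fracs

    X, fracs = init_random([np.random.randn(60, 20)], 5)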