Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-10 12:32:40 +02:00)

commit a5c3e88f83 (parent 9b2e49c949)
[MRD] init and sim nicer now

4 changed files with 67 additions and 66 deletions

@@ -219,7 +219,9 @@ def _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim=False):
     import numpy as np
     np.random.seed(3000)
 
-    k = GPy.kern.Matern32(Q_signal, 10., lengthscale=1+(np.random.uniform(1,6,Q_signal)), ARD=1)
+    k = GPy.kern.Matern32(Q_signal, 1., lengthscale=(np.random.uniform(1, 6, Q_signal)), ARD=1)
+    for i in range(Q_signal):
+        k += GPy.kern.PeriodicExponential(1, variance=1., active_dims=[i], period=3., lower=-2, upper=6)
     t = np.c_[[np.linspace(-1, 5, N) for _ in range(Q_signal)]].T
     K = k.K(t)
     s2, s1, s3, sS = np.random.multivariate_normal(np.zeros(K.shape[0]), K, size=(4))[:, :, None]
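
The revised simulation builds one Matern32 kernel over all Q_signal latent dimensions, then adds an independent PeriodicExponential component per dimension, and samples latent signals from the resulting GP prior. A minimal standalone sketch of the same construction, assuming GPy is installed and using hypothetical values for Q_signal and N (the example defines its own elsewhere):

import numpy as np
import GPy

Q_signal, N = 4, 100  # hypothetical stand-ins for the example's own values
np.random.seed(3000)

# Matern32 base kernel with a random lengthscale per latent dimension (ARD).
k = GPy.kern.Matern32(Q_signal, 1., lengthscale=np.random.uniform(1, 6, Q_signal), ARD=True)
# Add a periodic component acting on each latent dimension separately.
for i in range(Q_signal):
    k += GPy.kern.PeriodicExponential(1, variance=1., active_dims=[i], period=3., lower=-2, upper=6)

# Evaluate the summed kernel on a grid of inputs and draw four latent
# signals from the zero-mean GP prior with that covariance.
t = np.c_[[np.linspace(-1, 5, N) for _ in range(Q_signal)]].T
K = k.K(t)
s1, s2, s3, sS = np.random.multivariate_normal(np.zeros(N), K, size=4)[:, :, None]

Dropping the Matern variance from 10. to 1. puts the smooth component on the same amplitude scale as the added unit-variance periodic terms.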

@@ -365,7 +367,7 @@ def ssgplvm_simulation(optimize=True, verbose=1,
 
 def bgplvm_simulation_missing_data(optimize=True, verbose=1,
                                    plot=True, plot_sim=False,
-                                   max_iters=2e4,
+                                   max_iters=2e4, percent_missing=.1,
                                    ):
     from GPy import kern
     from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch

@@ -375,7 +377,7 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1,
     Y = Ylist[0]
     k = kern.Linear(Q, ARD=True) # + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
 
-    inan = _np.random.binomial(1, .2, size=Y.shape).astype(bool) # 80% missing data
+    inan = _np.random.binomial(1, percent_missing, size=Y.shape).astype(bool) # mask a percent_missing fraction of the data
     Ymissing = Y.copy()
     Ymissing[inan] = _np.nan
 
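
The hard-coded 20% mask (whose inline comment wrongly claimed 80%) becomes a percent_missing argument, with each entry of Y dropped independently with that probability. A short sketch of the masking, assuming numpy is imported as _np as in this file and using a hypothetical data matrix:

import numpy as _np

Y = _np.random.randn(20, 5)  # hypothetical (N, D) data matrix
percent_missing = .1

# Each entry is marked missing independently with probability percent_missing.
inan = _np.random.binomial(1, percent_missing, size=Y.shape).astype(bool)
Ymissing = Y.copy()
Ymissing[inan] = _np.nan  # NaN entries are handled by the missing-data path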

@@ -81,18 +81,19 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
     def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None):
         posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices)
 
-        kl_fctr = 1.
+        current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
+                                            variational_posterior=X,
+                                            Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
+                                            dL_dpsi1=grad_dict['dL_dpsi1'],
+                                            dL_dpsi2=grad_dict['dL_dpsi2'])
+
+        kl_fctr = self.kl_factr
         if self.missing_data:
             d = self.output_dim
             log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)/d
         else:
             log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)
 
-        current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
-                                            variational_posterior=X,
-                                            Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
-                                            dL_dpsi1=grad_dict['dL_dpsi1'],
-                                            dL_dpsi2=grad_dict['dL_dpsi2'])
 
         # Subsetting Variational Posterior objects, makes the gradients
         # empty. We need them to be 0 though:
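
The KL term is now weighted by self.kl_factr instead of a fixed 1., and divided by the number of output dimensions d on the missing-data path, where the expected log likelihood is accumulated per dimension. A hypothetical sketch of the resulting objective arithmetic (the function and argument names here are illustrative, not GPy API):

def scaled_objective(expected_log_lik, kl_divergence, kl_factr=1., d=None):
    # With several models sharing one X (as in MRD), each sets
    # kl_factr = 1/num_models so the KL of the shared X is counted once.
    kl = kl_factr * kl_divergence
    if d is not None:
        # Missing-data path: the likelihood is summed per output
        # dimension, so each of the d dimensions carries KL/d.
        kl = kl / d
    return expected_log_lik - kl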

@@ -105,7 +105,6 @@ class MRD(BayesianGPLVMMiniBatch):
             kernels = []
             for i in range(len(Ylist)):
                 k = kernel.copy()
-                print k is kernel, k.observers, k.constraints
                 kernels.append(k)
         else:
             assert len(kernel) == len(Ylist), "need one kernel per output"

@@ -144,8 +143,16 @@ class MRD(BayesianGPLVMMiniBatch):
         for i, n, k, l, Y, im, bs in itertools.izip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize):
             assert Y.shape[0] == self.num_data, "All datasets need to share the number of datapoints, and those have to correspond to one another"
             md = np.isnan(Y).any()
-            spgp = SparseGPMiniBatch(self.X, Y, Z, k, l, im, n, None, normalizer, md, stochastic, bs)
+            spgp = BayesianGPLVMMiniBatch(Y, input_dim, X, X_variance,
+                                          Z=Z, kernel=k, likelihood=l,
+                                          inference_method=im, name=n,
+                                          normalizer=normalizer,
+                                          missing_data=md,
+                                          stochastic=stochastic,
+                                          batchsize=bs)
+            spgp.kl_factr = 1./len(Ynames)
             spgp.unlink_parameter(spgp.Z)
+            spgp.unlink_parameter(spgp.X)
             del spgp.Z
             del spgp.X
             spgp.Z = self.Z
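
Each view is now a full BayesianGPLVMMiniBatch whose private X and Z parameters are unlinked and replaced by the MRD's shared ones, with kl_factr = 1/len(Ynames) so the KL over the shared X is not counted once per view. A hedged usage sketch of the resulting model, with hypothetical data and sizes:

import numpy as np
import GPy

Y1 = np.random.randn(50, 8)   # two views over the same 50 datapoints
Y2 = np.random.randn(50, 12)

# One kernel is given and copied per view (see the kernels loop above).
m = GPy.models.MRD([Y1, Y2], input_dim=3, num_inducing=10,
                   kernel=GPy.kern.RBF(3, ARD=True))
m.optimize(messages=False, max_iters=50)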

@@ -165,20 +172,10 @@ class MRD(BayesianGPLVMMiniBatch):
 
             self.logger.info('working on im <{}>'.format(hex(id(i))))
             self.Z.gradient[:] += b.full_values['Zgrad']
-            grad_dict = b.grad_dict
+            grad_dict = b.full_values
 
-            if isinstance(self.X, VariationalPosterior):
-                dL_dmean, dL_dS = b.kern.gradients_qX_expectations(
-                                    grad_dict['dL_dpsi0'],
-                                    grad_dict['dL_dpsi1'],
-                                    grad_dict['dL_dpsi2'],
-                                    self.Z, self.X)
-                self.X.mean.gradient += dL_dmean
-                self.X.variance.gradient += dL_dS
-
-            else:
-                #gradients wrt kernel
-                self.X.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'], self.X, self.Z)
+            self.X.mean.gradient += grad_dict['meangrad']
+            self.X.variance.gradient += grad_dict['vargrad']
 
         if isinstance(self.X, VariationalPosterior):
             # update for the KL divergence
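
With the per-view models computing their own variational gradients (the meangrad/vargrad entries added to current_values above), MRD only has to read them out of each view's full_values and sum them onto the shared X, rather than re-deriving them via gradients_qX_expectations. The accumulation pattern, as a self-contained sketch with hypothetical shapes:

import numpy as np

N, Q = 50, 3
# hypothetical per-view results, standing in for each b.full_values
views = [{'meangrad': np.random.randn(N, Q), 'vargrad': np.random.randn(N, Q)}
         for _ in range(2)]

mean_grad = np.zeros((N, Q))
var_grad = np.zeros((N, Q))
for full_values in views:
    mean_grad += full_values['meangrad']  # gradients on the shared X add up
    var_grad += full_values['vargrad']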

@@ -238,7 +235,7 @@ class MRD(BayesianGPLVMMiniBatch):
                 pass
             if axes is None:
                 ax = fig.add_subplot(1, len(self.bgplvms), i + 1, sharex=sharex_ax, sharey=sharey_ax)
-            elif isinstance(axes, (tuple, list)):
+            elif isinstance(axes, (tuple, list, np.ndarray)):
                 ax = axes[i]
             else:
                 raise ValueError("Need one axes per latent dimension input_dim")
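
np.ndarray joins the isinstance check because matplotlib's plt.subplots hands the axes back as a numpy array, which the old tuple/list test rejected. A minimal demonstration:

import numpy as np
import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 3)
print(isinstance(axes, (tuple, list)))              # False: old check failed here
print(isinstance(axes, (tuple, list, np.ndarray)))  # True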

@@ -286,7 +283,7 @@ class MRD(BayesianGPLVMMiniBatch):
         titles = [r'${}$'.format(name) for name in self.names]
         ymax = reduce(max, [np.ceil(max(g.kern.input_sensitivity())) for g in self.bgplvms])
         def plotf(i, g, ax):
-            ax.set_ylim([0,ymax])
+            #ax.set_ylim([0,ymax])
             return g.kern.plot_ARD(ax=ax, title=titles[i], *args, **kwargs)
         fig = self._handle_plotting(fignum, ax, plotf, sharex=sharex, sharey=sharey)
         return fig

@@ -56,6 +56,7 @@ Created on 3 Nov 2014
             raise NotImplementedError, "what to do what to do?"
         print "defaulting to ", inference_method, "for latent function inference"
 
+        self.kl_factr = 1.
         self.Z = Param('inducing inputs', Z)
         self.num_inducing = Z.shape[0]
 