Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-09 12:02:38 +02:00)

commit ba74e29aee
merge with upstream

115 changed files with 1178 additions and 531 deletions
GPy/models/bayesian_gplvm.py
@@ -61,7 +61,7 @@ class BayesianGPLVM(SparseGP_MPI):
             else:
                 from ..inference.latent_function_inference.var_dtc import VarDTC
                 self.logger.debug("creating inference_method var_dtc")
-                inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
+                inference_method = VarDTC(limit=3 if not missing_data else Y.shape[1])
         if isinstance(inference_method,VarDTC_minibatch):
             inference_method.mpi_comm = mpi_comm
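Context for the `limit` change above, which recurs in several files below (our note, not part of the commit): VarDTC forwards `limit` to its result cache, i.e. how many distinct cached values it keeps for per-Y statistics such as tr(YY^T). With missing data the model runs one inference per output column, so the cache is sized to Y.shape[1]; otherwise a small fixed limit (now 3 instead of 1) suffices. A toy sketch of such a bounded cache; `LimitedCache` is hypothetical, not GPy's implementation:

import numpy as np

class LimitedCache:
    """Memoize a function for at most `limit` distinct inputs (FIFO eviction)."""
    def __init__(self, fn, limit=3):
        self.fn, self.limit, self._store = fn, limit, {}

    def __call__(self, Y):
        key = id(Y)
        if key not in self._store:
            if len(self._store) >= self.limit:
                self._store.pop(next(iter(self._store)))  # evict the oldest entry
            self._store[key] = self.fn(Y)
        return self._store[key]

# e.g. a statistic of Y that is reused across many inference calls
tr_YYT = LimitedCache(lambda Y: float(np.einsum('ij,ij->', Y, Y)), limit=3)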
GPy/models/bayesian_gplvm_minibatch.py
@@ -40,12 +40,13 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
             Z = np.random.permutation(X.copy())[:num_inducing]
         assert Z.shape[1] == X.shape[1]

-        if X_variance == False:
+        if X_variance is False:
             self.logger.info('no variance on X, activating sparse GPLVM')
             X = Param("latent space", X)
-        elif X_variance is None:
-            self.logger.info("initializing latent space variance ~ uniform(0,.1)")
-            X_variance = np.random.uniform(0,.1,X.shape)
         else:
+            if X_variance is None:
+                self.logger.info("initializing latent space variance ~ uniform(0,.1)")
+                X_variance = np.random.uniform(0,.1,X.shape)
             self.variational_prior = NormalPrior()
             X = NormalPosterior(X, X_variance)
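An aside on the `== False` to `is False` fix above (our illustration, not from the commit): equality against False also matches 0 and broadcasts over arrays, which is wrong for an X_variance that may be an array, None, or the literal False. It also explains why the None case had to move inside the else branch, so it still builds a NormalPosterior:

import numpy as np

x = 0
print(x == False)            # True  -> `== False` would wrongly disable the variance for 0
print(x is False)            # False -> only the literal False selects the sparse-GPLVM branch
xv = np.zeros((2, 3))
print((xv == False).all())   # True  -> an all-zero variance array matches elementwise too;
                             #          used directly in `if`, it raises (ambiguous truth value)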
@@ -61,7 +62,7 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         if inference_method is None:
             from ..inference.latent_function_inference.var_dtc import VarDTC
             self.logger.debug("creating inference_method var_dtc")
-            inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
+            inference_method = VarDTC(limit=3 if not missing_data else Y.shape[1])

         super(BayesianGPLVMMiniBatch,self).__init__(X, Y, Z, kernel, likelihood=likelihood,
                                                     name=name, inference_method=inference_method,
@@ -71,13 +72,13 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         self.X = X
         self.link_parameter(self.X, 0)

-    def set_X_gradients(self, X, X_grad):
-        """Set the gradients of the posterior distribution of X in its specific form."""
-        X.mean.gradient, X.variance.gradient = X_grad
+    #def set_X_gradients(self, X, X_grad):
+    #    """Set the gradients of the posterior distribution of X in its specific form."""
+    #    X.mean.gradient, X.variance.gradient = X_grad

-    def get_X_gradients(self, X):
-        """Get the gradients of the posterior distribution of X in its specific form."""
-        return X.mean.gradient, X.variance.gradient
+    #def get_X_gradients(self, X):
+    #    """Get the gradients of the posterior distribution of X in its specific form."""
+    #    return X.mean.gradient, X.variance.gradient

     def _outer_values_update(self, full_values):
         """
@@ -106,7 +107,7 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         super(BayesianGPLVMMiniBatch,self).parameters_changed()

         kl_fctr = self.kl_factr
-        if kl_fctr > 0:
+        if kl_fctr > 0 and self.has_uncertain_inputs():
             Xgrad = self.X.gradient.copy()
             self.X.gradient[:] = 0
             self.variational_prior.update_gradients_KL(self.X)
@@ -122,8 +123,8 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):

         if self.missing_data or not self.stochastics:
             self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
-        elif self.stochastics:
+        else: #self.stochastics is given:
             d = self.output_dim
             self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)*self.stochastics.batchsize/d

-            self._Xgrad = self.X.gradient.copy()
+        self._Xgrad = self.X.gradient.copy()
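A note on the batchsize/d factor above (our reading of the hunk, not text from the commit): with stochastic minibatching over the d = output_dim output columns, each gradient step sees only `batchsize` of them, so the KL term is down-weighted by batchsize/d; over one full sweep through the columns the weighted terms then add up to exactly one full KL contribution. A quick arithmetic check of that bookkeeping:

batchsize, d = 4, 20
kl_full = 7.3                        # stand-in value for KL_divergence(X)
steps_per_sweep = d / batchsize      # 5 minibatch steps cover every column once
total = steps_per_sweep * (kl_full * batchsize / d)
assert abs(total - kl_full) < 1e-12  # the scaled terms sum to one full KL per sweep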
GPy/models/gplvm.py
@@ -41,4 +41,4 @@ class GPLVM(GP):

     def parameters_changed(self):
         super(GPLVM, self).parameters_changed()
-        self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
+        self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)

(The removed and added lines are textually identical; the difference is invisible here, most likely trailing whitespace or a newline at end of file.)
GPy/models/mrd.py
@@ -5,14 +5,14 @@ import numpy as np
 import itertools, logging

 from ..kern import Kern
-from GPy.core.parameterization.variational import NormalPrior
+from ..core.parameterization.variational import NormalPrior
 from ..core.parameterization import Param
 from paramz import ObsAr
 from ..inference.latent_function_inference.var_dtc import VarDTC
 from ..inference.latent_function_inference import InferenceMethodList
 from ..likelihoods import Gaussian
 from ..util.initialization import initialize_latent
-from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
+from ..models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch

 class MRD(BayesianGPLVMMiniBatch):
     """
@@ -215,40 +215,6 @@ class MRD(BayesianGPLVMMiniBatch):
         Z = np.random.randn(self.num_inducing, self.input_dim) * X.var()
         return Z

-    def _handle_plotting(self, fignum, axes, plotf, sharex=False, sharey=False):
-        import matplotlib.pyplot as plt
-        if axes is None:
-            fig = plt.figure(num=fignum)
-        sharex_ax = None
-        sharey_ax = None
-        plots = []
-        for i, g in enumerate(self.bgplvms):
-            try:
-                if sharex:
-                    sharex_ax = ax # @UndefinedVariable
-                    sharex = False # dont set twice
-                if sharey:
-                    sharey_ax = ax # @UndefinedVariable
-                    sharey = False # dont set twice
-            except:
-                pass
-            if axes is None:
-                ax = fig.add_subplot(1, len(self.bgplvms), i + 1, sharex=sharex_ax, sharey=sharey_ax)
-            elif isinstance(axes, (tuple, list, np.ndarray)):
-                ax = axes[i]
-            else:
-                raise ValueError("Need one axes per latent dimension input_dim")
-            plots.append(plotf(i, g, ax))
-            if sharey_ax is not None:
-                plt.setp(ax.get_yticklabels(), visible=False)
-            plt.draw()
-        if axes is None:
-            try:
-                fig.tight_layout()
-            except:
-                pass
-        return plots
-
     def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None, Yindex=0):
         """
         Prediction for data set Yindex[default=0].
@@ -270,59 +236,50 @@ class MRD(BayesianGPLVMMiniBatch):
         # sharex=sharex, sharey=sharey)
         # return fig

-    def plot_scales(self, fignum=None, ax=None, titles=None, sharex=False, sharey=True, *args, **kwargs):
+    def plot_scales(self, titles=None, fig_kwargs={}, **kwargs):
         """
-
-        TODO: Explain other parameters
+        Plot input sensitivity for all datasets, to see which input dimensions are
+        significant for which dataset.

         :param titles: titles for axes of datasets

         kwargs go into plot_ARD for each kernel.
         """
+        from ..plotting import plotting_library as pl
+
         if titles is None:
             titles = [r'${}$'.format(name) for name in self.names]
-        ymax = reduce(max, [np.ceil(max(g.kern.input_sensitivity())) for g in self.bgplvms])
-        def plotf(i, g, ax):
-            #ax.set_ylim([0,ymax])
-            return g.kern.plot_ARD(ax=ax, title=titles[i], *args, **kwargs)
-        fig = self._handle_plotting(fignum, ax, plotf, sharex=sharex, sharey=sharey)
-        return fig
+        M = len(self.bgplvms)
+        fig = pl().figure(rows=1, cols=M, **fig_kwargs)
+        for c in range(M):
+            canvas = self.bgplvms[c].kern.plot_ARD(title=titles[c], figure=fig, col=c+1, **kwargs)
+        return canvas

     def plot_latent(self, labels=None, which_indices=None,
-                    resolution=50, ax=None, marker='o', s=40,
-                    fignum=None, plot_inducing=True, legend=True,
+                    resolution=60, legend=True,
                     plot_limits=None,
-                    aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
+                    updates=False,
+                    kern=None, marker='<>^vsd',
+                    num_samples=1000, projection='2d',
+                    predict_kwargs={},
+                    scatter_kwargs=None, **imshow_kwargs):
         """
         see plotting.matplot_dep.dim_reduction_plots.plot_latent
         if predict_kwargs is None, will plot latent spaces for 0th dataset (and kernel), otherwise give
         predict_kwargs=dict(Yindex='index') for plotting only the latent space of dataset with 'index'.
         """
-        import sys
-        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
-        from matplotlib import pyplot as plt
-        from ..plotting.matplot_dep import dim_reduction_plots
+        from ..plotting.gpy_plot.latent_plots import plot_latent

         if "Yindex" not in predict_kwargs:
             predict_kwargs['Yindex'] = 0

         Yindex = predict_kwargs['Yindex']
-        if ax is None:
-            fig = plt.figure(num=fignum)
-            ax = fig.add_subplot(111)
-        else:
-            fig = ax.figure
-
         self.kern = self.bgplvms[Yindex].kern
         self.likelihood = self.bgplvms[Yindex].likelihood
-        plot = dim_reduction_plots.plot_latent(self, labels, which_indices,
-                                               resolution, ax, marker, s,
-                                               fignum, plot_inducing, legend,
-                                               plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
-        ax.set_title(self.bgplvms[Yindex].name)
-        try:
-            fig.tight_layout()
-        except:
-            pass
-
-        return plot
+        return plot_latent(self, labels, which_indices, resolution, legend, plot_limits, updates, kern, marker, num_samples, projection, scatter_kwargs)

     def __getstate__(self):
         state = super(MRD, self).__getstate__()
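The rewrite above moves MRD's plotting onto GPy's backend-agnostic plotting library and changes both call signatures. A usage sketch assembled from those signatures and GPy's documented MRD constructor (data shapes and titles are made up; an active plotting backend such as matplotlib is assumed):

import numpy as np
import GPy

Y1 = np.random.randn(40, 8)     # two views of the same 40 underlying points
Y2 = np.random.randn(40, 12)
m = GPy.models.MRD([Y1, Y2], input_dim=3, num_inducing=10)
m.optimize(messages=False, max_iters=50)

m.plot_scales(titles=['view 1', 'view 2'])      # per-view ARD input sensitivities
m.plot_latent(predict_kwargs=dict(Yindex=1))    # latent space w.r.t. dataset 1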
GPy/core/sparse_gp_minibatch.py
@@ -41,11 +41,12 @@ class SparseGPMiniBatch(SparseGP):
     def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None,
                  name='sparse gp', Y_metadata=None, normalizer=False,
                  missing_data=False, stochastic=False, batchsize=1):
+        self._update_stochastics = False

         # pick a sensible inference method
         if inference_method is None:
             if isinstance(likelihood, likelihoods.Gaussian):
-                inference_method = var_dtc.VarDTC(limit=1 if not missing_data else Y.shape[1])
+                inference_method = var_dtc.VarDTC(limit=3 if not missing_data else Y.shape[1])
             else:
                 #inference_method = ??
                 raise NotImplementedError("what to do what to do?")
@@ -73,7 +74,14 @@ class SparseGPMiniBatch(SparseGP):
         logger.info("Adding Z as parameter")
         self.link_parameter(self.Z, index=0)
         self.posterior = None

+    def optimize(self, optimizer=None, start=None, **kwargs):
+        try:
+            self._update_stochastics = True
+            SparseGP.optimize(self, optimizer=optimizer, start=start, **kwargs)
+        finally:
+            self._update_stochastics = False
+
     def has_uncertain_inputs(self):
         return isinstance(self.X, VariationalPosterior)
@@ -226,16 +234,16 @@ class SparseGPMiniBatch(SparseGP):
         woodbury_inv = self.posterior._woodbury_inv
         woodbury_vector = self.posterior._woodbury_vector

-        if not self.stochastics:
-            m_f = lambda i: "Inference with missing_data: {: >7.2%}".format(float(i+1)/self.output_dim)
-            message = m_f(-1)
-            print(message, end=' ')
+        #if not self.stochastics:
+        #    m_f = lambda i: "Inference with missing_data: {: >7.2%}".format(float(i+1)/self.output_dim)
+        #    message = m_f(-1)
+        #    print(message, end=' ')

         for d, ninan in self.stochastics.d:
-            if not self.stochastics:
-                print(' '*(len(message)) + '\r', end=' ')
-                message = m_f(d)
-                print(message, end=' ')
+            #if not self.stochastics:
+            #    print(' '*(len(message)) + '\r', end=' ')
+            #    message = m_f(d)
+            #    print(message, end=' ')

             psi0ni = self.psi0[ninan]
             psi1ni = self.psi1[ninan]
@@ -262,8 +270,8 @@ class SparseGPMiniBatch(SparseGP):
             woodbury_vector[:, d] = posterior.woodbury_vector
             self._log_marginal_likelihood += log_marginal_likelihood

-        if not self.stochastics:
-            print('')
+        #if not self.stochastics:
+        #    print('')

         if self.posterior is None:
             self.posterior = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector,
@@ -314,6 +322,8 @@ class SparseGPMiniBatch(SparseGP):
         if self.missing_data:
             self._outer_loop_for_missing_data()
         elif self.stochastics:
+            if self._update_stochastics:
+                self.stochastics.do_stochastics()
             self._outer_loop_without_missing_data()
         else:
             self.posterior, self._log_marginal_likelihood, self.grad_dict = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
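The `optimize` override and the `do_stochastics` guard above cooperate through the `_update_stochastics` flag: minibatches are resampled only while the optimizer is actually running, and the try/finally restores the flag even if an iteration raises. A standalone paraphrase of the pattern (toy code, not GPy's):

import random

class StochasticModel:
    def __init__(self, data, batchsize=2):
        self.data, self.batchsize = data, batchsize
        self.batch = data[:batchsize]
        self._update_stochastics = False           # resample only while optimizing

    def parameters_changed(self):
        if self._update_stochastics:
            self.batch = random.sample(self.data, self.batchsize)  # fresh minibatch
        return sum(self.batch)                     # stand-in for the objective

    def optimize(self, steps=5):
        try:
            self._update_stochastics = True
            for _ in range(steps):
                self.parameters_changed()          # the optimizer triggers this per step
        finally:
            self._update_stochastics = False       # reset even if a step raises

m = StochasticModel(list(range(10)))
m.optimize()
m.parameters_changed()   # outside optimize(): the last batch is reused, not resampled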
GPy/models/sparse_gp_regression.py
@@ -62,4 +62,4 @@ class SparseGPRegression(SparseGP_MPI):
         if isinstance(self.inference_method,VarDTC_minibatch):
             update_gradients_sparsegp(self, mpi_comm=self.mpi_comm)
         else:
-            super(SparseGPRegression, self).parameters_changed()
+            super(SparseGPRegression, self).parameters_changed()

(As in gplvm.py above, the removed and added lines are textually identical; the change is invisible whitespace or a line-ending.)
GPy/models/sparse_gplvm.py
@@ -4,6 +4,7 @@

 import sys
 from .sparse_gp_regression import SparseGPRegression
+from ..core import Param

 class SparseGPLVM(SparseGPRegression):
     """
@@ -21,7 +22,9 @@ class SparseGPLVM(SparseGPRegression):
         if X is None:
             from ..util.initialization import initialize_latent
             X, fracs = initialize_latent(init, input_dim, Y)
+        X = Param('latent space', X)
         SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
+        self.link_parameter(self.X, 0)

     def parameters_changed(self):
         super(SparseGPLVM, self).parameters_changed()