Merge branch 'master' of github.com:SheffieldML/GPy

Nicolo Fusi 2015-01-10 12:49:31 -08:00
commit 98f632e92e
11 changed files with 146 additions and 101 deletions

@@ -239,7 +239,7 @@ class Model(Parameterized):
             print 'nothing to optimize'
         if not self.update_model():
-            print "setting updates on again"
+            print "Updates were off, setting updates on again"
             self.update_model(True)
         if start == None:

@@ -400,7 +400,11 @@ class Model(Parameterized):
                          ['<b>Log-likelihood</b>', '{}<br>'.format(float(self.log_likelihood()))],
                          ["<b>Number of Parameters</b>", '{}<br>'.format(self.size)]]
         from operator import itemgetter
-        to_print = [""] + ["{}: {}".format(name, detail) for name, detail in model_details] + ["<br><b>Parameters</b>:"]
+        to_print = ["""<style type="text/css">
+.pd{
+    font-family:"Courier New", Courier, monospace !important;
+}
+</style>\n"""] + ["<p class=pd>"] + ["{}: {}".format(name, detail) for name, detail in model_details] + ["</p>"]
         to_print.append(super(Model, self)._repr_html_())
         return "\n".join(to_print)

@@ -264,15 +264,21 @@ class Param(Parameterizable, ObsAr):
         ties = [' '.join(map(lambda x: x, t)) for t in ties]
         header_format = """
 <tr>
-  <td><b>{i}</b></td>
-  <td><b>{x}</b></td>
-  <td><b>{c}</b></td>
-  <td><b>{p}</b></td>
-  <td><b>{t}</b></td>
+  <th><b>{i}</b></th>
+  <th><b>{x}</b></th>
+  <th><b>{c}</b></th>
+  <th><b>{p}</b></th>
+  <th><b>{t}</b></th>
 </tr>"""
         header = header_format.format(x=self.hierarchy_name(), c=__constraints_name__, i=__index_name__, t=__tie_name__, p=__priors_name__) # nice header for printing
         if not ties: ties = itertools.cycle([''])
-        return "\n".join(['<table>'] + [header] + ["<tr><td>{i}</td><td align=\"right\">{x}</td><td>{c}</td><td>{p}</td><td>{t}</td></tr>".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in itertools.izip(indices, vals, constr_matrix, ties, prirs)] + ["</table>"])
+        return "\n".join(["""<style type="text/css">
+.tg {border-collapse:collapse;border-spacing:0;border-color:#999;}
+.tg td{font-family:Arial, sans-serif;font-size:14px;padding:2px 3px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#999;color:#444;background-color:#F7FDFA;}
+.tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:2px 3px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#999;color:#fff;background-color:#26ADE4;}
+.tg .tg-left{font-family:"Courier New", Courier, monospace !important;text-align:left}
+.tg .tg-right{font-family:"Courier New", Courier, monospace !important;text-align:right}
+</style>"""] + ['<table class="tg">'] + [header] + ["<tr><td class=tg-left>{i}</td><td class=tg-right>{x}</td><td class=tg-left>{c}</td><td class=tg-left>{p}</td><td class=tg-left>{t}</td></tr>".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in itertools.izip(indices, vals, constr_matrix, ties, prirs)] + ["</table>"])

     def __str__(self, constr_matrix=None, indices=None, prirs=None, ties=None, lc=None, lx=None, li=None, lp=None, lt=None, only_name=False):
         filter_ = self._current_slice_

@@ -377,7 +377,7 @@ class Parameterized(Parameterizable):
         cl = max([len(str(x)) if x else 0 for x in constrs + ["Constraint"]])
         tl = max([len(str(x)) if x else 0 for x in ts + ["Tied to"]])
         pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]])
-        format_spec = "<tr><td>{{name:<{0}s}}</td><td align=\"right\">{{desc:>{1}s}}</td><td>{{const:^{2}s}}</td><td>{{pri:^{3}s}}</td><td>{{t:^{4}s}}</td></tr>".format(nl, sl, cl, pl, tl)
+        format_spec = "<tr><td class=tg-left>{{name:<{0}s}}</td><td class=tg-right>{{desc:>{1}s}}</td><td class=tg-left>{{const:^{2}s}}</td><td class=tg-left>{{pri:^{3}s}}</td><td class=tg-left>{{t:^{4}s}}</td></tr>".format(nl, sl, cl, pl, tl)
         to_print = []
         for n, d, c, t, p in itertools.izip(names, desc, constrs, ts, prirs):
             to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))

@@ -385,13 +385,21 @@ class Parameterized(Parameterizable):
         if header:
             header = """
 <tr>
-  <td><b>{name}</b>
-  <td><b>Value</b></td>
-  <td><b>Constraint</b></td>
-  <td><b>Prior</b></td>
-  <td><b>Tied to</b></td>""".format(name=name)
+  <th><b>{name}</b></th>
+  <th><b>Value</b></th>
+  <th><b>Constraint</b></th>
+  <th><b>Prior</b></th>
+  <th><b>Tied to</b></th>
+</tr>""".format(name=name)
             to_print.insert(0, header)
-        return '<table>' + '\n'.format(sep).join(to_print) + '\n</table>'
+        style = """<style type="text/css">
+.tg {border-collapse:collapse;border-spacing:0;border-color:#999;}
+.tg td{font-family:Arial, sans-serif;font-size:14px;padding:2px 3px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#999;color:#444;background-color:#F7FDFA;}
+.tg th{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;padding:2px 3px;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#999;color:#fff;background-color:#26ADE4;}
+.tg .tg-left{font-family:"Courier New", Courier, monospace !important;text-align:left}
+.tg .tg-right{font-family:"Courier New", Courier, monospace !important;text-align:right}
+</style>"""
+        return style + '\n' + '<table class="tg">' + '\n'.format(sep).join(to_print) + '\n</table>'

     def __str__(self, header=True):
         name = adjust_name_for_printing(self.name) + "."

@@ -243,6 +243,41 @@ class NormalNaturalThroughTheta(NormalTheta):
         dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
         #=======================================================================
+        #=======================================================================
+        # This is by going through theta fully and then going into eta direction:
+        #dmu = dmuvar[self.mu_indices]
+        #dmuvar[self.var_indices] += dmu*mu*(var + 4/var)
+        #=======================================================================
+        return dmuvar # which is now the gradient multiplicator
+
+    def __str__(self):
+        return "natgrad"
+
+class NormalNaturalWhooot(NormalTheta):
+    _instances = []
+    def __new__(cls, mu_indices, var_indices):
+        if cls._instances:
+            cls._instances[:] = [instance for instance in cls._instances if instance()]
+            for instance in cls._instances:
+                if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
+                    return instance()
+        o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
+        cls._instances.append(weakref.ref(o))
+        return cls._instances[-1]()
+
+    def __init__(self, mu_indices, var_indices):
+        self.mu_indices = mu_indices
+        self.var_indices = var_indices
+
+    def gradfactor(self, muvar, dmuvar):
+        #mu = muvar[self.mu_indices]
+        #var = muvar[self.var_indices]
+        #=======================================================================
+        # This is just eta direction:
+        #dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
+        #=======================================================================
         #=======================================================================
         # This is by going through theta fully and then going into eta direction:
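Note: the `_instances`/`__new__` machinery added here caches live transformation objects through weak references, so requesting the same (mu_indices, var_indices) pair returns the existing instance instead of building a new one. A self-contained sketch of that pattern (hypothetical `Cached` class, simplified from the code above):

    import weakref

    class Cached(object):
        _instances = []  # weak references to every live instance

        def __new__(cls, key):
            # prune entries whose target has been garbage-collected
            cls._instances[:] = [r for r in cls._instances if r() is not None]
            for r in cls._instances:
                if r().key == key:
                    return r()  # hand back the live instance
            o = super(Cached, cls).__new__(cls)
            o.key = key
            cls._instances.append(weakref.ref(o))
            return o

    a, b = Cached('x'), Cached('x')
    assert a is b  # the second call reuses the first object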

@@ -15,14 +15,6 @@ class Updateable(Observable):
     def __init__(self, *args, **kwargs):
         super(Updateable, self).__init__(*args, **kwargs)

-    @property
-    def updates(self):
-        raise DeprecationWarning("updates is now a function, see update(True|False|None)")
-    @updates.setter
-    def updates(self, ups):
-        raise DeprecationWarning("updates is now a function, see update(True|False|None)")
-
     def update_model(self, updates=None):
         """
         Get or set, whether automatic updates are performed. When updates are
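Note: the removed deprecation stubs leave only the get-or-set call described in the docstring: `update_model()` reads the flag, `update_model(True|False)` sets it. Roughly, as a sketch of the calling convention only (hypothetical `_update_on` flag and `trigger_update` hook, not the full Updateable logic):

    def update_model(self, updates=None):
        if updates is None:
            return self._update_on       # getter: report current state
        self._update_on = bool(updates)  # setter: switch updates on/off
        if self._update_on:
            self.trigger_update()        # replay deferred recomputation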

@@ -121,12 +121,15 @@ class SparseGP(GP):
                 Kxx = kern.Kdiag(Xnew)
                 var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
         else:
-            Kx = kern.psi1(self.Z, Xnew)
-            mu = np.dot(Kx, self.posterior.woodbury_vector)
+            Kx = kern.psi1(self.Z, Xnew).T
+            mu = np.dot(Kx.T, self.posterior.woodbury_vector)
             if full_cov:
-                raise NotImplementedError, "TODO"
+                Kxx = kern.K(Xnew.mean)
+                if self.posterior.woodbury_inv.ndim == 2:
+                    var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
+                elif self.posterior.woodbury_inv.ndim == 3:
+                    var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
             else:
                 Kxx = kern.psi0(self.Z, Xnew)
-                psi2 = kern.psi2(self.Z, Xnew)
-                var = Kxx - np.sum(np.sum(psi2 * Kmmi_LmiBLmi[None, :, :], 1), 1)
+                var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
         return mu, var
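Note: the rewritten branch computes the standard sparse-GP predictive density from the cached Woodbury quantities, mu* = Kx'w and var* = Kxx - Kx'WKx, where w is the Woodbury vector and W the Woodbury inverse. A numpy sketch for the single-output (2-d woodbury_inv) case, assuming Kx is (M, N) as in the code above:

    import numpy as np

    def woodbury_predict(Kx, Kxx, w_vec, w_inv, full_cov=False):
        # Kx: (M, N) cross-covariance, w_vec: (M, 1), w_inv: (M, M)
        mu = np.dot(Kx.T, w_vec)                          # posterior mean, (N, 1)
        if full_cov:
            var = Kxx - np.dot(Kx.T, np.dot(w_inv, Kx))   # Kxx is (N, N) here
        else:
            var = Kxx - np.sum(np.dot(w_inv, Kx) * Kx, 0) # Kxx is the (N,) diagonal
        return mu, var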

@@ -79,8 +79,14 @@ class MLP(Kern):
                                    + 2*self.bias_variance + 2.))*base_cov_grad).sum()

-    def update_gradients_diag(self, X):
-        raise NotImplementedError, "TODO"
+    def update_gradients_diag(self, dL_dKdiag, X):
+        self._K_diag_computations(X)
+        self.variance.gradient = np.sum(self._K_diag_dvar*dL_dKdiag)
+        base = four_over_tau*self.variance/np.sqrt(1-self._K_diag_asin_arg*self._K_diag_asin_arg)
+        base_cov_grad = base*dL_dKdiag/np.square(self._K_diag_denom)
+        self.weight_variance.gradient = (base_cov_grad*np.square(X).sum(axis=1)).sum()
+        self.bias_variance.gradient = base_cov_grad.sum()

     def gradients_X(self, dL_dK, X, X2):
         """Derivative of the covariance matrix with respect to X"""

@@ -159,7 +159,7 @@ class Stationary(Kern):
         #self.lengthscale.gradient = -((dL_dr*rinv)[:,:,None]*x_xl3).sum(0).sum(0)/self.lengthscale**3
         tmp = dL_dr*self._inv_dist(X, X2)
         if X2 is None: X2 = X
         if config.getboolean('weave', 'working'):
             try:

@@ -261,7 +261,7 @@ class Stationary(Kern):
                     ret(n,d) = retnd;
                 }
             }
         """
         if hasattr(X, 'values'):X = X.values #remove the GPy wrapping to make passing into weave safe
         if hasattr(X2, 'values'):X2 = X2.values

@@ -278,12 +278,12 @@ class Stationary(Kern):
                          'extra_link_args' : ['-lgomp']}
         weave.inline(code, ['ret', 'N', 'D', 'M', 'tmp', 'X', 'X2'], type_converters=weave.converters.blitz, support_code=support_code, **weave_options)
         return ret/self.lengthscale**2

     def gradients_X_diag(self, dL_dKdiag, X):
         return np.zeros(X.shape)

     def input_sensitivity(self, summarize=True):
-        return np.ones(self.input_dim)/self.lengthscale**2
+        return self.variance*np.ones(self.input_dim)/self.lengthscale**2

class Exponential(Stationary):
    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Exponential'):
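Note: the `input_sensitivity` fix scales the per-dimension ARD relevance 1/lengthscale_d**2 by the kernel variance, so the kernel's overall magnitude is reflected when sensitivities are compared across kernels. A minimal sketch:

    import numpy as np

    def input_sensitivity(variance, lengthscale, input_dim):
        # lengthscale: scalar (shared) or length-D array (ARD); either way it
        # broadcasts against the ones vector to give one value per dimension
        return variance*np.ones(input_dim)/np.asarray(lengthscale)**2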

@@ -8,6 +8,7 @@ from ..core.parameterization.variational import NormalPosterior, NormalPrior
 from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
 import logging
 from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
+from GPy.core.parameterization.param import Param

 class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
     """
@@ -35,15 +36,20 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         self.init = init

-        if X_variance is None:
-            self.logger.info("initializing latent space variance ~ uniform(0,.1)")
-            X_variance = np.random.uniform(0,.1,X.shape)
-
         if Z is None:
             self.logger.info("initializing inducing inputs")
             Z = np.random.permutation(X.copy())[:num_inducing]
         assert Z.shape[1] == X.shape[1]

+        if X_variance == False:
+            self.logger.info('no variance on X, activating sparse GPLVM')
+            X = Param("latent space", X)
+        elif X_variance is None:
+            self.logger.info("initializing latent space variance ~ uniform(0,.1)")
+            X_variance = np.random.uniform(0,.1,X.shape)
+            self.variational_prior = NormalPrior()
+            X = NormalPosterior(X, X_variance)
+
         if kernel is None:
             self.logger.info("initializing kernel RBF")
             kernel = kern.RBF(input_dim, lengthscale=1./fracs, ARD=True) #+ kern.Bias(input_dim) + kern.White(input_dim)
@@ -51,9 +57,6 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         if likelihood is None:
             likelihood = Gaussian()

-        self.variational_prior = NormalPrior()
-        X = NormalPosterior(X, X_variance)
-
         self.kl_factr = 1.

         if inference_method is None:
@@ -83,36 +86,42 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
     def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None):
         posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices)

-        current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
-                                            variational_posterior=X,
-                                            Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
-                                            dL_dpsi1=grad_dict['dL_dpsi1'],
-                                            dL_dpsi2=grad_dict['dL_dpsi2'])
+        if self.has_uncertain_inputs():
+            current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
+                                                variational_posterior=X,
+                                                Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
+                                                dL_dpsi1=grad_dict['dL_dpsi1'],
+                                                dL_dpsi2=grad_dict['dL_dpsi2'])
+        else:
+            current_values['Xgrad'] = self.kern.gradients_X(grad_dict['dL_dKnm'], X, Z)
+            current_values['Xgrad'] += self.kern.gradients_X_diag(grad_dict['dL_dKdiag'], X)
+            if subset_indices is not None:
+                value_indices['Xgrad'] = subset_indices['samples']

         kl_fctr = self.kl_factr
-        if self.missing_data:
-            d = self.output_dim
-            log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)/d
-        else:
-            log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)
-
-        # Subsetting Variational Posterior objects, makes the gradients
-        # empty. We need them to be 0 though:
-        X.mean.gradient[:] = 0
-        X.variance.gradient[:] = 0
-
-        self.variational_prior.update_gradients_KL(X)
-        if self.missing_data:
-            current_values['meangrad'] += kl_fctr*X.mean.gradient/d
-            current_values['vargrad'] += kl_fctr*X.variance.gradient/d
-        else:
-            current_values['meangrad'] += kl_fctr*X.mean.gradient
-            current_values['vargrad'] += kl_fctr*X.variance.gradient
-
-        if subset_indices is not None:
-            value_indices['meangrad'] = subset_indices['samples']
-            value_indices['vargrad'] = subset_indices['samples']
+        if self.has_uncertain_inputs():
+            if self.missing_data:
+                d = self.output_dim
+                log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)/d
+            else:
+                log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)
+
+            # Subsetting Variational Posterior objects, makes the gradients
+            # empty. We need them to be 0 though:
+            X.mean.gradient[:] = 0
+            X.variance.gradient[:] = 0
+
+            self.variational_prior.update_gradients_KL(X)
+            if self.missing_data:
+                current_values['meangrad'] += kl_fctr*X.mean.gradient/d
+                current_values['vargrad'] += kl_fctr*X.variance.gradient/d
+            else:
+                current_values['meangrad'] += kl_fctr*X.mean.gradient
+                current_values['vargrad'] += kl_fctr*X.variance.gradient
+
+            if subset_indices is not None:
+                value_indices['meangrad'] = subset_indices['samples']
+                value_indices['vargrad'] = subset_indices['samples']

         return posterior, log_marginal_likelihood, grad_dict, current_values, value_indices

     def _outer_values_update(self, full_values):
@@ -121,42 +130,24 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         E.g. set the gradients of parameters, etc.
         """
         super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
-        self.X.mean.gradient = full_values['meangrad']
-        self.X.variance.gradient = full_values['vargrad']
+        if self.has_uncertain_inputs():
+            self.X.mean.gradient = full_values['meangrad']
+            self.X.variance.gradient = full_values['vargrad']
+        else:
+            self.X.gradient = full_values['Xgrad']

     def _outer_init_full_values(self):
-        return dict(meangrad=np.zeros(self.X.mean.shape),
-                    vargrad=np.zeros(self.X.variance.shape))
+        if self.has_uncertain_inputs():
+            return dict(meangrad=np.zeros(self.X.mean.shape),
+                        vargrad=np.zeros(self.X.variance.shape))
+        else:
+            return dict(Xgrad=np.zeros(self.X.shape))

     def parameters_changed(self):
         super(BayesianGPLVMMiniBatch,self).parameters_changed()
         if isinstance(self.inference_method, VarDTC_minibatch):
             return

-        #super(BayesianGPLVM, self).parameters_changed()
-        #self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
-
-        #self.X.mean.gradient, self.X.variance.gradient = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.grad_dict['dL_dpsi0'], dL_dpsi1=self.grad_dict['dL_dpsi1'], dL_dpsi2=self.grad_dict['dL_dpsi2'])
-
-        # This is testing code -------------------------
-        # i = np.random.randint(self.X.shape[0])
-        # X_ = self.X.mean
-        # which = np.sqrt(((X_ - X_[i:i+1])**2).sum(1)).argsort()>(max(0, self.X.shape[0]-51))
-        # _, _, grad_dict = self.inference_method.inference(self.kern, self.X[which], self.Z, self.likelihood, self.Y[which], self.Y_metadata)
-        # grad = self.kern.gradients_qX_expectations(variational_posterior=self.X[which], Z=self.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
-        #
-        # self.X.mean.gradient[:] = 0
-        # self.X.variance.gradient[:] = 0
-        # self.X.mean.gradient[which] = grad[0]
-        # self.X.variance.gradient[which] = grad[1]
-
-        # update for the KL divergence
-        # self.variational_prior.update_gradients_KL(self.X, which)
-        # -----------------------------------------------
-
-        # update for the KL divergence
-        #self.variational_prior.update_gradients_KL(self.X)

     def plot_latent(self, labels=None, which_indices=None,
                     resolution=50, ax=None, marker='o', s=40,
                     fignum=None, plot_inducing=True, legend=True,
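Note: the `has_uncertain_inputs()` branching added throughout this file separates the Bayesian GPLVM case (X is a NormalPosterior with a mean, a variance and a KL term) from the new sparse-GPLVM case (X is a plain Param, so gradients flow through the ordinary kernel derivatives). A toy sketch of the dispatch, with a hypothetical `latent_gradients` helper mirroring the dict keys used above:

    def latent_gradients(kern, X, Z, grad_dict, uncertain_inputs):
        values = {}
        if uncertain_inputs:
            # q(X) case: gradients of the psi statistics w.r.t. mean and variance
            values['meangrad'], values['vargrad'] = kern.gradients_qX_expectations(
                variational_posterior=X, Z=Z,
                dL_dpsi0=grad_dict['dL_dpsi0'],
                dL_dpsi1=grad_dict['dL_dpsi1'],
                dL_dpsi2=grad_dict['dL_dpsi2'])
        else:
            # point-estimate case: plain kernel gradients w.r.t. X
            values['Xgrad'] = (kern.gradients_X(grad_dict['dL_dKnm'], X, Z)
                               + kern.gradients_X_diag(grad_dict['dL_dKdiag'], X))
        return values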

@@ -111,9 +111,6 @@ class MRD(BayesianGPLVMMiniBatch):
         assert all([isinstance(k, Kern) for k in kernel]), "invalid kernel object detected!"
         kernels = kernel

-        if X_variance is None:
-            X_variance = np.random.uniform(0.1, 0.2, X.shape)
-
         self.variational_prior = NormalPrior()
         #self.X = NormalPosterior(X, X_variance)

@@ -174,10 +171,13 @@ class MRD(BayesianGPLVMMiniBatch):
             self.Z.gradient[:] += b.full_values['Zgrad']

             grad_dict = b.full_values
-            self.X.mean.gradient += grad_dict['meangrad']
-            self.X.variance.gradient += grad_dict['vargrad']
+            if self.has_uncertain_inputs():
+                self.X.mean.gradient += grad_dict['meangrad']
+                self.X.variance.gradient += grad_dict['vargrad']
+            else:
+                self.X.gradient += grad_dict['Xgrad']

-        if isinstance(self.X, VariationalPosterior):
+        if self.has_uncertain_inputs():
             # update for the KL divergence
             self.variational_prior.update_gradients_KL(self.X)
             self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)

@@ -5,7 +5,7 @@ import os
 from setuptools import setup

 # Version number
-version = '0.6.0'
+version = '0.6.1'

 def read(fname):
     return open(os.path.join(os.path.dirname(__file__), fname)).read()