Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-08 11:32:39 +02:00
Merge branch 'devel' of github.com:SheffieldML/GPy into devel
Conflicts: GPy/inference/latent_function_inference/inferenceX.py
Commit 118ed2733e: 2 changed files with 23 additions and 27 deletions
GPy/core/parameterization/variational.py

@@ -169,7 +169,7 @@ class SpikeAndSlabPosterior(VariationalPosterior):
     def gamma_probabilities(self):
         prob = np.zeros_like(param_to_array(self.gamma))
         prob[self.gamma>-710] = 1./(1.+np.exp(-self.gamma[self.gamma>-710]))
-        prob1 = np.zeros_like(param_to_array(self.gamma))
+        prob1 = -np.zeros_like(param_to_array(self.gamma))
         prob1[self.gamma<710] = 1./(1.+np.exp(self.gamma[self.gamma<710]))
         return prob, prob1
@@ -177,8 +177,8 @@ class SpikeAndSlabPosterior(VariationalPosterior):
     def gamma_log_prob(self):
         loggamma = param_to_array(self.gamma).copy()
         loggamma[loggamma>-40] = -np.log1p(np.exp(-loggamma[loggamma>-40]))
-        loggamma1 = param_to_array(self.gamma).copy()
-        loggamma1[loggamma1<40] = -np.log1p(np.exp(loggamma1[loggamma1<40]))
+        loggamma1 = -param_to_array(self.gamma).copy()
+        loggamma1[loggamma1>-40] = -np.log1p(np.exp(-loggamma1[loggamma1>-40]))
         return loggamma,loggamma1

     def set_gradients(self, grad):
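For context, both methods evaluate the guarded sigmoid pair sigma(gamma) and sigma(-gamma) = 1 - sigma(gamma): np.exp overflows for arguments above roughly 709 in float64, hence the +/-710 cutoffs, and log sigma(gamma) is approximately gamma itself for gamma <= -40, hence the log1p guard. A minimal standalone sketch of the same trick (function names here are illustrative, not GPy API):

import numpy as np

def stable_sigmoid_pair(gamma):
    # prob = sigma(gamma); prob1 = sigma(-gamma) = 1 - sigma(gamma)
    prob = np.zeros_like(gamma)
    m = gamma > -710                        # below -710, exp(-gamma) would overflow; sigma is 0 anyway
    prob[m] = 1./(1. + np.exp(-gamma[m]))
    prob1 = np.zeros_like(gamma)
    m1 = gamma < 710
    prob1[m1] = 1./(1. + np.exp(gamma[m1]))
    return prob, prob1

def stable_log_sigmoid(gamma):
    # log sigma(gamma) = -log1p(exp(-gamma)); for gamma <= -40 it equals gamma to machine precision
    out = gamma.copy()
    m = gamma > -40
    out[m] = -np.log1p(np.exp(-gamma[m]))
    return out

g = np.array([-800., -40., 0., 40., 800.])
print(stable_sigmoid_pair(g))   # (sigma(g), 1 - sigma(g)) with no overflow
print(stable_log_sigmoid(g))    # log sigma(g)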
GPy/inference/latent_function_inference/inferenceX.py

@@ -7,38 +7,37 @@ from ...core.parameterization import variational
 def infer_newX(model, Y_new, optimize=True, init='L2'):
     """
     Infer the distribution of X for the new observed data *Y_new*.

     :param model: the GPy model used in inference
     :type model: GPy.core.Model
     :param Y_new: the new observed data for inference
     :type Y_new: numpy.ndarray
     :param optimize: whether to optimize the location of the new X (True by default)
     :type optimize: boolean
     :return: a tuple containing the estimated posterior distribution of X and the model that optimizes X
     :rtype: (GPy.core.parameterization.variational.VariationalPosterior, GPy.core.Model)
     """
     infr_m = InferenceX(model, Y_new, init=init)

     if optimize:
         infr_m.optimize()

     return infr_m.X, infr_m
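A hedged usage sketch (model choice, shapes, and data are illustrative, not part of this commit): train a Bayesian GPLVM, then infer latent positions for held-out observations.

import numpy as np
import GPy
from GPy.inference.latent_function_inference.inferenceX import infer_newX

Y = np.random.randn(100, 12)              # training observations
m = GPy.models.BayesianGPLVM(Y, input_dim=3)
m.optimize()

Y_new = np.random.randn(5, 12)            # held-out rows in the same output space
X_new, infr_m = infer_newX(m, Y_new)      # posterior over X, plus the model that optimized it
print(X_new.mean.shape)                   # (5, 3): latent means for the new points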
 class InferenceX(Model):
     """
     The class for inference of new X with a given new Y. (do_test_latent)

     :param model: the GPy model used in inference
     :type model: GPy.core.Model
     :param Y: the new observed data for inference
     :type Y: numpy.ndarray
     """
     def __init__(self, model, Y, name='inferenceX', init='L2'):
-        if np.isnan(Y).any() or getattr(model, 'missing_data', False):
+        if np.isnan(Y).any():
             assert Y.shape[0]==1, "The current implementation of inference X only supports one data point at a time with missing data!"
             self.missing_data = True
             self.valid_dim = np.logical_not(np.isnan(Y[0]))
-            self.ninan = getattr(model, 'ninan', None)
         else:
             self.missing_data = False
         super(InferenceX, self).__init__(name)
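The missing-data path masks output dimensions by their NaN pattern; a short illustration of the valid_dim mask used above:

import numpy as np
Y = np.array([[0.3, np.nan, 1.2, np.nan]])    # one new point, two missing outputs
valid_dim = np.logical_not(np.isnan(Y[0]))    # array([ True, False,  True, False])
print(Y[:, valid_dim])                        # [[0.3 1.2]]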
@@ -67,12 +66,12 @@ class InferenceX(Model):
         self.Y = Y
         self.X = self._init_X(model, Y, init=init)
         self.compute_dL()

         self.link_parameter(self.X)

     def _init_X(self, model, Y_new, init='L2'):
         # Initialize the new X by finding the nearest point in Y space.

         Y = model.Y
         if self.missing_data:
             Y = Y[:,self.valid_dim]
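The 'L2' branch itself is not shown in this hunk, but the comment describes it; a hedged sketch of nearest-point initialization in Y space (names and layout illustrative, not the exact GPy code):

import numpy as np

def init_X_by_L2(Y_train, X_train, Y_new):
    # squared Euclidean distance from every new row to every training row, shape (n_new, n_train)
    d2 = (np.square(Y_new).sum(1)[:, None]
          + np.square(Y_train).sum(1)[None, :]
          - 2.*Y_new.dot(Y_train.T))
    idx = d2.argmin(axis=1)           # nearest training point in output space
    return X_train[idx].copy()        # reuse its latent location as the starting X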
@@ -86,7 +85,7 @@ class InferenceX(Model):
         elif init=='rand':
             dist = np.random.rand(Y_new.shape[0],Y.shape[0])
         idx = dist.argmin(axis=1)

         from ...models import SSGPLVM
         from ...util.misc import param_to_array
         if isinstance(model, SSGPLVM):
@@ -99,9 +98,9 @@ class InferenceX(Model):
         else:
             from ...core import Param
             X = Param('latent mean',param_to_array(model.X[idx]).copy())

         return X

     def compute_dL(self):
         # Common computation
         beta = 1./np.fmax(self.likelihood.variance, 1e-6)
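Param, imported above from GPy.core, wraps a raw array as a named, gradient-aware parameter; a minimal sketch with an illustrative shape:

import numpy as np
from GPy.core import Param
X = Param('latent mean', np.zeros((5, 3)))   # behaves like an ndarray and carries a .gradient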
@@ -109,18 +108,15 @@ class InferenceX(Model):
         wv = self.posterior.woodbury_vector
         if self.missing_data:
             wv = wv[:,self.valid_dim]
-            if self.ninan is not None:
-                self.dL_dpsi2 = beta/2.*(self.posterior.woodbury_inv[:, :, self.valid_dim] - np.einsum('md,od->mo',wv, wv)[:, :, None])
-                self.dL_dpsi2 = self.dL_dpsi2.sum(-1)
-            else:
-                self.dL_dpsi2 = beta/2.*(self.valid_dim.sum() * self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))
+            output_dim = self.valid_dim.sum()
+            self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))/2.
             self.dL_dpsi1 = beta*np.dot(self.Y[:,self.valid_dim], wv.T)
             self.dL_dpsi0 = - beta/2.* np.ones(self.Y.shape[0])
         else:
-            self.dL_dpsi2 = beta/2.*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))
+            self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))/2.
             self.dL_dpsi1 = beta*np.dot(self.Y, wv.T)
-            self.dL_dpsi0 = -beta/2.* np.ones(self.Y.shape[0])
+            self.dL_dpsi0 = -beta/2.*output_dim* np.ones(self.Y.shape[0])

     def parameters_changed(self):
         if self.uncertain_input:
             psi0 = self.kern.psi0(self.Z, self.X)
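Two details in this hunk are worth unpacking: np.einsum('md,od->mo', wv, wv) is simply the outer product wv wv^T over the output dimension, and the rewritten dL_dpsi2 is the same expression with the /2. moved outside the parenthesis. A quick self-check (variable names and shapes illustrative):

import numpy as np

M, D = 4, 3
wv = np.random.randn(M, D)          # stand-in for posterior.woodbury_vector
woodbury_inv = np.eye(M)            # stand-in for posterior.woodbury_inv
beta, output_dim = 2.0, D

# the einsum is wv wv^T
assert np.allclose(np.einsum('md,od->mo', wv, wv), wv.dot(wv.T))

# old and new dL_dpsi2 forms are algebraically identical
old = beta/2.*(output_dim*woodbury_inv - np.einsum('md,od->mo', wv, wv))
new = beta*(output_dim*woodbury_inv - np.einsum('md,od->mo', wv, wv))/2.
assert np.allclose(old, new)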
@@ -132,7 +128,7 @@ class InferenceX(Model):
             psi2 = np.dot(psi1.T,psi1)

         self._log_marginal_likelihood = (self.dL_dpsi2*psi2).sum()+(self.dL_dpsi1*psi1).sum()+(self.dL_dpsi0*psi0).sum()

         if self.uncertain_input:
             X_grad = self.kern.gradients_qX_expectations(variational_posterior=self.X, Z=self.Z, dL_dpsi0=self.dL_dpsi0, dL_dpsi1=self.dL_dpsi1, dL_dpsi2=self.dL_dpsi2)
             self.X.set_gradients(X_grad)
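Each (A*B).sum() term in the marginal-likelihood line is a Frobenius inner product, i.e. trace(A^T B); a two-line check:

import numpy as np
A, B = np.random.randn(4, 4), np.random.randn(4, 4)
assert np.allclose((A*B).sum(), np.trace(A.T.dot(B)))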
@@ -141,7 +137,7 @@ class InferenceX(Model):
             X_grad = self.kern.gradients_X_diag(self.dL_dpsi0, self.X)
             X_grad += self.kern.gradients_X(self.dL_dpsi1, self.X, self.Z)
             self.X.gradient = X_grad

         if self.uncertain_input:
             from ...core.parameterization.variational import SpikeAndSlabPrior
             if isinstance(self.variational_prior, SpikeAndSlabPrior):
@@ -155,7 +151,7 @@ class InferenceX(Model):
                 # update for the KL divergence
                 self.variational_prior.update_gradients_KL(self.X)
             self._log_marginal_likelihood += -KL_div

     def log_likelihood(self):
         return self._log_marginal_likelihood
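Taken together, parameters_changed assembles the variational lower bound that log_likelihood reports: the psi-statistics terms (dL_dpsi2*psi2).sum() + (dL_dpsi1*psi1).sum() + (dL_dpsi0*psi0).sum(), minus the KL term subtracted via += -KL_div, so optimizing the InferenceX model maximizes this bound with respect to the new X.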