Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-09 03:52:39 +02:00
Merge branch 'devel' of github.com:SheffieldML/GPy into devel

Conflicts:
	GPy/inference/latent_function_inference/inferenceX.py

Commit 118ed2733e: 2 changed files with 23 additions and 27 deletions.
--- a/GPy/core/parameterization/variational.py
+++ b/GPy/core/parameterization/variational.py
@@ -169,7 +169,7 @@ class SpikeAndSlabPosterior(VariationalPosterior):
     def gamma_probabilities(self):
         prob = np.zeros_like(param_to_array(self.gamma))
         prob[self.gamma>-710] = 1./(1.+np.exp(-self.gamma[self.gamma>-710]))
-        prob1 = np.zeros_like(param_to_array(self.gamma))
+        prob1 = -np.zeros_like(param_to_array(self.gamma))
         prob1[self.gamma<710] = 1./(1.+np.exp(self.gamma[self.gamma<710]))
         return prob, prob1

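The >-710 guard above exists because np.exp overflows float64 for arguments beyond roughly 709.78; for gamma below that threshold, sigmoid(gamma) is already 0 to machine precision, so the preallocated zeros are the right answer. A standalone sketch of the same pattern (hypothetical names, not GPy API):

import numpy as np

def stable_sigmoid(gamma):
    # sigma(gamma) underflows to exactly 0 for very negative gamma, so only
    # evaluate exp where its argument stays below the float64 overflow point.
    prob = np.zeros_like(gamma)
    safe = gamma > -710
    prob[safe] = 1. / (1. + np.exp(-gamma[safe]))
    return prob

print(stable_sigmoid(np.array([-800., 0., 800.])))  # -> [0.  0.5 1. ]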
@@ -177,8 +177,8 @@ class SpikeAndSlabPosterior(VariationalPosterior):
     def gamma_log_prob(self):
         loggamma = param_to_array(self.gamma).copy()
         loggamma[loggamma>-40] = -np.log1p(np.exp(-loggamma[loggamma>-40]))
-        loggamma1 = param_to_array(self.gamma).copy()
-        loggamma1[loggamma1<40] = -np.log1p(np.exp(loggamma1[loggamma1<40]))
+        loggamma1 = -param_to_array(self.gamma).copy()
+        loggamma1[loggamma1>-40] = -np.log1p(np.exp(-loggamma1[loggamma1>-40]))
         return loggamma,loggamma1

     def set_gradients(self, grad):

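This hunk fixes the saturated branch of log(1 - sigmoid(gamma)): the old code left loggamma1 equal to +gamma wherever gamma >= 40, which has the wrong sign, while the new code starts from -gamma so the untouched region already holds the correct asymptote, since -log1p(exp(gamma)) approaches -gamma for large gamma. A standalone sketch of the corrected computation (hypothetical names, not GPy API):

import numpy as np

def log_one_minus_sigmoid(gamma):
    # log(1 - sigmoid(g)) = -log1p(exp(g)), which tends to -g for large g.
    out = -gamma.copy()                  # saturated value, used for g >= 40
    safe = out > -40                     # i.e. g < 40: evaluate the log1p form
    out[safe] = -np.log1p(np.exp(-out[safe]))
    return out

print(log_one_minus_sigmoid(np.array([-50., 0., 50.])))
# -> approx [-1.9e-22, -0.6931, -50.]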
--- a/GPy/inference/latent_function_inference/inferenceX.py
+++ b/GPy/inference/latent_function_inference/inferenceX.py
@@ -34,11 +34,10 @@ class InferenceX(Model):
     :type Y: numpy.ndarray
     """
     def __init__(self, model, Y, name='inferenceX', init='L2'):
-        if np.isnan(Y).any() or getattr(model, 'missing_data', False):
+        if np.isnan(Y).any():
             assert Y.shape[0]==1, "The current implementation of inference X only support one data point at a time with missing data!"
             self.missing_data = True
             self.valid_dim = np.logical_not(np.isnan(Y[0]))
-            self.ninan = getattr(model, 'ninan', None)
         else:
             self.missing_data = False
         super(InferenceX, self).__init__(name)

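The simplified branch drops the ninan bookkeeping and keeps only the NaN mask: valid_dim flags which output dimensions of the single allowed data row are observed. A minimal sketch of that masking, with made-up data:

import numpy as np

Y = np.array([[1.0, np.nan, 3.0, np.nan]])  # one row; dims 1 and 3 unobserved
valid_dim = np.logical_not(np.isnan(Y[0]))  # mask of observed output dims
print(valid_dim)        # [ True False  True False]
print(Y[:, valid_dim])  # [[1. 3.]] -- only observed dims enter the gradients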
@@ -109,17 +108,14 @@ class InferenceX(Model):
         wv = self.posterior.woodbury_vector
         if self.missing_data:
             wv = wv[:,self.valid_dim]
-            if self.ninan is not None:
-                self.dL_dpsi2 = beta/2.*(self.posterior.woodbury_inv[:, :, self.valid_dim] - np.einsum('md,od->mo',wv, wv)[:, :, None])
-                self.dL_dpsi2 = self.dL_dpsi2.sum(-1)
-            else:
-                self.dL_dpsi2 = beta/2.*(self.valid_dim.sum() * self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))
+            output_dim = self.valid_dim.sum()
+            self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))/2.
             self.dL_dpsi1 = beta*np.dot(self.Y[:,self.valid_dim], wv.T)
             self.dL_dpsi0 = - beta/2.* np.ones(self.Y.shape[0])
         else:
-            self.dL_dpsi2 = beta/2.*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))
+            self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))/2.
             self.dL_dpsi1 = beta*np.dot(self.Y, wv.T)
-            self.dL_dpsi0 = -beta/2.* np.ones(self.Y.shape[0])
+            self.dL_dpsi0 = -beta/2.*output_dim* np.ones(self.Y.shape[0])

     def parameters_changed(self):
         if self.uncertain_input:

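After the merge, both branches compute dL_dpsi2 from the same expression, with output_dim set to the number of valid output dimensions in the missing-data case; the old per-dimension woodbury_inv slicing and trailing .sum(-1) path is gone, and dL_dpsi0 picks up the output_dim factor in the full-data branch. A shape check with dummy arrays (only the variable names mimic GPy; nothing here calls GPy itself):

import numpy as np

M, D = 5, 3                    # inducing points, (valid) output dimensions
beta = 2.0
wv = np.random.randn(M, D)     # stands in for posterior.woodbury_vector
woodbury_inv = np.eye(M)       # stands in for posterior.woodbury_inv, (M, M)
output_dim = D

# einsum('md,od->mo', wv, wv) contracts the output dimension: wv @ wv.T
dL_dpsi2 = beta * (output_dim * woodbury_inv
                   - np.einsum('md,od->mo', wv, wv)) / 2.
print(dL_dpsi2.shape)          # (5, 5) in both branches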