Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-10 04:22:38 +02:00

Commit 9ddec5bc70: [merge] for spgp minibatch and psi NxMxM

11 changed files with 324 additions and 257 deletions
@@ -64,9 +64,7 @@ class VarDTC(LatentFunctionInference):
     def get_VVTfactor(self, Y, prec):
         return Y * prec # TODO chache this, and make it effective

-    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None, dL_dKmm=None):
-
+    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):
         _, output_dim = Y.shape
         uncertain_inputs = isinstance(X, VariationalPosterior)
@@ -95,16 +93,27 @@ class VarDTC(LatentFunctionInference):
         # The rather complex computations of A, and the psi stats
         if uncertain_inputs:
+            if psi0 is None:
                 psi0 = kern.psi0(Z, X)
+            if psi1 is None:
                 psi1 = kern.psi1(Z, X)
             if het_noise:
+                if psi2 is None:
+                    assert len(psi2.shape) == 3 # Need to have not summed out N
+                    #FIXME: Need testing
+                    psi2_beta = np.sum([psi2[X[i:i+1,:], :, :] * beta_i for i,beta_i in enumerate(beta)],0)
+                else:
                     psi2_beta = np.sum([kern.psi2(Z,X[i:i+1,:]) * beta_i for i,beta_i in enumerate(beta)],0)
             else:
-                psi2_beta = kern.psi2(Z,X) * beta
+                if psi2 is None:
+                    psi2 = kern.psi2(Z,X)
+                psi2_beta = psi2 * beta
             LmInv = dtrtri(Lm)
             A = LmInv.dot(psi2_beta.dot(LmInv.T))
         else:
+            if psi0 is None:
                 psi0 = kern.Kdiag(X)
+            if psi1 is None:
                 psi1 = kern.K(X, Z)
             if het_noise:
                 tmp = psi1 * (np.sqrt(beta))
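The hunks above introduce the compute-if-None convention: callers may hand precomputed psi statistics to `inference`, and only missing ones are recomputed from the kernel. A minimal sketch of that convention, assuming any kernel object exposing the `psi0/psi1/psi2` methods named in the diff (the helper name `resolve_psi_stats` is made up for illustration):

```python
def resolve_psi_stats(kern, X, Z, psi0=None, psi1=None, psi2=None):
    # Recompute only the statistics the caller did not supply.
    if psi0 is None:
        psi0 = kern.psi0(Z, X)   # shape (N,)
    if psi1 is None:
        psi1 = kern.psi1(Z, X)   # shape (N, M)
    if psi2 is None:
        psi2 = kern.psi2(Z, X)   # shape (M, M) once N has been summed out
    return psi0, psi1, psi2
```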
@@ -38,16 +38,17 @@ class SparseGPMissing(StochasticStorage):
         import numpy as np
         self.Y = model.Y_normalized
         bdict = {}
+        #For N > 1000 array2string default crops
+        opt = np.get_printoptions()
+        np.set_printoptions(threshold='nan')
         for d in range(self.Y.shape[1]):
-            inan = np.isnan(self.Y[:, d])
-            arr_str = np.array2string(inan,
-                                      np.inf, 0,
-                                      True, '',
-                                      formatter={'bool':lambda x: '1' if x else '0'})
+            inan = np.isnan(self.Y)[:, d]
+            arr_str = np.array2string(inan, np.inf, 0, True, '', formatter={'bool':lambda x: '1' if x else '0'})
             try:
                 bdict[arr_str][0].append(d)
             except:
                 bdict[arr_str] = [[d], ~inan]
+        np.set_printoptions(**opt)
         self.d = bdict.values()
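For context, the `bdict` grouping above collects output dimensions that share the same missingness pattern, so every group can be handled with one common set of observed rows. A small self-contained illustration, with plain string keys standing in for the `np.array2string` call used in the diff:

```python
import numpy as np

Y = np.array([[1.0, np.nan, 2.0],
              [np.nan, np.nan, 1.0],
              [3.0, 4.0, np.nan]])

bdict = {}
for d in range(Y.shape[1]):
    inan = np.isnan(Y[:, d])
    key = ''.join('1' if x else '0' for x in inan)  # stand-in for np.array2string
    try:
        bdict[key][0].append(d)
    except KeyError:
        bdict[key] = [[d], ~inan]

# Each value is ([output dims], boolean mask of observed rows).
print(list(bdict.values()))
```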
@@ -55,32 +56,36 @@ class SparseGPStochastics(StochasticStorage):
     For the sparse gp we need to store the dimension we are in,
     and the indices corresponding to those
     """
-    def __init__(self, model, batchsize=1):
+    def __init__(self, model, batchsize=1, missing_data=True):
         self.batchsize = batchsize
         self.output_dim = model.Y.shape[1]
         self.Y = model.Y_normalized
+        self.missing_data = missing_data
         self.reset()
         self.do_stochastics()

     def do_stochastics(self):
+        import numpy as np
         if self.batchsize == 1:
             self.current_dim = (self.current_dim+1)%self.output_dim
-            self.d = [[[self.current_dim], np.isnan(self.Y[:, self.d])]]
+            self.d = [[[self.current_dim], np.isnan(self.Y[:, self.current_dim]) if self.missing_data else None]]
         else:
-            import numpy as np
             self.d = np.random.choice(self.output_dim, size=self.batchsize, replace=False)
             bdict = {}
+            if self.missing_data:
+                opt = np.get_printoptions()
+                np.set_printoptions(threshold='nan')
                 for d in self.d:
                     inan = np.isnan(self.Y[:, d])
-                    arr_str = int(np.array2string(inan,
-                                                  np.inf, 0,
-                                                  True, '',
-                                                  formatter={'bool':lambda x: '1' if x else '0'}), 2)
+                    arr_str = np.array2string(inan,np.inf, 0,True, '',formatter={'bool':lambda x: '1' if x else '0'})
                     try:
                         bdict[arr_str][0].append(d)
                     except:
                         bdict[arr_str] = [[d], ~inan]
+                np.set_printoptions(**opt)
                 self.d = bdict.values()
+            else:
+                self.d = [[self.d, None]]

     def reset(self):
         self.current_dim = -1
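The `batchsize == 1` path above cycles through output dimensions one per call, optionally attaching that column's missing-data mask. A rough stand-alone sketch of that scheduling behaviour (class name is hypothetical, not GPy API):

```python
import numpy as np

class OneDimScheduler:
    def __init__(self, Y, missing_data=True):
        self.Y = Y
        self.output_dim = Y.shape[1]
        self.missing_data = missing_data
        self.current_dim = -1  # matches reset() above

    def do_stochastics(self):
        # Round-robin over output dimensions, one per parameter update.
        self.current_dim = (self.current_dim + 1) % self.output_dim
        mask = np.isnan(self.Y[:, self.current_dim]) if self.missing_data else None
        self.d = [[[self.current_dim], mask]]

sched = OneDimScheduler(np.random.randn(5, 3))
sched.do_stochastics()   # selects dimension 0
sched.do_stochastics()   # selects dimension 1
```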
@@ -58,24 +58,10 @@ class Kern(Parameterized):
         self._sliced_X = 0
         self.useGPU = self._support_GPU and useGPU
-        self._return_psi2_n_flag = ObsAr(np.zeros(1)).astype(bool)

         from .psi_comp import PSICOMP_GH
         self.psicomp = PSICOMP_GH()

-    @property
-    def return_psi2_n(self):
-        """
-        Flag whether to pass back psi2 as NxMxM or MxM, by summing out N.
-        """
-        return self._return_psi2_n_flag[0]
-    @return_psi2_n.setter
-    def return_psi2_n(self, val):
-        def visit(self):
-            if isinstance(self, Kern):
-                self._return_psi2_n_flag[0]=val
-        self.traverse(visit)
-
     @Cache_this(limit=20)
     def _slice_X(self, X):
         return X[:, self.active_dims]
@@ -97,7 +83,9 @@ class Kern(Parameterized):
     def psi1(self, Z, variational_posterior):
         return self.psicomp.psicomputations(self, Z, variational_posterior)[1]
     def psi2(self, Z, variational_posterior):
-        return self.psicomp.psicomputations(self, Z, variational_posterior)[2]
+        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=False)[2]
+    def psi2n(self, Z, variational_posterior):
+        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]
     def gradients_X(self, dL_dK, X, X2):
         raise NotImplementedError
     def gradients_XX(self, dL_dK, X, X2):
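The shape contract implied by `psi2` versus the new `psi2n` above: `psi2n` keeps the data index N, while `psi2` is the same tensor summed over N. A tiny sketch with made-up sizes:

```python
import numpy as np

N, M = 4, 3                          # data points, inducing points
psi2_n = np.random.rand(N, M, M)     # NxMxM, per-data-point statistic (psi2n)
psi2 = psi2_n.sum(axis=0)            # MxM, summed over N (psi2)
assert psi2_n.shape == (N, M, M) and psi2.shape == (M, M)
```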
@@ -115,7 +103,8 @@ class Kern(Parameterized):
         """Set the gradients of all parameters when doing full (N) inference."""
         raise NotImplementedError

-    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                      psi0=None, psi1=None, psi2=None):
         """
         Set the gradients of all parameters when doing inference with
         uncertain inputs, using expectations of the kernel.
@@ -126,22 +115,27 @@ class Kern(Parameterized):
            dL_dpsi1 * dpsi1_d{theta_i} +
            dL_dpsi2 * dpsi2_d{theta_i}
         """
-        dtheta = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[0]
+        dtheta = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                                        psi0=psi0, psi1=psi1, psi2=psi2)[0]
         self.gradient[:] = dtheta

-    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                 psi0=None, psi1=None, psi2=None):
         """
         Returns the derivative of the objective wrt Z, using the chain rule
         through the expectation variables.
         """
-        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[1]
+        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                                      psi0=psi0, psi1=psi1, psi2=psi2)[1]

-    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                  psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
         """
         Compute the gradients wrt the parameters of the variational
         distruibution q(X), chain-ruling via the expectations of the kernel
         """
-        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2:]
+        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                                      psi0=psi0, psi1=psi1, psi2=psi2)[2:]

     def plot(self, x=None, fignum=None, ax=None, title=None, plot_limits=None, resolution=None, **mpl_kwargs):
         """
@@ -137,25 +137,31 @@ def _slice_psi(f):
 def _slice_update_gradients_expectations(f):
     @wraps(f)
-    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+             psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
         with _Slice_wrap(self, Z, variational_posterior) as s:
-            ret = f(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, s.X, s.X2)
+            ret = f(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, s.X, s.X2,
+                    psi0=psi0, psi1=psi1, psi2=psi2, Lpsi0=Lpsi0, Lpsi1=Lpsi1, Lpsi2=Lpsi2)
         return ret
     return wrap

 def _slice_gradients_Z_expectations(f):
     @wraps(f)
-    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+             psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
         with _Slice_wrap(self, Z, variational_posterior) as s:
-            ret = s.handle_return_array(f(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, s.X, s.X2))
+            ret = s.handle_return_array(f(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, s.X, s.X2,
+                                          psi0=psi0, psi1=psi1, psi2=psi2, Lpsi0=Lpsi0, Lpsi1=Lpsi1, Lpsi2=Lpsi2))
         return ret
     return wrap

 def _slice_gradients_qX_expectations(f):
     @wraps(f)
-    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+             psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
         with _Slice_wrap(self, variational_posterior, Z) as s:
-            ret = list(f(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, s.X2, s.X))
+            ret = list(f(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, s.X2, s.X,
+                         psi0=psi0, psi1=psi1, psi2=psi2, Lpsi0=Lpsi0, Lpsi1=Lpsi1, Lpsi2=Lpsi2))
             r2 = ret[:2]
             ret[0] = s.handle_return_array(r2[0])
             ret[1] = s.handle_return_array(r2[1])
@@ -12,18 +12,22 @@ from .gaussherm import PSICOMP_GH
 class PSICOMP_RBF(Pickleable):
     @Cache_this(limit=10, ignore_args=(0,))
-    def psicomputations(self, variance, lengthscale, Z, variational_posterior):
+    def psicomputations(self, variance, lengthscale, Z, variational_posterior, return_psi2_n=False):
         if isinstance(variational_posterior, variational.NormalPosterior):
-            return rbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior)
+            return rbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior, return_psi2_n=return_psi2_n)
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
+            if return_psi2_n:
+                raise NotImplementedError('However this function seems to return it by default')
             return ssrbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior)
         else:
             raise ValueError("unknown distriubtion received for psi-statistics")

     @Cache_this(limit=10, ignore_args=(0,1,2,3))
-    def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
+    def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior,
+                                  psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
         if isinstance(variational_posterior, variational.NormalPosterior):
-            return rbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior)
+            return rbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior,
+                                                          psi0=psi0, psi1=psi1, psi2=psi2, Lpsi0=Lpsi0, Lpsi1=Lpsi1, Lpsi2=Lpsi2)
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return ssrbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior)
         else:
@@ -35,7 +39,9 @@ class PSICOMP_RBF(Pickleable):
 class PSICOMP_Linear(Pickleable):

     @Cache_this(limit=10, ignore_args=(0,))
-    def psicomputations(self, variance, Z, variational_posterior):
+    def psicomputations(self, variance, Z, variational_posterior, return_psi2_n=False):
+        if return_psi2_n:
+            raise NotImplementedError
         if isinstance(variational_posterior, variational.NormalPosterior):
             return linear_psi_comp.psicomputations(variance, Z, variational_posterior)
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
@@ -44,8 +50,9 @@ class PSICOMP_Linear(Pickleable):
             raise ValueError("unknown distriubtion received for psi-statistics")

     @Cache_this(limit=10, ignore_args=(0,1,2,3))
-    def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior):
+    def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior, psi0=None, psi1=None, psi2=None):
         if isinstance(variational_posterior, variational.NormalPosterior):
+            #Should pass psi in
             return linear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior)
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return sslinear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior)
@@ -8,7 +8,7 @@ The package for the Psi statistics computation of the linear kernel for Bayesian
 import numpy as np
 from ....util.linalg import tdot

-def psicomputations(variance, Z, variational_posterior):
+def psicomputations(variance, Z, variational_posterior, return_psi2_n=False):
     """
     Compute psi-statistics for ss-linear kernel
     """
@@ -22,7 +22,10 @@ def psicomputations(variance, Z, variational_posterior):
     psi0 = (variance*(np.square(mu)+S)).sum(axis=1)
     psi1 = np.dot(mu,(variance*Z).T)
+    if return_psi2_n:
         psi2 = np.dot(S.sum(axis=0)*np.square(variance)*Z,Z.T)+ tdot(psi1.T)
+    else:
+        raise NotImplementedError
     return psi0, psi1, psi2
@@ -5,7 +5,7 @@ The module for psi-statistics for RBF kernel
 import numpy as np
 from GPy.util.caching import Cacher

-def psicomputations(variance, lengthscale, Z, variational_posterior):
+def psicomputations(variance, lengthscale, Z, variational_posterior, return_psi2_n=False):
     """
     Z - MxQ
     mu - NxQ
@@ -21,7 +21,9 @@ def psicomputations(variance, lengthscale, Z, variational_posterior):
     psi0 = np.empty(mu.shape[0])
     psi0[:] = variance
     psi1 = _psi1computations(variance, lengthscale, Z, mu, S)
-    psi2 = _psi2computations(variance, lengthscale, Z, mu, S).sum(axis=0)
+    psi2 = _psi2computations(variance, lengthscale, Z, mu, S)
+    if not return_psi2_n:
+        psi2 = psi2.sum(axis=0)
     return psi0, psi1, psi2

 def __psi1computations(variance, lengthscale, Z, mu, S):
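Deferring the `.sum(axis=0)` as done above is what makes the minibatch/missing-data paths possible: with psi2 kept as NxMxM, a caller can sum over just the rows seen by the current batch, which is impossible once N is summed out. A small numpy-only illustration (sizes are made up):

```python
import numpy as np

N, M = 6, 2
psi2_n = np.random.rand(N, M, M)         # per-point statistics, NxMxM
rows = np.array([0, 2, 5])               # e.g. rows observed for one output
psi2_batch = psi2_n[rows].sum(axis=0)    # MxM for just this subset
psi2_full = psi2_n.sum(axis=0)           # MxM over all data
assert psi2_batch.shape == psi2_full.shape == (M, M)
```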
@@ -66,11 +68,12 @@ def __psi2computations(variance, lengthscale, Z, mu, S):
     _psi2 = variance*variance*np.exp(_psi2_logdenom[:,None,None]+_psi2_exp1[None,:,:]+_psi2_exp2)
     return _psi2

-def psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
+def psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior,
+                              psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
     ARD = (len(lengthscale)!=1)

-    dvar_psi1, dl_psi1, dZ_psi1, dmu_psi1, dS_psi1 = _psi1compDer(dL_dpsi1, variance, lengthscale, Z, variational_posterior.mean, variational_posterior.variance)
-    dvar_psi2, dl_psi2, dZ_psi2, dmu_psi2, dS_psi2 = _psi2compDer(dL_dpsi2, variance, lengthscale, Z, variational_posterior.mean, variational_posterior.variance)
+    dvar_psi1, dl_psi1, dZ_psi1, dmu_psi1, dS_psi1 = _psi1compDer(dL_dpsi1, variance, lengthscale, Z, variational_posterior.mean, variational_posterior.variance, psi1=psi1, Lpsi1=Lpsi1)
+    dvar_psi2, dl_psi2, dZ_psi2, dmu_psi2, dS_psi2 = _psi2compDer(dL_dpsi2, variance, lengthscale, Z, variational_posterior.mean, variational_posterior.variance, psi2=psi2, Lpsi2=Lpsi2)

     dL_dvar = np.sum(dL_dpsi0) + dvar_psi1 + dvar_psi2
|
||||||
|
|
||||||
return dL_dvar, dL_dlengscale, dL_dZ, dL_dmu, dL_dS
|
return dL_dvar, dL_dlengscale, dL_dZ, dL_dmu, dL_dS
|
||||||
|
|
||||||
def _psi1compDer(dL_dpsi1, variance, lengthscale, Z, mu, S):
|
def __psi1compDer(dL_dpsi1, variance, lengthscale, Z, mu, S, psi1=None, Lpsi1=None):
|
||||||
"""
|
"""
|
||||||
dL_dpsi1 - NxM
|
dL_dpsi1 - NxM
|
||||||
Z - MxQ
|
Z - MxQ
|
||||||
|
|
@ -103,8 +106,10 @@ def _psi1compDer(dL_dpsi1, variance, lengthscale, Z, mu, S):
|
||||||
|
|
||||||
lengthscale2 = np.square(lengthscale)
|
lengthscale2 = np.square(lengthscale)
|
||||||
|
|
||||||
_psi1 = _psi1computations(variance, lengthscale, Z, mu, S)
|
if psi1 is None:
|
||||||
Lpsi1 = dL_dpsi1*_psi1
|
psi1 = _psi1computations(variance, lengthscale, Z, mu, S)
|
||||||
|
if Lpsi1 is None:
|
||||||
|
Lpsi1 = dL_dpsi1*psi1
|
||||||
Zmu = Z[None,:,:]-mu[:,None,:] # NxMxQ
|
Zmu = Z[None,:,:]-mu[:,None,:] # NxMxQ
|
||||||
denom = 1./(S+lengthscale2)
|
denom = 1./(S+lengthscale2)
|
||||||
Zmu2_denom = np.square(Zmu)*denom[:,None,:] #NxMxQ
|
Zmu2_denom = np.square(Zmu)*denom[:,None,:] #NxMxQ
|
||||||
|
|
@ -116,7 +121,7 @@ def _psi1compDer(dL_dpsi1, variance, lengthscale, Z, mu, S):
|
||||||
|
|
||||||
return _dL_dvar, _dL_dl, _dL_dZ, _dL_dmu, _dL_dS
|
return _dL_dvar, _dL_dl, _dL_dZ, _dL_dmu, _dL_dS
|
||||||
|
|
||||||
def _psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S):
|
def __psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S, psi2=None, Lpsi2=None):
|
||||||
"""
|
"""
|
||||||
Z - MxQ
|
Z - MxQ
|
||||||
mu - NxQ
|
mu - NxQ
|
||||||
|
|
@ -137,8 +142,10 @@ def _psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S):
|
||||||
denom = 1./(2*S+lengthscale2)
|
denom = 1./(2*S+lengthscale2)
|
||||||
denom2 = np.square(denom)
|
denom2 = np.square(denom)
|
||||||
|
|
||||||
_psi2 = _psi2computations(variance, lengthscale, Z, mu, S) # NxMxM
|
if psi2 is None:
|
||||||
Lpsi2 = dL_dpsi2*_psi2 # dL_dpsi2 is MxM, using broadcast to multiply N out
|
psi2 = _psi2computations(variance, lengthscale, Z, mu, S) # NxMxM
|
||||||
|
if Lpsi2 is None:
|
||||||
|
Lpsi2 = dL_dpsi2*psi2 # dL_dpsi2 is MxM, using broadcast to multiply N out
|
||||||
Lpsi2sum = np.einsum('nmo->n',Lpsi2) #N
|
Lpsi2sum = np.einsum('nmo->n',Lpsi2) #N
|
||||||
Lpsi2Z = np.einsum('nmo,oq->nq',Lpsi2,Z) #NxQ
|
Lpsi2Z = np.einsum('nmo,oq->nq',Lpsi2,Z) #NxQ
|
||||||
Lpsi2Z2 = np.einsum('nmo,oq,oq->nq',Lpsi2,Z,Z) #NxQ
|
Lpsi2Z2 = np.einsum('nmo,oq,oq->nq',Lpsi2,Z,Z) #NxQ
|
||||||
|
|
@@ -149,8 +156,14 @@ def _psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S):
     _dL_dvar = Lpsi2sum.sum()*2/variance
     _dL_dmu = (-2*denom) * (mu*Lpsi2sum[:,None]-Lpsi2Zhat)
     _dL_dS = (2*np.square(denom))*(np.square(mu)*Lpsi2sum[:,None]-2*mu*Lpsi2Zhat+Lpsi2Zhat2) - denom*Lpsi2sum[:,None]
-    _dL_dZ = -np.einsum('nmo,oq->oq',Lpsi2,Z)/lengthscale2+np.einsum('nmo,oq->mq',Lpsi2,Z)/lengthscale2+ \
-        2*np.einsum('nmo,nq,nq->mq',Lpsi2,mu,denom) - np.einsum('nmo,nq,mq->mq',Lpsi2,denom,Z) - np.einsum('nmo,oq,nq->mq',Lpsi2,Z,denom)
+    _dL_dZ1 = -np.einsum('nmo,oq->oq',Lpsi2,Z)/lengthscale2
+    _dL_dZ2 = np.einsum('nmo,oq->mq',Lpsi2,Z)/lengthscale2
+    _dL_dZ3 = 2*np.einsum('nmo,nq,nq->mq',Lpsi2,mu,denom)
+    _dL_dZ4 = - np.einsum('nmo,nq,mq->mq',Lpsi2,denom,Z)
+    _dL_dZ5 = - np.einsum('nmo,oq,nq->mq',Lpsi2,Z,denom)
+    _dL_dZ = _dL_dZ1 + _dL_dZ2 + _dL_dZ3 + _dL_dZ4 + _dL_dZ5
+    #_dL_dZ = -np.einsum('nmo,oq->oq',Lpsi2,Z)/lengthscale2+np.einsum('nmo,oq->mq',Lpsi2,Z)/lengthscale2+ \
+    #2*np.einsum('nmo,nq,nq->mq',Lpsi2,mu,denom) - np.einsum('nmo,nq,mq->mq',Lpsi2,denom,Z) - np.einsum('nmo,oq,nq->mq',Lpsi2,Z,denom)
     _dL_dl = 2*lengthscale* ((S/lengthscale2*denom+np.square(mu*denom))*Lpsi2sum[:,None]+(Lpsi2Z2-Lpsi2Z2p)/(2*np.square(lengthscale2))-
                              (2*mu*denom2)*Lpsi2Zhat+denom2*Lpsi2Zhat2).sum(axis=0)
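Splitting `_dL_dZ` into five named einsum terms is a readability change only; summing the pieces reproduces the original one-line expression. A quick numerical check of that claim, with random arrays standing in for the real quantities:

```python
import numpy as np

N, M, Q = 3, 4, 2
Lpsi2 = np.random.rand(N, M, M)
Z = np.random.rand(M, Q)
mu, denom = np.random.rand(N, Q), np.random.rand(N, Q)
lengthscale2 = np.random.rand(Q)

dZ1 = -np.einsum('nmo,oq->oq', Lpsi2, Z) / lengthscale2
dZ2 = np.einsum('nmo,oq->mq', Lpsi2, Z) / lengthscale2
dZ3 = 2 * np.einsum('nmo,nq,nq->mq', Lpsi2, mu, denom)
dZ4 = -np.einsum('nmo,nq,mq->mq', Lpsi2, denom, Z)
dZ5 = -np.einsum('nmo,oq,nq->mq', Lpsi2, Z, denom)

one_liner = (-np.einsum('nmo,oq->oq', Lpsi2, Z)/lengthscale2
             + np.einsum('nmo,oq->mq', Lpsi2, Z)/lengthscale2
             + 2*np.einsum('nmo,nq,nq->mq', Lpsi2, mu, denom)
             - np.einsum('nmo,nq,mq->mq', Lpsi2, denom, Z)
             - np.einsum('nmo,oq,nq->mq', Lpsi2, Z, denom))

assert np.allclose(dZ1 + dZ2 + dZ3 + dZ4 + dZ5, one_liner)
```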
@@ -158,3 +171,5 @@ def _psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S):
 _psi1computations = Cacher(__psi1computations, limit=5)
 _psi2computations = Cacher(__psi2computations, limit=5)
+_psi1compDer = Cacher(__psi1compDer, limit=5)
+_psi2compDer = Cacher(__psi2compDer, limit=5)
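The two new `Cacher` registrations memoise the renamed derivative helpers, so repeated calls with unchanged inputs reuse earlier results. As a rough stand-in for the idea only (GPy's Cacher tracks Observable inputs rather than hashing arguments, so this is not its actual mechanism):

```python
from functools import lru_cache

@lru_cache(maxsize=5)
def _expensive(n):
    return sum(i * i for i in range(n))

_expensive(1000)   # computed
_expensive(1000)   # served from the cache on the second call
```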
@@ -59,16 +59,22 @@ class RBF(Stationary):
         return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior)[1]

     def psi2(self, Z, variational_posterior):
-        return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior)[2]
+        return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior, return_psi2_n=False)[2]

-    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[:2]
+    def psi2n(self, Z, variational_posterior):
+        return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior, return_psi2_n=True)[2]
+
+    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                      psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
+        dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior, psi0=psi0, psi1=psi1, psi2=psi2, Lpsi0=Lpsi0, Lpsi1=Lpsi1, Lpsi2=Lpsi2)[:2]
         self.variance.gradient = dL_dvar
         self.lengthscale.gradient = dL_dlengscale

-    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[2]
+    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                 psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
+        return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior, psi0=psi0, psi1=psi1, psi2=psi2, Lpsi0=Lpsi0, Lpsi1=Lpsi1, Lpsi2=Lpsi2)[2]

-    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[3:]
+    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                  psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
+        return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior, psi0=psi0, psi1=psi1, psi2=psi2, Lpsi0=Lpsi0, Lpsi1=Lpsi1, Lpsi2=Lpsi2)[3:]
@@ -9,6 +9,7 @@ from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
 import logging
 from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
 from GPy.core.parameterization.param import Param
+from GPy.core.parameterization.observable_array import ObsAr

 class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
     """
@@ -80,46 +81,10 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         """Get the gradients of the posterior distribution of X in its specific form."""
         return X.mean.gradient, X.variance.gradient

-    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, **kw):
-        posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices, **kw)
-        if self.has_uncertain_inputs():
-            current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
-                                            variational_posterior=X,
-                                            Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
-                                            dL_dpsi1=grad_dict['dL_dpsi1'],
-                                            dL_dpsi2=grad_dict['dL_dpsi2'])
-        else:
-            current_values['Xgrad'] = self.kern.gradients_X(grad_dict['dL_dKnm'], X, Z)
-            current_values['Xgrad'] += self.kern.gradients_X_diag(grad_dict['dL_dKdiag'], X)
-            if subset_indices is not None:
-                value_indices['Xgrad'] = subset_indices['samples']
-
-        kl_fctr = self.kl_factr
-        if self.has_uncertain_inputs():
-            if self.missing_data:
-                d = self.output_dim
-                log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)/d
-            else:
-                log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)
-
-            # Subsetting Variational Posterior objects, makes the gradients
-            # empty. We need them to be 0 though:
-            X.mean.gradient[:] = 0
-            X.variance.gradient[:] = 0
-            self.variational_prior.update_gradients_KL(X)
-            if self.missing_data:
-                current_values['meangrad'] += kl_fctr*X.mean.gradient/d
-                current_values['vargrad'] += kl_fctr*X.variance.gradient/d
-            else:
-                current_values['meangrad'] += kl_fctr*X.mean.gradient
-                current_values['vargrad'] += kl_fctr*X.variance.gradient
-
-            if subset_indices is not None:
-                value_indices['meangrad'] = subset_indices['samples']
-                value_indices['vargrad'] = subset_indices['samples']
-        return posterior, log_marginal_likelihood, grad_dict, current_values, value_indices
+    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None, **kw):
+        posterior, log_marginal_likelihood, grad_dict = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm,
+                                                                                                                      psi0=psi0, psi1=psi1, psi2=psi2, **kw)
+        return posterior, log_marginal_likelihood, grad_dict

     def _outer_values_update(self, full_values):
         """
@@ -128,20 +93,46 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         """
         super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
         if self.has_uncertain_inputs():
-            self.X.mean.gradient = full_values['meangrad']
-            self.X.variance.gradient = full_values['vargrad']
-        else:
-            self.X.gradient = full_values['Xgrad']
+            meangrad_tmp, vargrad_tmp = self.kern.gradients_qX_expectations(
+                                            variational_posterior=self.X,
+                                            Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
+                                            dL_dpsi1=full_values['dL_dpsi1'],
+                                            dL_dpsi2=full_values['dL_dpsi2'],
+                                            psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
+
+            kl_fctr = self.kl_factr
+
+            self.X.mean.gradient[:] = 0
+            self.X.variance.gradient[:] = 0
+            self.variational_prior.update_gradients_KL(self.X)
+
+            if self.missing_data or not self.stochastics:
+                self.X.mean.gradient = kl_fctr*self.X.mean.gradient
+                self.X.variance.gradient = kl_fctr*self.X.variance.gradient
+            else:
+                d = self.output_dim
+                self.X.mean.gradient = kl_fctr*self.X.mean.gradient*self.stochastics.batchsize/d
+                self.X.variance.gradient = kl_fctr*self.X.variance.gradient*self.stochastics.batchsize/d
+            self.X.mean.gradient += meangrad_tmp
+            self.X.variance.gradient += vargrad_tmp
+
+        else:
+            self.X.gradient = self.kern.gradients_X(full_values['dL_dKnm'], self.X, self.Z)
+            self.X.gradient += self.kern.gradients_X_diag(full_values['dL_dKdiag'], self.X)

     def _outer_init_full_values(self):
-        if self.has_uncertain_inputs():
-            return dict(meangrad=np.zeros(self.X.mean.shape),
-                        vargrad=np.zeros(self.X.variance.shape))
-        else:
-            return dict(Xgrad=np.zeros(self.X.shape))
+        full_values = super(BayesianGPLVMMiniBatch, self)._outer_init_full_values()
+        return full_values

     def parameters_changed(self):
         super(BayesianGPLVMMiniBatch,self).parameters_changed()
+        kl_fctr = self.kl_factr
+        if self.missing_data or not self.stochastics:
+            self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
+        elif self.stochastics:
+            d = self.output_dim
+            self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)*self.stochastics.batchsize/d
+
         if isinstance(self.inference_method, VarDTC_minibatch):
             return
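One plausible reading of the `batchsize/d` factor above: when each stochastic update only touches `batchsize` of the `d` output dimensions, down-weighting the KL term keeps its total contribution over a full sweep equal to the KL applied once. A back-of-the-envelope check of that interpretation (all numbers are made up):

```python
d = 12          # total output dimensions
batchsize = 3
kl_full = 7.5   # stand-in for variational_prior.KL_divergence(X)

per_update = kl_full * batchsize / d
updates_per_sweep = d / batchsize
assert abs(per_update * updates_per_sweep - kl_full) < 1e-12
```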
@@ -63,10 +63,10 @@ class SparseGPMiniBatch(SparseGP):
         if stochastic and missing_data:
             self.missing_data = True
-            self.stochastics = SparseGPStochastics(self, batchsize)
+            self.stochastics = SparseGPStochastics(self, batchsize, self.missing_data)
         elif stochastic and not missing_data:
             self.missing_data = False
-            self.stochastics = SparseGPStochastics(self, batchsize)
+            self.stochastics = SparseGPStochastics(self, batchsize, self.missing_data)
         elif missing_data:
             self.missing_data = True
             self.stochastics = SparseGPMissing(self)
@@ -81,7 +81,7 @@ class SparseGPMiniBatch(SparseGP):
     def has_uncertain_inputs(self):
         return isinstance(self.X, VariationalPosterior)

-    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, **kwargs):
+    def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None, **kwargs):
         """
         This is the standard part, which usually belongs in parameters_changed.
@@ -100,47 +100,13 @@ class SparseGPMiniBatch(SparseGP):
         like them into this dictionary for inner use of the indices inside the
         algorithm.
         """
-        try:
-            posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=None, **kwargs)
-        except:
-            posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata)
-        current_values = {}
-        likelihood.update_gradients(grad_dict['dL_dthetaL'])
-        current_values['likgrad'] = likelihood.gradient.copy()
-        if subset_indices is None:
-            subset_indices = {}
-        if isinstance(X, VariationalPosterior):
-            #gradients wrt kernel
-            dL_dKmm = grad_dict['dL_dKmm']
-            kern.update_gradients_full(dL_dKmm, Z, None)
-            current_values['kerngrad'] = kern.gradient.copy()
-            kern.update_gradients_expectations(variational_posterior=X,
-                                               Z=Z,
-                                               dL_dpsi0=grad_dict['dL_dpsi0'],
-                                               dL_dpsi1=grad_dict['dL_dpsi1'],
-                                               dL_dpsi2=grad_dict['dL_dpsi2'])
-            current_values['kerngrad'] += kern.gradient
-
-            #gradients wrt Z
-            current_values['Zgrad'] = kern.gradients_X(dL_dKmm, Z)
-            current_values['Zgrad'] += kern.gradients_Z_expectations(
-                               grad_dict['dL_dpsi0'],
-                               grad_dict['dL_dpsi1'],
-                               grad_dict['dL_dpsi2'],
-                               Z=Z,
-                               variational_posterior=X)
-        else:
-            #gradients wrt kernel
-            kern.update_gradients_diag(grad_dict['dL_dKdiag'], X)
-            current_values['kerngrad'] = kern.gradient.copy()
-            kern.update_gradients_full(grad_dict['dL_dKnm'], X, Z)
-            current_values['kerngrad'] += kern.gradient
-            kern.update_gradients_full(grad_dict['dL_dKmm'], Z, None)
-            current_values['kerngrad'] += kern.gradient
-            #gradients wrt Z
-            current_values['Zgrad'] = kern.gradients_X(grad_dict['dL_dKmm'], Z)
-            current_values['Zgrad'] += kern.gradients_X(grad_dict['dL_dKnm'].T, Z, X)
-        return posterior, log_marginal_likelihood, grad_dict, current_values, subset_indices
+        if psi2 is None:
+            psi2_sum_n = None
+        else:
+            psi2_sum_n = psi2.sum(axis=0)
+        posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm,
+                                                                                        dL_dKmm=dL_dKmm, psi0=psi0, psi1=psi1, psi2=psi2_sum_n, **kwargs)
+        return posterior, log_marginal_likelihood, grad_dict

     def _inner_take_over_or_update(self, full_values=None, current_values=None, value_indices=None):
         """
@@ -174,7 +140,10 @@ class SparseGPMiniBatch(SparseGP):
             else:
                 index = slice(None)
             if key in full_values:
+                try:
                     full_values[key][index] += current_values[key]
+                except:
+                    full_values[key] += current_values[key]
             else:
                 full_values[key] = current_values[key]
@@ -193,9 +162,43 @@ class SparseGPMiniBatch(SparseGP):
         Here you put the values, which were collected before in the right places.
         E.g. set the gradients of parameters, etc.
         """
-        self.likelihood.gradient = full_values['likgrad']
-        self.kern.gradient = full_values['kerngrad']
-        self.Z.gradient = full_values['Zgrad']
+        if self.has_uncertain_inputs():
+            #gradients wrt kernel
+            dL_dKmm = full_values['dL_dKmm']
+            self.kern.update_gradients_full(dL_dKmm, self.Z, None)
+            kgrad = self.kern.gradient.copy()
+            self.kern.update_gradients_expectations(
+                variational_posterior=self.X,
+                Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
+                dL_dpsi1=full_values['dL_dpsi1'],
+                dL_dpsi2=full_values['dL_dpsi2'],
+                psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
+            self.kern.gradient += kgrad
+
+            #gradients wrt Z
+            self.Z.gradient = self.kern.gradients_X(dL_dKmm, self.Z)
+            self.Z.gradient += self.kern.gradients_Z_expectations(
+                variational_posterior=self.X,
+                Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
+                dL_dpsi1=full_values['dL_dpsi1'],
+                dL_dpsi2=full_values['dL_dpsi2'],
+                psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
+        else:
+            #gradients wrt kernel
+            self.kern.update_gradients_diag(full_values['dL_dKdiag'], self.X)
+            kgrad = self.kern.gradient.copy()
+            self.kern.update_gradients_full(full_values['dL_dKnm'], self.X, self.Z)
+            kgrad += self.kern.gradient
+            self.kern.update_gradients_full(full_values['dL_dKmm'], self.Z, None)
+            self.kern.gradient += kgrad
+            #kgrad += self.kern.gradient
+
+            #gradients wrt Z
+            self.Z.gradient = self.kern.gradients_X(full_values['dL_dKmm'], self.Z)
+            self.Z.gradient += self.kern.gradients_X(full_values['dL_dKnm'].T, self.Z, self.X)
+
+        self.likelihood.update_gradients(full_values['dL_dthetaL'])

     def _outer_init_full_values(self):
         """
@@ -210,7 +213,15 @@ class SparseGPMiniBatch(SparseGP):
         to initialize the gradients for the mean and the variance in order to
         have the full gradient for indexing)
         """
-        return {}
+        retd = dict(dL_dKmm=np.zeros((self.Z.shape[0], self.Z.shape[0])))
+        if self.has_uncertain_inputs():
+            retd.update(dict(dL_dpsi0=np.zeros(self.X.shape[0]),
+                             dL_dpsi1=np.zeros((self.X.shape[0], self.Z.shape[0])),
+                             dL_dpsi2=np.zeros((self.X.shape[0], self.Z.shape[0], self.Z.shape[0]))))
+        else:
+            retd.update({'dL_dKdiag': np.zeros(self.X.shape[0]),
+                         'dL_dKnm': np.zeros((self.X.shape[0], self.Z.shape[0]))})
+        return retd

     def _outer_loop_for_missing_data(self):
         Lm = None
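The dictionary returned by `_outer_init_full_values` above acts as a set of zero-initialised accumulators into which each per-batch `grad_dict` is added by index. A small sketch of that accumulation, using made-up sizes for X and Z:

```python
import numpy as np

N, M = 10, 4   # stand-ins for self.X.shape[0] and self.Z.shape[0]
full_values = dict(dL_dKmm=np.zeros((M, M)),
                   dL_dpsi0=np.zeros(N),
                   dL_dpsi1=np.zeros((N, M)),
                   dL_dpsi2=np.zeros((N, M, M)))

# A batch that touched rows [1, 3, 7] adds its contribution in place:
rows = np.array([1, 3, 7])
full_values['dL_dpsi1'][rows] += np.random.rand(len(rows), M)
full_values['dL_dKmm'] += np.random.rand(M, M)   # MxM terms have no row index
```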
@@ -232,28 +243,36 @@ class SparseGPMiniBatch(SparseGP):
             print(message, end=' ')

         for d, ninan in self.stochastics.d:
             if not self.stochastics:
                 print(' '*(len(message)) + '\r', end=' ')
                 message = m_f(d)
                 print(message, end=' ')

-            posterior, log_marginal_likelihood, \
-                grad_dict, current_values, value_indices = self._inner_parameters_changed(
+            psi0ni = self.psi0[ninan]
+            psi1ni = self.psi1[ninan]
+            if self.has_uncertain_inputs():
+                psi2ni = self.psi2[ninan]
+                value_indices = dict(outputs=d, samples=ninan, dL_dpsi0=ninan, dL_dpsi1=ninan, dL_dpsi2=ninan)
+            else:
+                psi2ni = None
+                value_indices = dict(outputs=d, samples=ninan, dL_dKdiag=ninan, dL_dKnm=ninan)
+
+            posterior, log_marginal_likelihood, grad_dict = self._inner_parameters_changed(
                 self.kern, self.X[ninan],
                 self.Z, self.likelihood,
                 self.Y_normalized[ninan][:, d], self.Y_metadata,
                 Lm, dL_dKmm,
-                subset_indices=dict(outputs=d, samples=ninan))
+                psi0=psi0ni, psi1=psi1ni, psi2=psi2ni)

-            self._inner_take_over_or_update(self.full_values, current_values, value_indices)
-            self._inner_values_update(current_values)
-
-            Lm = posterior.K_chol
-            dL_dKmm = grad_dict['dL_dKmm']
+            # Fill out the full values by adding in the apporpriate grad_dict
+            # values
+            self._inner_take_over_or_update(self.full_values, grad_dict, value_indices)
+            self._inner_values_update(grad_dict) # What is this for? -> MRD

             woodbury_inv[:, :, d] = posterior.woodbury_inv[:,:,None]
             woodbury_vector[:, d] = posterior.woodbury_vector
             self._log_marginal_likelihood += log_marginal_likelihood

         if not self.stochastics:
             print('')
@@ -261,10 +280,10 @@ class SparseGPMiniBatch(SparseGP):
         self.posterior = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector,
                                    K=posterior._K, mean=None, cov=None, K_chol=posterior.K_chol)
         self._outer_values_update(self.full_values)
+        if self.has_uncertain_inputs():
+            self.kern.return_psi2_n = False

     def _outer_loop_without_missing_data(self):
-        self._log_marginal_likelihood = 0
         if self.posterior is None:
             woodbury_inv = np.zeros((self.num_inducing, self.num_inducing, self.output_dim))
             woodbury_vector = np.zeros((self.num_inducing, self.output_dim))
@@ -272,17 +291,16 @@ class SparseGPMiniBatch(SparseGP):
             woodbury_inv = self.posterior._woodbury_inv
             woodbury_vector = self.posterior._woodbury_vector

-        d = self.stochastics.d
-        posterior, log_marginal_likelihood, \
-            grad_dict, self.full_values, _ = self._inner_parameters_changed(
+        d = self.stochastics.d[0][0]
+        posterior, log_marginal_likelihood, grad_dict= self._inner_parameters_changed(
             self.kern, self.X,
             self.Z, self.likelihood,
             self.Y_normalized[:, d], self.Y_metadata)
         self.grad_dict = grad_dict

-        self._log_marginal_likelihood += log_marginal_likelihood
+        self._log_marginal_likelihood = log_marginal_likelihood

-        self._outer_values_update(self.full_values)
+        self._outer_values_update(self.grad_dict)

         woodbury_inv[:, :, d] = posterior.woodbury_inv[:, :, None]
         woodbury_vector[:, d] = posterior.woodbury_vector
@@ -291,10 +309,23 @@ class SparseGPMiniBatch(SparseGP):
                                        K=posterior._K, mean=None, cov=None, K_chol=posterior.K_chol)

     def parameters_changed(self):
+        #Compute the psi statistics for N once, but don't sum out N in psi2
+        if self.has_uncertain_inputs():
+            #psi0 = ObsAr(self.kern.psi0(self.Z, self.X))
+            #psi1 = ObsAr(self.kern.psi1(self.Z, self.X))
+            #psi2 = ObsAr(self.kern.psi2(self.Z, self.X))
+            self.psi0 = self.kern.psi0(self.Z, self.X)
+            self.psi1 = self.kern.psi1(self.Z, self.X)
+            self.psi2 = self.kern.psi2n(self.Z, self.X)
+        else:
+            self.psi0 = self.kern.Kdiag(self.X)
+            self.psi1 = self.kern.K(self.X, self.Z)
+            self.psi2 = None
+
         if self.missing_data:
             self._outer_loop_for_missing_data()
         elif self.stochastics:
             self._outer_loop_without_missing_data()
         else:
-            self.posterior, self._log_marginal_likelihood, self.grad_dict, self.full_values, _ = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
-            self._outer_values_update(self.full_values)
+            self.posterior, self._log_marginal_likelihood, self.grad_dict = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
+            self._outer_values_update(self.grad_dict)
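Putting the pieces together, the overall flow introduced by this merge is roughly: compute the psi statistics once per `parameters_changed`, keep psi2 as NxMxM, slice the N axis per missing-data subset, and only sum N out just before the inner inference. A rough sketch under those assumptions, reusing the `psi0/psi1/psi2n` method names added in the diff (the generator itself is hypothetical, not GPy API):

```python
def per_output_psi_stats(kern, X, Z, subsets):
    """Yield psi statistics restricted to each subset of observed rows."""
    psi0 = kern.psi0(Z, X)      # (N,)
    psi1 = kern.psi1(Z, X)      # (N, M)
    psi2_n = kern.psi2n(Z, X)   # (N, M, M), the un-summed statistic
    for rows in subsets:        # e.g. observed rows for each output dimension
        yield psi0[rows], psi1[rows], psi2_n[rows].sum(axis=0)
```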