Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-15 06:52:39 +02:00

renaming: posterior_variational -> variational_posterior

commit da4686dd3c, parent 17f9764a55
9 changed files with 58 additions and 63 deletions
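The rename threads one argument name through every kernel's psi-statistics interface. For orientation, a minimal sketch of how a call site reads after this commit; `DummyPosterior` is a hypothetical stand-in, since the code in this diff only touches the object's `.mean` and `.variance` attributes:

import numpy as np
import GPy

class DummyPosterior(object):
    # hypothetical stand-in for the variational posterior q(X):
    # the kernel code in this diff only reads .mean and .variance
    def __init__(self, mean, variance):
        self.mean, self.variance = mean, variance

Z = np.random.rand(5, 1)                         # inducing inputs
vp = DummyPosterior(mean=np.random.rand(10, 1),
                    variance=0.1 * np.ones((10, 1)))

k = GPy.kern.RBF(1)
psi0 = k.psi0(Z, vp)   # second argument is now named variational_posterior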
@@ -16,7 +16,7 @@ def olympic_marathon_men(optimize=True, plot=True):
     m = GPy.models.GPRegression(data['X'], data['Y'])

     # set the lengthscale to be something sensible (defaults to 1)
-    m['rbf_lengthscale'] = 10
+    m.kern.lengthscale = 10.

     if optimize:
         m.optimize('bfgs', max_iters=200)
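This hunk swaps string-keyed parameter setting for direct attribute access on the kernel. A minimal usage sketch of the new idiom, on made-up data:

import numpy as np
import GPy

# made-up 1-D regression data, for illustration only
X = np.random.uniform(0., 10., (20, 1))
Y = np.sin(X) + 0.1 * np.random.randn(20, 1)

m = GPy.models.GPRegression(X, Y)
m.kern.lengthscale = 10.           # set on the parameter object, no string key
m.optimize('bfgs', max_iters=200)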
@@ -41,11 +41,10 @@ def coregionalization_toy2(optimize=True, plot=True):
     Y = np.vstack((Y1, Y2))

     #build the kernel
-    k1 = GPy.kern.RBF(1) + GPy.kern.bias(1)
-    k2 = GPy.kern.coregionalize(2,1)
+    k1 = GPy.kern.RBF(1) + GPy.kern.Bias(1)
+    k2 = GPy.kern.Coregionalize(2,1)
     k = k1**k2
     m = GPy.models.GPRegression(X, Y, kernel=k)
-    m.constrain_fixed('.*rbf_var', 1.)

     if optimize:
         m.optimize('bfgs', max_iters=100)
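The example now uses the capitalised kernel classes. For context, a sketch of the setup the toy example implies: the second input column carries the output index that `Coregionalize` keys on (the data construction here is an illustrative assumption, not taken from the diff):

import numpy as np
import GPy

# two outputs on their own inputs; append an index column per output
X1, X2 = np.random.rand(10, 1), np.random.rand(10, 1)
X = np.vstack((np.hstack((X1, np.zeros_like(X1))),
               np.hstack((X2, np.ones_like(X2)))))
Y = np.vstack((np.sin(X1), np.cos(X2)))

k = (GPy.kern.RBF(1) + GPy.kern.Bias(1)) ** GPy.kern.Coregionalize(2, 1)
m = GPy.models.GPRegression(X, Y, kernel=k)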
@@ -86,11 +85,13 @@ def coregionalization_sparse(optimize=True, plot=True):
     """
     #fetch the data from the non sparse examples
     m = coregionalization_toy2(optimize=False, plot=False)
-    X, Y = m.X, m.likelihood.Y
+    X, Y = m.X, m.Y

+    k = GPy.kern.RBF(1)**GPy.kern.Coregionalize(2)
+
     #construct a model
-    m = GPy.models.SparseGPRegression(X,Y)
-    m.constrain_fixed('iip_\d+_1') # don't optimize the inducing input indexes
+    m = GPy.models.SparseGPRegression(X,Y, num_inducing=25, kernel=k)
+    m.Z[:,1].fix() # don't optimize the inducing input indexes

     if optimize:
         m.optimize('bfgs', max_iters=100, messages=1)
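Instead of the regex-based `constrain_fixed`, the index column of the inducing inputs is now pinned directly with `.fix()`. A condensed sketch of the same idiom, on made-up data:

import numpy as np
import GPy

# last input column is the output index, as in the toy example above
X = np.hstack((np.random.rand(40, 1),
               np.random.randint(0, 2, (40, 1)).astype(float)))
Y = np.random.randn(40, 1)

k = GPy.kern.RBF(1) ** GPy.kern.Coregionalize(2)
m = GPy.models.SparseGPRegression(X, Y, num_inducing=25, kernel=k)
m.Z[:, 1].fix()   # keep the inducing inputs' index column fixed while optimizing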
@@ -128,7 +129,7 @@ def epomeo_gpx(max_iters=200, optimize=True, plot=True):
                        np.random.randint(0, 4, num_inducing)[:, None]))

     k1 = GPy.kern.RBF(1)
-    k2 = GPy.kern.coregionalize(output_dim=5, rank=5)
+    k2 = GPy.kern.Coregionalize(output_dim=5, rank=5)
     k = k1**k2

     m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
@@ -322,7 +323,7 @@ def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize
        kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
    else:
        kernel = GPy.kern.RBF(X.shape[1], ARD=1)
-   kernel += GPy.kern.White(X.shape[1]) + GPy.kern.bias(X.shape[1])
+   kernel += GPy.kern.White(X.shape[1]) + GPy.kern.Bias(X.shape[1])
    m = GPy.models.GPRegression(X, Y, kernel)
    # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
    # m.set_prior('.*lengthscale',len_prior)
@@ -361,7 +362,7 @@ def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, o
        kernel = GPy.kern.RBF_inv(X.shape[1], ARD=1)
    else:
        kernel = GPy.kern.RBF(X.shape[1], ARD=1)
-   #kernel += GPy.kern.bias(X.shape[1])
+   #kernel += GPy.kern.Bias(X.shape[1])
    X_variance = np.ones(X.shape) * 0.5
    m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
    # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
@@ -45,9 +45,6 @@ class Add(Kern):
     def update_gradients_full(self, dL_dK, X):
         [p.update_gradients_full(dL_dK, X[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]

-    def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
-        [p.update_gradients_sparse(dL_dKmm, dL_dKnm, dL_dKdiag, X[:,i_s], Z[:,i_s]) for p, i_s in zip(self._parameters_, self.input_slices)]
-
     def gradients_X(self, dL_dK, X, X2=None):
         """Compute the gradient of the objective function with respect to X.

@@ -129,7 +129,7 @@ class Coregionalize(Kern):

     def update_gradients_diag(self, dL_dKdiag, X):
         index = np.asarray(X, dtype=np.int).flatten()
-        dL_dKdiag_small = np.array([dL_dKdiag[index==i] for i in xrange(output_dim)])
+        dL_dKdiag_small = np.array([dL_dKdiag[index==i].sum() for i in xrange(self.output_dim)])
         self.W.gradient = 2.*self.W*dL_dKdiag_small[:, None]
         self.kappa.gradient = dL_dKdiag_small

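The `update_gradients_diag` fix adds the missing reduction: the gradient for output i must aggregate the contributions of every data point assigned to output i; without `.sum()` the comprehension builds a ragged array. A small self-contained check of the fixed line:

import numpy as np

dL_dKdiag = np.array([0.1, 0.2, 0.3, 0.4])
index = np.array([0, 1, 0, 1])   # which output each data point belongs to
output_dim = 2

dL_dKdiag_small = np.array([dL_dKdiag[index == i].sum() for i in range(output_dim)])
print(dL_dKdiag_small)           # [0.4 0.6], one scalar per output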
@@ -26,11 +26,11 @@ class Kern(Parameterized):
         raise NotImplementedError
     def Kdiag(self, Xa):
         raise NotImplementedError
-    def psi0(self,Z,posterior_variational):
+    def psi0(self,Z,variational_posterior):
         raise NotImplementedError
-    def psi1(self,Z,posterior_variational):
+    def psi1(self,Z,variational_posterior):
         raise NotImplementedError
-    def psi2(self,Z,posterior_variational):
+    def psi2(self,Z,variational_posterior):
         raise NotImplementedError
     def gradients_X(self, dL_dK, X, X2):
         raise NotImplementedError
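For reference, the psi statistics these stubs declare are expectations of kernel quantities under the variational posterior q(X) over uncertain inputs. In the standard variational sparse-GP construction (a definition from the literature, stated here as an assumption rather than read from the diff):

\psi_0 = \sum_n \mathbb{E}_{q(x_n)}\!\left[k(x_n, x_n)\right], \qquad
(\Psi_1)_{nm} = \mathbb{E}_{q(x_n)}\!\left[k(x_n, z_m)\right], \qquad
(\Psi_2)_{mm'} = \sum_n \mathbb{E}_{q(x_n)}\!\left[k(x_n, z_m)\, k(x_n, z_{m'})\right]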
@@ -49,16 +49,16 @@ class Kern(Parameterized):
-        self._collect_gradient(target)
+        self._set_gradient(target)

-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         """Set the gradients of all parameters when doing variational (M) inference with uncertain inputs."""
         raise NotImplementedError
     def gradients_Z_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
         grad = self.gradients_X(dL_dKmm, Z)
         grad += self.gradients_X(dL_dKnm.T, Z, X)
         return grad
-    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
+    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         raise NotImplementedError
-    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
+    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         raise NotImplementedError

     def plot_ARD(self, *args, **kw):
@@ -106,52 +106,52 @@ class Linear(Kern):
     #              variational              #
     #---------------------------------------#

-    def psi0(self, Z, posterior_variational):
-        return np.sum(self.variances * self._mu2S(posterior_variational), 1)
+    def psi0(self, Z, variational_posterior):
+        return np.sum(self.variances * self._mu2S(variational_posterior), 1)

-    def psi1(self, Z, posterior_variational):
-        return self.K(posterior_variational.mean, Z) #the variance, it does nothing
+    def psi1(self, Z, variational_posterior):
+        return self.K(variational_posterior.mean, Z) #the variance, it does nothing

-    def psi2(self, Z, posterior_variational):
+    def psi2(self, Z, variational_posterior):
         ZA = Z * self.variances
-        ZAinner = self._ZAinner(posterior_variational, Z)
+        ZAinner = self._ZAinner(variational_posterior, Z)
         return np.dot(ZAinner, ZA.T)

-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z):
-        mu, S = posterior_variational.mean, posterior_variational.variance
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, variational_posterior, Z):
+        mu, S = variational_posterior.mean, variational_posterior.variance
         # psi0:
-        tmp = dL_dpsi0[:, None] * self._mu2S(posterior_variational)
+        tmp = dL_dpsi0[:, None] * self._mu2S(variational_posterior)
         if self.ARD: grad = tmp.sum(0)
         else: grad = np.atleast_1d(tmp.sum())
         #psi1
         self.update_gradients_full(dL_dpsi1, mu, Z)
         grad += self.variances.gradient
         #psi2
-        tmp = dL_dpsi2[:, :, :, None] * (self._ZAinner(posterior_variational, Z)[:, :, None, :] * (2. * Z)[None, None, :, :])
+        tmp = dL_dpsi2[:, :, :, None] * (self._ZAinner(variational_posterior, Z)[:, :, None, :] * (2. * Z)[None, None, :, :])
         if self.ARD: grad += tmp.sum(0).sum(0).sum(0)
         else: grad += tmp.sum()
         #from Kmm
         self.update_gradients_full(dL_dKmm, Z, None)
         self.variances.gradient += grad

-    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z):
+    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, variational_posterior, Z):
         # Kmm
         grad = self.gradients_X(dL_dKmm, Z, None)
         #psi1
-        grad += self.gradients_X(dL_dpsi1.T, Z, posterior_variational.mean)
+        grad += self.gradients_X(dL_dpsi1.T, Z, variational_posterior.mean)
         #psi2
-        self._weave_dpsi2_dZ(dL_dpsi2, Z, posterior_variational, grad)
+        self._weave_dpsi2_dZ(dL_dpsi2, Z, variational_posterior, grad)
         return grad

-    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, posterior_variational, Z):
-        grad_mu, grad_S = np.zeros(posterior_variational.mean.shape), np.zeros(posterior_variational.mean.shape)
+    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, variational_posterior, Z):
+        grad_mu, grad_S = np.zeros(variational_posterior.mean.shape), np.zeros(variational_posterior.mean.shape)
         # psi0
-        grad_mu += dL_dpsi0[:, None] * (2.0 * posterior_variational.mean * self.variances)
+        grad_mu += dL_dpsi0[:, None] * (2.0 * variational_posterior.mean * self.variances)
         grad_S += dL_dpsi0[:, None] * self.variances
         # psi1
         grad_mu += (dL_dpsi1[:, :, None] * (Z * self.variances)).sum(1)
         # psi2
-        self._weave_dpsi2_dmuS(dL_dpsi2, Z, posterior_variational, grad_mu, grad_S)
+        self._weave_dpsi2_dmuS(dL_dpsi2, Z, variational_posterior, grad_mu, grad_S)

         return grad_mu, grad_S

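The linear kernel's psi0 line rests on the Gaussian identity E_q[x_q^2] = mu_q^2 + S_q (assuming `_mu2S` returns mu**2 + S, which the expression suggests). A quick Monte-Carlo sanity check of that identity:

import numpy as np

rng = np.random.RandomState(0)
variances = np.array([0.5, 2.0])                     # linear-kernel variances (ARD)
mu, S = np.array([1.0, -0.3]), np.array([0.2, 0.7])  # q(x) = N(mu, diag(S))

samples = rng.randn(200000, 2) * np.sqrt(S) + mu
mc = np.mean(np.sum(variances * samples**2, axis=1))   # E_q[sum_q var_q x_q^2]
closed = np.sum(variances * (mu**2 + S))               # the psi0 expression
print(mc, closed)                                      # agree to MC error (~2.18)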
@@ -42,10 +42,6 @@ class Prod(Kern):
         self.k1.update_gradients_full(dL_dK*self.k2(X[:,self.slice2]), X[:,self.slice1])
         self.k2.update_gradients_full(dL_dK*self.k1(X[:,self.slice1]), X[:,self.slice2])

-    def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
-        self.k1.update_gradients_sparse(dL_dKmm * self.k2.K(Z[:,self.slice2]), dL_dKnm * self.k2(X[:,self.slice2], Z[:,self.slice2]), dL_dKdiag * self.k2.Kdiag(X[:,self.slice2]), X[:,self.slice1], Z[:,self.slice1] )
-        self.k2.update_gradients_sparse(dL_dKmm * self.k1.K(Z[:,self.slice1]), dL_dKnm * self.k1(X[:,self.slice1], Z[:,self.slice1]), dL_dKdiag * self.k1.Kdiag(X[:,self.slice1]), X[:,self.slice2], Z[:,self.slice2] )
-
     def gradients_X(self, dL_dK, X, X2=None):
         target = np.zeros(X.shape)
         if X2 is None:
@@ -40,27 +40,27 @@ class RBF(Stationary):
         self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S


-    def psi0(self, Z, posterior_variational):
-        return self.Kdiag(posterior_variational.mean)
+    def psi0(self, Z, variational_posterior):
+        return self.Kdiag(variational_posterior.mean)

-    def psi1(self, Z, posterior_variational):
-        mu = posterior_variational.mean
-        S = posterior_variational.variance
+    def psi1(self, Z, variational_posterior):
+        mu = variational_posterior.mean
+        S = variational_posterior.variance
         self._psi_computations(Z, mu, S)
         return self._psi1

-    def psi2(self, Z, posterior_variational):
-        mu = posterior_variational.mean
-        S = posterior_variational.variance
+    def psi2(self, Z, variational_posterior):
+        mu = variational_posterior.mean
+        S = variational_posterior.variance
         self._psi_computations(Z, mu, S)
         return self._psi2

-    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
+    def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         #contributions from Kmm
         self.update_gradients_full(dL_dKmm, Z)

-        mu = posterior_variational.mean
-        S = posterior_variational.variance
+        mu = variational_posterior.mean
+        S = variational_posterior.variance
         self._psi_computations(Z, mu, S)
         l2 = self.lengthscale **2

@@ -87,9 +87,9 @@ class RBF(Stationary):
         else:
             self.lengthscale.gradient += dpsi2_dlength.sum(0).sum(0).sum(0)

-    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
-        mu = posterior_variational.mean
-        S = posterior_variational.variance
+    def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+        mu = variational_posterior.mean
+        S = variational_posterior.variance
         self._psi_computations(Z, mu, S)
         l2 = self.lengthscale **2

@@ -108,9 +108,9 @@ class RBF(Stationary):

         return grad

-    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
-        mu = posterior_variational.mean
-        S = posterior_variational.variance
+    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+        mu = variational_posterior.mean
+        S = variational_posterior.variance
         self._psi_computations(Z, mu, S)
         l2 = self.lengthscale **2
         #psi1
@@ -43,7 +43,7 @@ class Static(Kern):

 class White(Static):
     def __init__(self, input_dim, variance=1., name='white'):
-        super(White, self).__init__(input_dim, name)
+        super(White, self).__init__(input_dim, variance, name)

     def K(self, X, X2=None):
         if X2 is None:
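This one-line fix matters: the old call passed `name` in the positional slot that `Static.__init__` reserves for `variance`, so a user-supplied variance never reached the parameter. A sketch of the behaviour the fix restores (assuming `Static` stores the value as `self.variance`; the same applies to `Bias` below):

import GPy

k = GPy.kern.White(1, variance=0.5)
print(k.variance)   # 0.5 with the fixed constructor, not the default 1.0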
@@ -66,7 +66,7 @@ class White(Static):

 class Bias(Static):
     def __init__(self, input_dim, variance=1., name='bias'):
-        super(Bias, self).__init__(input_dim, name)
+        super(Bias, self).__init__(input_dim, variance, name)

     def K(self, X, X2=None):
         shape = (X.shape[0], X.shape[0] if X2 is None else X2.shape[0])
@@ -7,6 +7,7 @@ from ..core import SparseGP
 from .. import likelihoods
 from .. import kern
 from ..inference.latent_function_inference import VarDTC
+from ..util.misc import param_to_array

 class SparseGPRegression(SparseGP):
     """
@@ -33,18 +34,18 @@ class SparseGPRegression(SparseGP):

         # kern defaults to rbf (plus white for stability)
         if kernel is None:
-            kernel = kern.rbf(input_dim)# + kern.white(input_dim, variance=1e-3)
+            kernel = kern.RBF(input_dim)# + kern.white(input_dim, variance=1e-3)

         # Z defaults to a subset of the data
         if Z is None:
             i = np.random.permutation(num_data)[:min(num_inducing, num_data)]
-            Z = X[i].copy()
+            Z = param_to_array(X)[i].copy()
         else:
             assert Z.shape[1] == input_dim

         likelihood = likelihoods.Gaussian()

-        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, X_variance=X_variance, inference_method=VarDTC())
+        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method=VarDTC())

     def _getstate(self):
         return SparseGP._getstate(self)
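With the constructor cleaned up (`kern.RBF` instead of the old lowercase factory, and the inducing points initialised from a plain-array copy of X via `param_to_array`), basic usage is unchanged. A minimal sketch:

import numpy as np
import GPy

X = np.random.rand(50, 1)
Y = np.sin(3. * X) + 0.05 * np.random.randn(50, 1)

m = GPy.models.SparseGPRegression(X, Y, num_inducing=10)  # Z drawn from X
m.optimize('bfgs', max_iters=100)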