Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-04-29 06:46:22 +02:00
Merge branch 'devel' of github.com:/sheffieldml/GPy into devel
Commit f1da8c0fec
5 changed files with 48 additions and 53 deletions
@@ -124,6 +124,7 @@ class GP(Model):
         else:
             self.X = ObsAr(X)
         self.update_model(True)
+        self._trigger_params_changed()
 
     def set_X(self,X):
         """
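The new `_trigger_params_changed()` call makes the update take effect immediately instead of waiting for the next parameter change. A minimal usage sketch (assuming a GPy build that contains this commit; the data here is made up):

```python
import numpy as np
import GPy

X = np.random.randn(20, 1)
Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
m = GPy.models.GPRegression(X, Y)

ll_before = m.log_likelihood()
m.set_X(np.random.randn(20, 1))  # with this commit, triggers a recompute right away
ll_after = m.log_likelihood()    # now reflects the new inputs
```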
@@ -1042,6 +1042,9 @@ class Parameterizable(OptimizationHandlable):
                 p = param_to_array(p)
                 d = f.create_dataset(n,p.shape,dtype=p.dtype)
                 d[:] = p
+            if hasattr(self, 'param_array'):
+                d = f.create_dataset('param_array',self.param_array.shape, dtype=self.param_array.dtype)
+                d[:] = self.param_array
             f.close()
         except:
             raise 'Fails to write the parameters into a HDF5 file!'
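Note that the surviving context line `raise 'Fails to write the parameters into a HDF5 file!'` raises a string, which Python rejects (string exceptions were removed in 2.6+). A sketch of the same save pattern with a real exception type (`save_params`, `param_dict`, and `param_array` are illustrative names, not the repository's API):

```python
import h5py
import numpy as np

def save_params(filename, param_dict, param_array=None):
    """Sketch of the HDF5 writing pattern above: one dataset per
    parameter, plus an optional flattened 'param_array' dataset."""
    try:
        f = h5py.File(filename, 'w')
        try:
            for n, p in param_dict.items():
                p = np.asarray(p)
                d = f.create_dataset(n, p.shape, dtype=p.dtype)
                d[:] = p
            if param_array is not None:
                d = f.create_dataset('param_array', param_array.shape,
                                     dtype=param_array.dtype)
                d[:] = param_array
        finally:
            f.close()
    except Exception as e:
        # raise an exception *instance*, not a string
        raise IOError('Failed to write the parameters into an HDF5 file: %s' % e)
```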
@@ -6,7 +6,8 @@ from gp import GP
 from parameterization.param import Param
 from ..inference.latent_function_inference import var_dtc
 from .. import likelihoods
-from parameterization.variational import VariationalPosterior
+from parameterization.variational import VariationalPosterior, NormalPosterior
+from ..util.linalg import mdot
 
 import logging
 from GPy.inference.latent_function_inference.posterior import Posterior
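The two new imports are used by the rewritten uncertain-input branch below: `NormalPosterior` wraps a single test point's mean and variance, and `mdot` chains matrix products. A quick sketch of `mdot`'s behaviour (assuming GPy's `util.linalg` API works as its name suggests):

```python
import numpy as np
from GPy.util.linalg import mdot

A = np.random.randn(3, 4)
B = np.random.randn(4, 4)
C = np.random.randn(4, 2)
# mdot chains matrix products left to right
assert np.allclose(mdot(A, B, C), A.dot(B).dot(C))
```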
@@ -102,7 +103,15 @@ class SparseGP(GP):
 
     def _raw_predict(self, Xnew, full_cov=False, kern=None):
         """
-        Make a prediction for the latent function values
+        Make a prediction for the latent function values.
+
+        For certain inputs we give back a full_cov of shape NxN;
+        if there is missing data, each dimension has its own full_cov of shape NxNxD,
+        and if full_cov is off, we take only the diagonal elements across N.
+
+        For uncertain inputs, the SparseGP bound produces a full covariance structure across D,
+        so for full_cov we return an NxDxD matrix, and in the not-full_cov case we return the
+        diagonal elements across D (NxD). This holds both with and without missing data.
         """
 
         if kern is None: kern = self.kern
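The docstring's shape conventions, collected in one place (illustrative summary, not repository code), for N test points and D output dimensions:

```python
# Return shape of var from _raw_predict:
#   certain inputs,   full_cov=True,  no missing data -> (N, N)
#   certain inputs,   full_cov=True,  missing data    -> (N, N, D)
#   certain inputs,   full_cov=False                  -> (N, D)
#   uncertain inputs, full_cov=True                   -> (N, D, D)
#   uncertain inputs, full_cov=False                  -> (N, D)
```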
@@ -121,15 +130,32 @@ class SparseGP(GP):
                 Kxx = kern.Kdiag(Xnew)
                 var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
         else:
-            Kx = kern.psi1(self.Z, Xnew).T
-            mu = np.dot(Kx.T, self.posterior.woodbury_vector)
+            psi0_star = self.kern.psi0(self.Z, Xnew)
+            psi1_star = self.kern.psi1(self.Z, Xnew)
+            #psi2_star = self.kern.psi2(self.Z, Xnew) # Only possible if we get NxMxM psi2 out of the code.
+            la = self.posterior.woodbury_vector
+            mu = np.dot(psi1_star, la) # TODO: dimensions?
 
             if full_cov:
-                Kxx = kern.K(Xnew.mean)
-                if self.posterior.woodbury_inv.ndim == 2:
-                    var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
-                elif self.posterior.woodbury_inv.ndim == 3:
-                    var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
+                var = np.empty((Xnew.shape[0], la.shape[1], la.shape[1]))
+                di = np.diag_indices(la.shape[1])
             else:
-                Kxx = kern.psi0(self.Z, Xnew)
-                var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
+                var = np.empty((Xnew.shape[0], la.shape[1]))
+            for i in range(Xnew.shape[0]):
+                _mu, _var = Xnew.mean.values[[i]], Xnew.variance.values[[i]]
+                psi2_star = self.kern.psi2(self.Z, NormalPosterior(_mu, _var))
+                tmp = (psi2_star[:, :] - psi1_star[[i]].T.dot(psi1_star[[i]]))
+
+                var_ = mdot(la.T, tmp, la)
+                p0 = psi0_star[i]
+                t = self.posterior.woodbury_inv
+                t2 = np.trace(t.T.dot(psi2_star), axis1=1, axis2=2)
+
+                if full_cov:
+                    var_[di] += p0
+                    var_[di] += -t2
+                    var[i] = var_
+                else:
+                    var[i] = np.diag(var_)+p0-t2
         return mu, var
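For reference, the quantities assembled in the new per-point loop correspond to the standard sparse-GP uncertain-input predictive moments. In the code's notation (`la` = `woodbury_vector`, `W` = `woodbury_inv`, with `W_d` its slice for output dimension d; this is my reading of the branch, not documentation shipped with the commit):

```latex
% per test point x_* (a NormalPosterior with moments mu_*, s_*), outputs d, e:
%   psi0* = <k(x_*,x_*)>,  psi1* = <k(x_*,Z)>,  psi2* = <k(Z,x_*) k(x_*,Z)>
m_d    = \psi_1^{*}\,\lambda_{:,d}
V_{de} = \lambda_{:,d}^{\top}\left(\psi_2^{*} - \psi_1^{*\top}\psi_1^{*}\right)\lambda_{:,e}
       + \delta_{de}\left(\psi_0^{*} - \operatorname{tr}\!\left(W_d\,\psi_2^{*}\right)\right)
```

The cross term `var_` covers the first line; `p0` and `t2` supply the diagonal correction, which is why only the diagonal indices `di` receive them in the full_cov case.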
@@ -154,9 +154,9 @@ class Coregionalize(Kern):
     def _gradient_reduce_numpy(self, dL_dK, index, index2):
         index, index2 = index[:,0], index2[:,0]
         dL_dK_small = np.zeros_like(self.B)
-        for i in range(k.output_dim):
+        for i in range(self.output_dim):
             tmp1 = dL_dK[index==i]
-        for j in range(k.output_dim):
+            for j in range(self.output_dim):
                 dL_dK_small[j,i] = tmp1[:,index2==j].sum()
         return dL_dK_small
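The fix replaces the undefined name `k` with `self`, without which `_gradient_reduce_numpy` raises a NameError. The double Python loop can also be collapsed; a hypothetical vectorized equivalent (`gradient_reduce_vectorized` is my name, not the repository's):

```python
import numpy as np

def gradient_reduce_vectorized(dL_dK, index, index2, output_dim):
    """out[j, i] accumulates dL_dK[r, c] over all rows r with
    index[r] == i and columns c with index2[c] == j."""
    out = np.zeros((output_dim, output_dim))
    # broadcast (1, M) column labels against (N, 1) row labels so every
    # entry of dL_dK is routed to its (j, i) cell; np.add.at accumulates
    # repeated targets unbuffered
    np.add.at(out, (index2[None, :], index[:, None]), dL_dK)
    return out
```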
@@ -3,6 +3,7 @@
 
 import numpy as np
 from ..core.parameterization.param import Param
+from ..core.sparse_gp import SparseGP
 from ..core.gp import GP
 from ..inference.latent_function_inference import var_dtc
 from .. import likelihoods
@@ -16,14 +17,9 @@ from GPy.inference.optimization.stochastics import SparseGPStochastics,\
 #SparseGPMissing
 logger = logging.getLogger("sparse gp")
 
-class SparseGPMiniBatch(GP):
+class SparseGPMiniBatch(SparseGP):
     """
-    A general purpose Sparse GP model
-'''
-Created on 3 Nov 2014
-
-@author: maxz
-'''
-
+    A general purpose Sparse GP model, allowing missing data and stochastics across dimensions.
+
     This model allows (approximate) inference using variational DTC or FITC
     (Gaussian likelihoods) as well as non-conjugate sparse methods based on
@@ -315,34 +311,3 @@ Created on 3 Nov 2014
         else:
             self.posterior, self._log_marginal_likelihood, self.grad_dict, self.full_values, _ = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
             self._outer_values_update(self.full_values)
-
-    def _raw_predict(self, Xnew, full_cov=False, kern=None):
-        """
-        Make a prediction for the latent function values
-        """
-
-        if kern is None: kern = self.kern
-
-        if not isinstance(Xnew, VariationalPosterior):
-            Kx = kern.K(self.Z, Xnew)
-            mu = np.dot(Kx.T, self.posterior.woodbury_vector)
-            if full_cov:
-                Kxx = kern.K(Xnew)
-                if self.posterior.woodbury_inv.ndim == 2:
-                    var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
-                elif self.posterior.woodbury_inv.ndim == 3:
-                    var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
-                var = var
-            else:
-                Kxx = kern.Kdiag(Xnew)
-                var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
-        else:
-            Kx = kern.psi1(self.Z, Xnew)
-            mu = np.dot(Kx, self.posterior.woodbury_vector)
-            if full_cov:
-                raise NotImplementedError, "TODO"
-            else:
-                Kxx = kern.psi0(self.Z, Xnew)
-                psi2 = kern.psi2(self.Z, Xnew)
-                var = Kxx - np.sum(np.sum(psi2 * Kmmi_LmiBLmi[None, :, :], 1), 1)
-        return mu, var
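Deleting this copy is safe because SparseGPMiniBatch now subclasses SparseGP (see the class-statement change above), so `_raw_predict` resolves to the single implementation there; the removed copy was also broken in places (a no-op `var = var`, Python-2-only `raise NotImplementedError, "TODO"`, and the undefined name `Kmmi_LmiBLmi`). A quick hedged check, assuming these module paths (they may differ across GPy versions):

```python
from GPy.core.sparse_gp import SparseGP
from GPy.core.sparse_gp_minibatch import SparseGPMiniBatch

# the minibatch class no longer defines its own copy; it inherits SparseGP's
assert issubclass(SparseGPMiniBatch, SparseGP)
assert '_raw_predict' not in vars(SparseGPMiniBatch)
```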