commit c2d217e72c

    Merge branch 'devel' into params

    Conflicts:
        GPy/core/transformations.py
        GPy/kern/parts/kernpart.py

77 changed files with 3608 additions and 807 deletions
@@ -5,7 +5,7 @@ import numpy as np
 import pylab as pb
 from ..util.linalg import mdot, jitchol, tdot, symmetrify, backsub_both_sides, chol_inv, dtrtrs, dpotrs, dpotri
 from scipy import linalg
-from ..likelihoods import Gaussian
+from ..likelihoods import Gaussian, EP,EP_Mixed_Noise
 from gp_base import GPBase
 
 class SparseGP(GPBase):
@@ -109,7 +109,6 @@ class SparseGP(GPBase):
         tmp, _ = dtrtrs(self._Lm, np.asfortranarray(tmp.T), lower=1)
         self._A = tdot(tmp)
 
-
         # factor B
         self.B = np.eye(self.num_inducing) + self._A
         self.LB = jitchol(self.B)
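
For readers new to the codebase: jitchol, imported above from GPy.util.linalg and used here to factor B, is a Cholesky factorisation that falls back to adding diagonal jitter when the matrix is numerically indefinite. A minimal runnable sketch of that idea, not GPy's actual implementation:

import numpy as np

def jitchol_sketch(A, max_tries=5):
    # Sketch of a jittered Cholesky: retry with growing diagonal jitter
    # until the factorisation succeeds. GPy's jitchol is the real,
    # more careful implementation.
    jitter = 0.0
    for _ in range(max_tries):
        try:
            return np.linalg.cholesky(A + jitter * np.eye(A.shape[0]))
        except np.linalg.LinAlgError:
            jitter = 1e-6 * np.mean(np.diag(A)) if jitter == 0.0 else 10.0 * jitter
    raise np.linalg.LinAlgError("matrix not positive definite, even with jitter")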
@@ -139,6 +138,7 @@ class SparseGP(GPBase):
         dL_dpsi2_beta = 0.5 * backsub_both_sides(self._Lm, self.output_dim * np.eye(self.num_inducing) - self.DBi_plus_BiPBi)
 
         if self.likelihood.is_heteroscedastic:
+
             if self.has_uncertain_inputs:
                 self.dL_dpsi2 = self.likelihood.precision.flatten()[:, None, None] * dL_dpsi2_beta[None, :, :]
             else:
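
The heteroscedastic branch above turns one M x M matrix into an N x M x M stack by broadcasting a per-datum precision vector against it. A small shape sketch, with illustrative names rather than GPy internals:

import numpy as np

N, M = 4, 3                                                # num_data, num_inducing
precision = np.abs(np.random.RandomState(0).randn(N, 1))  # one beta per data point
dL_dpsi2_beta = np.eye(M)                                  # stand-in for the M x M term above
dL_dpsi2 = precision.flatten()[:, None, None] * dL_dpsi2_beta[None, :, :]
assert dL_dpsi2.shape == (N, M, M)                         # one scaled copy per data point
assert np.allclose(dL_dpsi2[2], precision[2] * dL_dpsi2_beta)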
@@ -160,9 +160,27 @@ class SparseGP(GPBase):
             # save computation here.
             self.partial_for_likelihood = None
         elif self.likelihood.is_heteroscedastic:
-            raise NotImplementedError, "heteroscedatic derivates not implemented"
+
+            if self.has_uncertain_inputs:
+                raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented"
+
+            else:
+
+                LBi = chol_inv(self.LB)
+                Lmi_psi1, nil = dtrtrs(self._Lm, np.asfortranarray(self.psi1.T), lower=1, trans=0)
+                _LBi_Lmi_psi1, _ = dtrtrs(self.LB, np.asfortranarray(Lmi_psi1), lower=1, trans=0)
+
+
+                self.partial_for_likelihood = -0.5 * self.likelihood.precision + 0.5 * self.likelihood.V**2
+                self.partial_for_likelihood += 0.5 * self.output_dim * (self.psi0 - np.sum(Lmi_psi1**2,0))[:,None] * self.likelihood.precision**2
+
+                self.partial_for_likelihood += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*self.likelihood.precision**2
+
+                self.partial_for_likelihood += -np.dot(self._LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * self.likelihood.Y * self.likelihood.precision**2
+                self.partial_for_likelihood += 0.5*np.dot(self._LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * self.likelihood.precision**2
+
         else:
-            # likelihood is not heterscedatic
+            # likelihood is not heteroscedatic
             self.partial_for_likelihood = -0.5 * self.num_data * self.output_dim * self.likelihood.precision + 0.5 * self.likelihood.trYYT * self.likelihood.precision ** 2
             self.partial_for_likelihood += 0.5 * self.output_dim * (self.psi0.sum() * self.likelihood.precision ** 2 - np.trace(self._A) * self.likelihood.precision)
             self.partial_for_likelihood += self.likelihood.precision * (0.5 * np.sum(self._A * self.DBi_plus_BiPBi) - self.data_fit)
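
The new heteroscedastic branch works entirely through triangular solves against the Cholesky factors Lm and LB rather than forming an explicit inverse of Kmm; in particular, np.sum(Lmi_psi1**2, 0) above is the diagonal of psi1 Kmm^{-1} psi1^T. A runnable check of that identity on toy data, illustrative rather than GPy internals:

import numpy as np
from scipy.linalg import cholesky, solve_triangular

rng = np.random.RandomState(0)
M, N = 5, 8
Z = rng.randn(M, 1)
Kmm = np.exp(-0.5 * (Z - Z.T) ** 2) + 1e-8 * np.eye(M)    # toy positive definite kernel matrix
psi1 = rng.randn(N, M)

Lm = cholesky(Kmm, lower=True)                            # Kmm = Lm Lm^T
Lmi_psi1 = solve_triangular(Lm, psi1.T, lower=True)       # cf. dtrtrs(self._Lm, ..., lower=1)
direct = np.diag(psi1.dot(np.linalg.solve(Kmm, psi1.T)))  # explicit Kmm^{-1}, for comparison only
assert np.allclose(np.sum(Lmi_psi1 ** 2, 0), direct)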
@@ -194,8 +212,8 @@ class SparseGP(GPBase):
         return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])], [])\
             + self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
 
-    def _get_print_names(self):
-        return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
+    #def _get_print_names(self):
+    #    return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
 
     def update_likelihood_approximation(self):
         """
@@ -240,7 +258,7 @@ class SparseGP(GPBase):
         """
         The derivative of the bound wrt the inducing inputs Z
         """
-        dL_dZ = 2.*self.kern.dK_dX(self.dL_dKmm, self.Z) # factor of two becase of vertical and horizontal 'stripes' in dKmm_dZ
+        dL_dZ = self.kern.dK_dX(self.dL_dKmm, self.Z)
         if self.has_uncertain_inputs:
             dL_dZ += self.kern.dpsi1_dZ(self.dL_dpsi1, self.Z, self.X, self.X_variance)
             dL_dZ += self.kern.dpsi2_dZ(self.dL_dpsi2, self.Z, self.X, self.X_variance)
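
The deleted comment's "factor of two" arises because Z enters Kmm = K(Z, Z) through both arguments, so a symmetric dL_dKmm collects identical contributions from the row and column "stripes"; the new line presumably relies on dK_dX accounting for both stripes itself. A numerical check of that factor with a toy RBF kernel and a one-sided gradient, illustrative rather than GPy's dK_dX:

import numpy as np

def K(Z):
    d = Z - Z.T
    return np.exp(-0.5 * d ** 2)              # toy 1-D RBF kernel

rng = np.random.RandomState(1)
Z = rng.randn(4, 1)
C = rng.randn(4, 4); C = 0.5 * (C + C.T)      # symmetric, like dL_dKmm

d = Z - Z.T
one_sided = np.sum(C * K(Z) * (-d), 1)        # Z varied in the rows of K only

# numerical gradient of sum(C * K(Z, Z)): both 'stripes' contribute
eps = 1e-6
num = np.array([(np.sum(C * K(Z + eps * e)) - np.sum(C * K(Z - eps * e))) / (2 * eps)
                for e in np.eye(4)[:, :, None]])
assert np.allclose(num, 2.0 * one_sided, atol=1e-5)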
@@ -298,7 +316,7 @@ class SparseGP(GPBase):
         :type X_variance_new: np.ndarray, Nnew x self.input_dim
         :param which_parts: specifies which outputs kernel(s) to use in prediction
         :type which_parts: ('all', list of bools)
-        :param full_cov: whether to return the folll covariance matrix, or just the diagonal
+        :param full_cov: whether to return the full covariance matrix, or just the diagonal
         :type full_cov: bool
         :rtype: posterior mean, a Numpy array, Nnew x self.input_dim
         :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
@@ -322,26 +340,133 @@ class SparseGP(GPBase):
 
         return mean, var, _025pm, _975pm
 
-    def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, fignum=None, ax=None):
-
+    def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, fignum=None, ax=None, output=None):
         if ax is None:
             fig = pb.figure(num=fignum)
             ax = fig.add_subplot(111)
         if which_data is 'all':
             which_data = slice(None)
 
-        GPBase.plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, ax=ax)
+        GPBase.plot(self, samples=0, plot_limits=plot_limits, which_data='all', which_parts='all', resolution=None, levels=20, ax=ax, output=output)
 
         # add the inducing inputs and some errorbars
-        if self.X.shape[1] == 1:
-            if self.has_uncertain_inputs:
-                Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now
-                ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0],
-                            xerr=2 * np.sqrt(self.X_variance[which_data, 0]),
-                            ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
-            Zu = self.Z * self._Xscale + self._Xoffset
-            ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12)
+        if not hasattr(self,'multioutput'):
 
-        elif self.X.shape[1] == 2:
-            Zu = self.Z * self._Xscale + self._Xoffset
-            ax.plot(Zu[:, 0], Zu[:, 1], 'wo')
+            if self.X.shape[1] == 1:
+                if self.has_uncertain_inputs:
+                    Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now
+                    ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0],
+                                xerr=2 * np.sqrt(self.X_variance[which_data, 0]),
+                                ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
+                Zu = self.Z * self._Xscale + self._Xoffset
+                ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12)
+
+            elif self.X.shape[1] == 2:
+                Zu = self.Z * self._Xscale + self._Xoffset
+                ax.plot(Zu[:, 0], Zu[:, 1], 'wo')
+
+        else:
+            pass
+            """
+            if self.X.shape[1] == 2 and hasattr(self,'multioutput'):
+                Xu = self.X[self.X[:,-1]==output,:]
+                if self.has_uncertain_inputs:
+                    Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now
+
+                    Xu = self.X[self.X[:,-1]==output ,0:1] #??
+
+                    ax.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0],
+                                xerr=2 * np.sqrt(self.X_variance[which_data, 0]),
+                                ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
+
+                Zu = self.Z[self.Z[:,-1]==output,:]
+                Zu = self.Z * self._Xscale + self._Xoffset
+                Zu = self.Z[self.Z[:,-1]==output ,0:1] #??
+                ax.plot(Zu, np.zeros_like(Zu) + ax.get_ylim()[0], 'r|', mew=1.5, markersize=12)
+                #ax.set_ylim(ax.get_ylim()[0],)
+
+            else:
+                raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
+            """
+
+    def predict_single_output(self, Xnew, output=0, which_parts='all', full_cov=False):
+        """
+        For a specific output, predict the function at the new point(s) Xnew.
+        Arguments
+        ---------
+        :param Xnew: The points at which to make a prediction
+        :type Xnew: np.ndarray, Nnew x self.input_dim
+        :param output: output to predict
+        :type output: integer in {0,..., num_outputs-1}
+        :param which_parts: specifies which outputs kernel(s) to use in prediction
+        :type which_parts: ('all', list of bools)
+        :param full_cov: whether to return the full covariance matrix, or just the diagonal
+        :type full_cov: bool
+        :rtype: posterior mean, a Numpy array, Nnew x self.input_dim
+        :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
+        :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.input_dim
+
+        .. Note:: For multiple output models only
+        """
+
+        assert hasattr(self,'multioutput')
+        index = np.ones_like(Xnew)*output
+        Xnew = np.hstack((Xnew,index))
+
+        # normalize X values
+        Xnew = (Xnew.copy() - self._Xoffset) / self._Xscale
+        mu, var = self._raw_predict(Xnew, full_cov=full_cov, which_parts=which_parts)
+
+        # now push through likelihood
+        mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov, noise_model = output)
+        return mean, var, _025pm, _975pm
+
+    def _raw_predict_single_output(self, _Xnew, output=0, X_variance_new=None, which_parts='all', full_cov=False,stop=False):
+        """
+        Internal helper function for making predictions for a specific output,
+        does not account for normalization or likelihood
+        ---------
+
+        :param Xnew: The points at which to make a prediction
+        :type Xnew: np.ndarray, Nnew x self.input_dim
+        :param output: output to predict
+        :type output: integer in {0,..., num_outputs-1}
+        :param which_parts: specifies which outputs kernel(s) to use in prediction
+        :type which_parts: ('all', list of bools)
+        :param full_cov: whether to return the full covariance matrix, or just the diagonal
+
+        .. Note:: For multiple output models only
+        """
+        Bi, _ = dpotri(self.LB, lower=0) # WTH? this lower switch should be 1, but that doesn't work!
+        symmetrify(Bi)
+        Kmmi_LmiBLmi = backsub_both_sides(self._Lm, np.eye(self.num_inducing) - Bi)
+
+        if self.Cpsi1V is None:
+            psi1V = np.dot(self.psi1.T,self.likelihood.V)
+            tmp, _ = dtrtrs(self._Lm, np.asfortranarray(psi1V), lower=1, trans=0)
+            tmp, _ = dpotrs(self.LB, tmp, lower=1)
+            self.Cpsi1V, _ = dtrtrs(self._Lm, tmp, lower=1, trans=1)
+
+        assert hasattr(self,'multioutput')
+        index = np.ones_like(_Xnew)*output
+        _Xnew = np.hstack((_Xnew,index))
+
+        if X_variance_new is None:
+            Kx = self.kern.K(self.Z, _Xnew, which_parts=which_parts)
+            mu = np.dot(Kx.T, self.Cpsi1V)
+            if full_cov:
+                Kxx = self.kern.K(_Xnew, which_parts=which_parts)
+                var = Kxx - mdot(Kx.T, Kmmi_LmiBLmi, Kx) # NOTE this won't work for plotting
+            else:
+                Kxx = self.kern.Kdiag(_Xnew, which_parts=which_parts)
+                var = Kxx - np.sum(Kx * np.dot(Kmmi_LmiBLmi, Kx), 0)
+        else:
+            Kx = self.kern.psi1(self.Z, _Xnew, X_variance_new)
+            mu = np.dot(Kx, self.Cpsi1V)
+            if full_cov:
+                raise NotImplementedError, "TODO"
+            else:
+                Kxx = self.kern.psi0(self.Z, _Xnew, X_variance_new)
+                psi2 = self.kern.psi2(self.Z, _Xnew, X_variance_new)
+                var = Kxx - np.sum(np.sum(psi2 * Kmmi_LmiBLmi[None, :, :], 1), 1)
+
+        return mu, var[:, None]
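
Both new *_single_output methods share the same multioutput convention: the requested output index is appended to the inputs as a final column before the kernel sees them. A small sketch of that convention; the model call in the trailing comment is hypothetical:

import numpy as np

Xnew = np.linspace(0., 10., 5)[:, None]       # Nnew x input_dim for one output
output = 1
index = np.ones_like(Xnew) * output
Xaug = np.hstack((Xnew, index))               # Nnew x (input_dim + 1), last column = output id
assert Xaug.shape == (5, 2) and np.all(Xaug[:, -1] == output)
# with a multioutput SparseGP 'model' (hypothetical):
# mean, var, lo, hi = model.predict_single_output(Xnew, output=1)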