mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-02 00:02:38 +02:00

Commit 39c242a4d5: Fixed merge conflicts, M now num_inducing
20 changed files with 537 additions and 232 deletions

GPy/core/fitc.py (new file, 312 lines)
@@ -0,0 +1,312 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)

import numpy as np
import pylab as pb
from ..util.linalg import mdot, jitchol, chol_inv, tdot, symmetrify, pdinv
from ..util.plot import gpplot
from .. import kern
from scipy import stats, linalg
from gp_base import GPBase
from sparse_gp import SparseGP


def backsub_both_sides(L, X):
    """ Return L^-T * X * L^-1, assuming X is symmetric and L is a lower Cholesky factor """
    tmp, _ = linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(X), lower=1, trans=1)
    return linalg.lapack.flapack.dtrtrs(L, np.asfortranarray(tmp.T), lower=1, trans=1)[0].T

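A quick standalone check of what backsub_both_sides returns (illustration only, not part of the GPy source; it assumes nothing beyond numpy):

import numpy as np
A = np.random.randn(5, 5)
K = np.dot(A, A.T) + 5. * np.eye(5)        # a positive definite matrix
L = np.linalg.cholesky(K)                  # lower Cholesky factor
X = np.random.randn(5, 5); X = X + X.T     # symmetric X
Li = np.linalg.inv(L)
ref = np.dot(Li.T, np.dot(X, Li))          # L^-T * X * L^-1, formed naively
# backsub_both_sides(L, X) should agree with `ref` up to rounding error,
# while avoiding the explicit inverse.
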
class FITC(SparseGP):
    """
    Sparse FITC approximation

    :param X: inputs
    :type X: np.ndarray (N x Q)
    :param likelihood: a likelihood instance, containing the observed data
    :type likelihood: GPy.likelihood.(Gaussian | EP)
    :param kernel: the kernel (covariance function). See the kernels documentation
    :type kernel: a GPy.kern.kern instance
    :param Z: inducing inputs
    :type Z: np.ndarray (M x Q)
    :param normalize_(X|Y): whether to normalize the data before computing (predictions will be in original scales)
    :type normalize_(X|Y): bool
    """

    def __init__(self, X, likelihood, kernel, Z, normalize_X=False):
        GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)

        self.Z = Z
        self.M = Z.shape[0]
        self.likelihood = likelihood

        # uncertain inputs are not supported yet: X_variance is fixed to None here
        X_variance = None
        if X_variance is None:
            self.has_uncertain_inputs = False
        else:
            assert X_variance.shape == X.shape
            self.has_uncertain_inputs = True
            self.X_variance = X_variance

        if normalize_X:
            self.Z = (self.Z.copy() - self._Xmean) / self._Xstd

            # normalize X uncertainty also
            if self.has_uncertain_inputs:
                self.X_variance /= np.square(self._Xstd)

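Construction follows the __init__ signature above. A hedged usage sketch (the Gaussian likelihood wrapper and the rbf kernel constructor are assumed to behave as elsewhere in GPy at this revision, so treat the exact calls as illustrative rather than definitive):

import numpy as np
import GPy

N, Q, M = 200, 1, 10
X = np.random.rand(N, Q) * 10.
Y = np.sin(X) + 0.1 * np.random.randn(N, 1)
kernel = GPy.kern.rbf(Q)                          # covariance function
likelihood = GPy.likelihoods.Gaussian(Y)          # assumed: the likelihood object wraps the observed data
Z = X[np.random.permutation(N)[:M], :].copy()     # M inducing inputs drawn from the data
m = FITC(X, likelihood, kernel, Z)
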
def _set_params(self, p):
|
||||
self.Z = p[:self.M * self.input_dim].reshape(self.M, self.input_dim)
|
||||
self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam])
|
||||
self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:])
|
||||
self._compute_kernel_matrices()
|
||||
self._computations()
|
||||
|
||||
def _get_params(self):
|
||||
return np.hstack([self.Z.flatten(), self.kern._get_params_transformed(), self.likelihood._get_params()])
|
||||
|
||||
def _get_param_names(self):
|
||||
return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])],[])\
|
||||
+ self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
|
||||
|
||||
|
||||
def update_likelihood_approximation(self):
|
||||
"""
|
||||
Approximates a non-Gaussian likelihood using Expectation Propagation
|
||||
|
||||
For a Gaussian likelihood, no iteration is required:
|
||||
this function does nothing
|
||||
"""
|
||||
if self.has_uncertain_inputs:
|
||||
raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
|
||||
else:
|
||||
self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
|
||||
self._set_params(self._get_params()) # update the GP
|
||||
|
||||
def _compute_kernel_matrices(self):
|
||||
# kernel computations, using BGPLVM notation
|
||||
self.Kmm = self.kern.K(self.Z)
|
||||
if self.has_uncertain_inputs:
|
||||
self.psi0 = self.kern.psi0(self.Z, self.X, self.X_variance)
|
||||
self.psi1 = self.kern.psi1(self.Z, self.X, self.X_variance).T
|
||||
self.psi2 = self.kern.psi2(self.Z, self.X, self.X_variance)
|
||||
else:
|
||||
self.psi0 = self.kern.Kdiag(self.X)
|
||||
self.psi1 = self.kern.K(self.Z, self.X)
|
||||
self.psi2 = None
|
||||
|
||||
|
||||
def _computations(self):
|
||||
#factor Kmm
|
||||
self.Lm = jitchol(self.Kmm)
|
||||
self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
|
||||
Lmipsi1 = np.dot(self.Lmi,self.psi1)
|
||||
self.Qnn = np.dot(Lmipsi1.T,Lmipsi1).copy()
|
||||
self.Diag0 = self.psi0 - np.diag(self.Qnn)
|
||||
self.beta_star = self.likelihood.precision/(1. + self.likelihood.precision*self.Diag0[:,None]) #Includes Diag0 in the precision
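# beta_star is the FITC effective precision: folding the diagonal correction
# Diag0 = diag(Knn - Qnn) into the Gaussian noise gives
#   beta_star = beta / (1 + beta * Diag0) = 1 / (sigma^2 + Diag0),
# which is exactly the expression above.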
|
||||
self.V_star = self.beta_star * self.likelihood.Y
|
||||
|
||||
# The rather complex computations of self.A
|
||||
if self.has_uncertain_inputs:
|
||||
raise NotImplementedError
|
||||
else:
|
||||
if self.likelihood.is_heteroscedastic:
|
||||
assert self.likelihood.output_dim == 1
|
||||
tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.N)))
|
||||
tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
|
||||
self.A = tdot(tmp)
|
||||
|
||||
# factor B
|
||||
self.B = np.eye(self.M) + self.A
|
||||
self.LB = jitchol(self.B)
|
||||
self.LBi = chol_inv(self.LB)
|
||||
self.psi1V = np.dot(self.psi1, self.V_star)
|
||||
|
||||
Lmi_psi1V, info = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
|
||||
self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(Lmi_psi1V), lower=1, trans=0)
|
||||
|
||||
Kmmipsi1 = np.dot(self.Lmi.T,Lmipsi1)
|
||||
b_psi1_Ki = self.beta_star * Kmmipsi1.T
|
||||
Ki_pbp_Ki = np.dot(Kmmipsi1,b_psi1_Ki)
|
||||
Kmmi = np.dot(self.Lmi.T,self.Lmi)
|
||||
LBiLmi = np.dot(self.LBi,self.Lmi)
|
||||
LBL_inv = np.dot(LBiLmi.T,LBiLmi)
|
||||
VVT = np.outer(self.V_star,self.V_star)
|
||||
VV_p_Ki = np.dot(VVT,Kmmipsi1.T)
|
||||
Ki_pVVp_Ki = np.dot(Kmmipsi1,VV_p_Ki)
|
||||
psi1beta = self.psi1*self.beta_star.T
|
||||
H = self.Kmm + mdot(self.psi1,psi1beta.T)
|
||||
LH = jitchol(H)
|
||||
LHi = chol_inv(LH)
|
||||
Hi = np.dot(LHi.T,LHi)
|
||||
|
||||
betapsi1TLmiLBi = np.dot(psi1beta.T,LBiLmi.T)
|
||||
alpha = np.array([np.dot(a.T,a) for a in betapsi1TLmiLBi])[:,None]
|
||||
gamma_1 = mdot(VVT,self.psi1.T,Hi)
|
||||
pHip = mdot(self.psi1.T,Hi,self.psi1)
|
||||
gamma_2 = mdot(self.beta_star*pHip,self.V_star)
|
||||
gamma_3 = self.V_star * gamma_2
|
||||
|
||||
self._dL_dpsi0 = -0.5 * self.beta_star#dA_dpsi0: logdet(self.beta_star)
|
||||
self._dL_dpsi0 += .5 * self.V_star**2 #dA_psi0: yT*beta_star*y
|
||||
self._dL_dpsi0 += .5 *alpha #dC_dpsi0
|
||||
self._dL_dpsi0 += 0.5*mdot(self.beta_star*pHip,self.V_star)**2 - self.V_star * mdot(self.V_star.T,pHip*self.beta_star).T #dD_dpsi0
|
||||
|
||||
self._dL_dpsi1 = b_psi1_Ki.copy() #dA_dpsi1: logdet(self.beta_star)
|
||||
self._dL_dpsi1 += -np.dot(psi1beta.T,LBL_inv) #dC_dpsi1
|
||||
self._dL_dpsi1 += gamma_1 - mdot(psi1beta.T,Hi,self.psi1,gamma_1) #dD_dpsi1
|
||||
|
||||
self._dL_dKmm = -0.5 * np.dot(Kmmipsi1,b_psi1_Ki) #dA_dKmm: logdet(self.beta_star)
|
||||
self._dL_dKmm += .5*(LBL_inv - Kmmi) + mdot(LBL_inv,psi1beta,Kmmipsi1.T) #dC_dKmm
|
||||
self._dL_dKmm += -.5 * mdot(Hi,self.psi1,gamma_1) #dD_dKmm
|
||||
|
||||
self._dpsi1_dtheta = 0
|
||||
self._dpsi1_dX = 0
|
||||
self._dKmm_dtheta = 0
|
||||
self._dKmm_dX = 0
|
||||
|
||||
self._dpsi1_dX_jkj = 0
|
||||
self._dpsi1_dtheta_jkj = 0
|
||||
|
||||
for i,V_n,alpha_n,gamma_n,gamma_k in zip(range(self.N),self.V_star,alpha,gamma_2,gamma_3):
|
||||
K_pp_K = np.dot(Kmmipsi1[:,i:(i+1)],Kmmipsi1[:,i:(i+1)].T)
|
||||
|
||||
#Diag_dpsi1 = Diag_dA_dpsi1: yT*beta_star*y + Diag_dC_dpsi1 +Diag_dD_dpsi1
|
||||
_dpsi1 = (-V_n**2 - alpha_n + 2.*gamma_k - gamma_n**2) * Kmmipsi1.T[i:(i+1),:]
|
||||
|
||||
#Diag_dKmm = Diag_dA_dKmm: yT*beta_star*y +Diag_dC_dKmm +Diag_dD_dKmm
|
||||
_dKmm = .5*(V_n**2 + alpha_n + gamma_n**2 - 2.*gamma_k) * K_pp_K #Diag_dD_dKmm
|
||||
|
||||
self._dpsi1_dtheta += self.kern.dK_dtheta(_dpsi1,self.X[i:i+1,:],self.Z)
|
||||
self._dKmm_dtheta += self.kern.dK_dtheta(_dKmm,self.Z)
|
||||
|
||||
self._dKmm_dX += 2.*self.kern.dK_dX(_dKmm ,self.Z)
|
||||
self._dpsi1_dX += self.kern.dK_dX(_dpsi1.T,self.Z,self.X[i:i+1,:])
|
||||
|
||||
# the partial derivative vector for the likelihood
|
||||
if self.likelihood.Nparams == 0:
|
||||
# save computation here.
|
||||
self.partial_for_likelihood = None
|
||||
elif self.likelihood.is_heteroscedastic:
raise NotImplementedError, "heteroscedastic derivatives not implemented"
else:
# likelihood is not heteroscedastic
|
||||
dbstar_dnoise = self.likelihood.precision * (self.beta_star**2 * self.Diag0[:,None] - self.beta_star)
|
||||
Lmi_psi1 = mdot(self.Lmi,self.psi1)
|
||||
LBiLmipsi1 = np.dot(self.LBi,Lmi_psi1)
|
||||
aux_0 = np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1)
|
||||
aux_1 = self.likelihood.Y.T * np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1)
|
||||
aux_2 = np.dot(LBiLmipsi1.T,self._LBi_Lmi_psi1V)
|
||||
|
||||
dA_dnoise = 0.5 * self.D * (dbstar_dnoise/self.beta_star).sum() - 0.5 * self.D * np.sum(self.likelihood.Y**2 * dbstar_dnoise)
dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T)
|
||||
|
||||
dD_dnoise_1 = mdot(self.V_star*LBiLmipsi1.T,LBiLmipsi1*dbstar_dnoise.T*self.likelihood.Y.T)
|
||||
alpha = mdot(LBiLmipsi1,self.V_star)
|
||||
alpha_ = mdot(LBiLmipsi1.T,alpha)
|
||||
dD_dnoise_2 = -0.5 * self.D * np.sum(alpha_**2 * dbstar_dnoise )
|
||||
|
||||
dD_dnoise_1 = mdot(self.V_star.T,self.psi1.T,self.Lmi.T,self.LBi.T,self.LBi,self.Lmi,self.psi1,dbstar_dnoise*self.likelihood.Y)
|
||||
dD_dnoise_2 = 0.5*mdot(self.V_star.T,self.psi1.T,Hi,self.psi1,dbstar_dnoise*self.psi1.T,Hi,self.psi1,self.V_star)
|
||||
dD_dnoise = dD_dnoise_1 + dD_dnoise_2
|
||||
|
||||
self.partial_for_likelihood = dA_dnoise + dC_dnoise + dD_dnoise
|
||||
|
||||
def log_likelihood(self):
|
||||
""" Compute the (lower bound on the) log marginal likelihood """
|
||||
A = -0.5 * self.N * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
|
||||
C = -self.output_dim * (np.sum(np.log(np.diag(self.LB))))
|
||||
D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
|
||||
return A + C + D
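# Reading the terms off the code above:
#   A = -0.5*N*output_dim*log(2*pi) + 0.5*sum(log(beta_star)) - 0.5*y'*diag(beta_star)*y
#   C = -output_dim * sum(log(diag(LB)))
#   D = 0.5 * || LB^{-1} Lm^{-1} psi1 V_star ||^2
# so the returned value is the (bound on the) log marginal likelihood used as the FITC objective in this file.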
|
||||
|
||||
def _log_likelihood_gradients(self):
return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))
|
||||
|
||||
def dL_dtheta(self):
|
||||
if self.has_uncertain_inputs:
|
||||
raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
|
||||
else:
|
||||
dL_dtheta = self.kern.dKdiag_dtheta(self._dL_dpsi0,self.X)
|
||||
dL_dtheta += self.kern.dK_dtheta(self._dL_dpsi1,self.X,self.Z)
|
||||
dL_dtheta += self.kern.dK_dtheta(self._dL_dKmm,X=self.Z)
|
||||
dL_dtheta += self._dKmm_dtheta
|
||||
dL_dtheta += self._dpsi1_dtheta
|
||||
return dL_dtheta
|
||||
|
||||
def dL_dZ(self):
|
||||
if self.has_uncertain_inputs:
|
||||
raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
|
||||
else:
|
||||
dL_dZ = self.kern.dK_dX(self._dL_dpsi1.T,self.Z,self.X)
|
||||
dL_dZ += 2. * self.kern.dK_dX(self._dL_dKmm,X=self.Z)
|
||||
dL_dZ += self._dpsi1_dX
|
||||
dL_dZ += self._dKmm_dX
|
||||
return dL_dZ
|
||||
|
||||
def _raw_predict(self, Xnew, which_parts, full_cov=False):
|
||||
|
||||
if self.likelihood.is_heteroscedastic:
|
||||
Iplus_Dprod_i = 1./(1.+ self.Diag0 * self.likelihood.precision.flatten())
|
||||
self.Diag = self.Diag0 * Iplus_Dprod_i
|
||||
self.P = Iplus_Dprod_i[:,None] * self.psi1.T
|
||||
self.RPT0 = np.dot(self.Lmi,self.psi1)
|
||||
self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T))
|
||||
self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1)
|
||||
self.RPT = np.dot(self.R,self.P.T)
|
||||
self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
|
||||
self.w = self.Diag * self.likelihood.v_tilde
|
||||
self.Gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde))
|
||||
self.mu = self.w + np.dot(self.P,self.Gamma)
|
||||
|
||||
"""
|
||||
Make a prediction for the generalized FITC model
|
||||
|
||||
Arguments
|
||||
---------
|
||||
X : Input prediction data - Nx1 numpy array (floats)
|
||||
"""
|
||||
# q(u|f) = N(u| R0i*mu_u*f, R0i*C*R0i.T)
|
||||
|
||||
# Ci = I + (RPT0)Di(RPT0).T
|
||||
# C = I - [RPT0] * (D+[RPT0].T*[RPT0])^-1*[RPT0].T
|
||||
# = I - [RPT0] * (D + self.Qnn)^-1 * [RPT0].T
|
||||
# = I - [RPT0] * (U*U.T)^-1 * [RPT0].T
|
||||
# = I - V.T * V
|
||||
U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn)
|
||||
V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1)
|
||||
C = np.eye(self.M) - np.dot(V.T,V)
|
||||
mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:])
|
||||
#self.C = C
|
||||
#self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T
|
||||
#self.mu_u = mu_u
|
||||
#self.U = U
|
||||
# q(u|y) = N(u| R0i*mu_H,R0i*Sigma_H*R0i.T)
|
||||
mu_H = np.dot(mu_u,self.mu)
|
||||
self.mu_H = mu_H
|
||||
Sigma_H = C + np.dot(mu_u,np.dot(self.Sigma,mu_u.T))
|
||||
# q(f_star|y) = N(f_star|mu_star,sigma2_star)
|
||||
Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts)
|
||||
KR0T = np.dot(Kx.T,self.Lmi.T)
|
||||
mu_star = np.dot(KR0T,mu_H)
|
||||
if full_cov:
|
||||
Kxx = self.kern.K(Xnew,which_parts=which_parts)
|
||||
var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T))
|
||||
else:
|
||||
Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts)
|
||||
var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.M),KR0T.T),0))[:,None]
|
||||
return mu_star[:,None],var
|
||||
else:
|
||||
raise NotImplementedError, "homoscedastic FITC not implemented"
|
||||
"""
|
||||
Kx = self.kern.K(self.Z, Xnew)
|
||||
mu = mdot(Kx.T, self.C/self.scale_factor, self.psi1V)
|
||||
if full_cov:
|
||||
Kxx = self.kern.K(Xnew)
|
||||
var = Kxx - mdot(Kx.T, (self.Kmmi - self.C/self.scale_factor**2), Kx) #NOTE this won't work for plotting
|
||||
else:
|
||||
Kxx = self.kern.Kdiag(Xnew)
|
||||
var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.C/self.scale_factor**2, Kx),0)
|
||||
return mu,var[:,None]
|
||||
"""
|
||||
|
|
@@ -89,7 +89,7 @@ class GP(GPBase):
         model for a new variable Y* = v_tilde/tau_tilde, with a covariance
         matrix K* = K + diag(1./tau_tilde) plus a normalization term.
         """
-        return -0.5 * self.input_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z
+        return -0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z


     def _log_likelihood_gradients(self):
@ -224,14 +224,10 @@ class model(parameterised):
|
|||
for s in positive_strings:
|
||||
for i in self.grep_param_names(".*"+s):
|
||||
if not (i in currently_constrained):
|
||||
#to_make_positive.append(re.escape(param_names[i]))
|
||||
to_make_positive.append(i)
|
||||
if len(to_make_positive):
|
||||
#self.constrain_positive('(' + '|'.join(to_make_positive) + ')')
|
||||
self.constrain_positive(np.asarray(to_make_positive))
|
||||
|
||||
|
||||
|
||||
def objective_function(self, x):
|
||||
"""
|
||||
The objective function passed to the optimizer. It combines the likelihood and the priors.
|
||||
|
|
|
|||
|
|
@@ -142,17 +142,17 @@ class SparseGP(GPBase):
     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
         if self.likelihood.is_heteroscedastic:
-            A = -0.5 * self.N * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
-            B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
+            A = -0.5 * self.N * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
+            B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
         else:
-            A = -0.5 * self.N * self.input_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
-            B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
-            C = -self.input_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
+            A = -0.5 * self.N * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
+            B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
+            C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
         D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
         return A + B + C + D + self.likelihood.Z

     def _set_params(self, p):
-        self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim)
+        self.Z = p[:self.num_inducing * self.output_dim].reshape(self.num_inducing, self.input_dim)
         self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam])
         self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:])
         self._compute_kernel_matrices()
@ -5,7 +5,6 @@ import numpy as np
|
|||
from matplotlib import pyplot as plt
|
||||
|
||||
import GPy
|
||||
from GPy.util.datasets import swiss_roll_generated
|
||||
from GPy.core.transformations import logexp
|
||||
from GPy.models.bayesian_gplvm import BayesianGPLVM
|
||||
|
||||
|
|
@ -64,7 +63,7 @@ def GPLVM_oil_100(optimize=True):
|
|||
return m
|
||||
|
||||
def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False):
|
||||
from GPy.util.datasets import swiss_roll
|
||||
from GPy.util.datasets import swiss_roll_generated
|
||||
from GPy.core.transformations import logexp_clipped
|
||||
|
||||
data = swiss_roll_generated(N=N, sigma=sigma)
|
||||
|
|
@ -109,10 +108,10 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
|
|||
m.data_colors = c
|
||||
m.data_t = t
|
||||
|
||||
m.constrain('variance|length', logexp_clipped())
|
||||
m['lengthscale'] = 1. # X.var(0).max() / X.var(0)
|
||||
m['noise'] = Y.var() / 100.
|
||||
m.ensure_default_constraints()
|
||||
m['rbf_lengthscale'] = 1. # X.var(0).max() / X.var(0)
|
||||
m['noise_variance'] = Y.var() / 100.
|
||||
m['bias_variance'] = 0.05
|
||||
|
||||
if optimize:
|
||||
m.optimize('scg', messages=1)
|
||||
|
|
|
|||
|
|
@ -159,13 +159,13 @@ def coregionalisation_sparse(optim_iters=100):
|
|||
k = k1.prod(k2,tensor=True) + GPy.kern.white(2,0.001)
|
||||
|
||||
m = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=Z)
|
||||
m.scale_factor = 10000.
|
||||
m.constrain_fixed('.*rbf_var',1.)
|
||||
#m.constrain_positive('kappa')
|
||||
m.constrain_fixed('iip')
|
||||
m.constrain_bounded('noise_variance',1e-3,1e-1)
|
||||
m.ensure_default_constraints()
|
||||
m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)
|
||||
|
||||
#plotting:
|
||||
pb.figure()
|
||||
Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
|
||||
Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
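# the second column is the output index (0 for the first output, 1 for the second),
# matching the coregionalised product kernel built above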
|
||||
|
|
@ -300,7 +300,6 @@ def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100):
|
|||
m.checkgrad()
|
||||
|
||||
# optimize and plot
|
||||
pb.figure()
|
||||
m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
|
||||
m.plot()
|
||||
print(m)
|
||||
|
|
|
|||
|
|
@ -14,10 +14,10 @@ class Matern32(kernpart):
|
|||
|
||||
.. math::
|
||||
|
||||
k(r) = \\sigma^2 (1 + \\sqrt{3} r) \exp(- \sqrt{3} r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^D \\frac{(x_i-y_i)^2}{\ell_i^2} }
|
||||
k(r) = \\sigma^2 (1 + \\sqrt{3} r) \exp(- \sqrt{3} r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }
|
||||
|
||||
:param D: the number of input dimensions
|
||||
:type D: int
|
||||
:param input_dim: the number of input dimensions
|
||||
:type input_dim: int
|
||||
:param variance: the variance :math:`\sigma^2`
|
||||
:type variance: float
|
||||
:param lengthscale: the vector of lengthscale :math:`\ell_i`
|
||||
|
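The docstring formula above, k(r) = sigma^2 (1 + sqrt(3) r) exp(-sqrt(3) r) with r the lengthscale-scaled Euclidean distance, can be evaluated directly; a standalone numeric sketch (illustration only, not part of this commit):

import numpy as np
sigma2, ell = 1.5, np.array([1.0, 2.0])        # variance and per-dimension lengthscales
x, y = np.array([0.3, -0.2]), np.array([1.1, 0.5])
r = np.sqrt(np.sum((x - y)**2 / ell**2))       # scaled distance from the docstring
k = sigma2 * (1. + np.sqrt(3.) * r) * np.exp(-np.sqrt(3.) * r)
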
|
@ -28,8 +28,8 @@ class Matern32(kernpart):
|
|||
|
||||
"""
|
||||
|
||||
def __init__(self,D,variance=1.,lengthscale=None,ARD=False):
|
||||
self.D = D
|
||||
def __init__(self,input_dim,variance=1.,lengthscale=None,ARD=False):
|
||||
self.input_dim = input_dim
|
||||
self.ARD = ARD
|
||||
if ARD == False:
|
||||
self.Nparam = 2
|
||||
|
|
@ -40,13 +40,13 @@ class Matern32(kernpart):
|
|||
else:
|
||||
lengthscale = np.ones(1)
|
||||
else:
|
||||
self.Nparam = self.D + 1
|
||||
self.Nparam = self.input_dim + 1
|
||||
self.name = 'Mat32'
|
||||
if lengthscale is not None:
|
||||
lengthscale = np.asarray(lengthscale)
|
||||
assert lengthscale.size == self.D, "bad number of lengthscales"
|
||||
assert lengthscale.size == self.input_dim, "bad number of lengthscales"
|
||||
else:
|
||||
lengthscale = np.ones(self.D)
|
||||
lengthscale = np.ones(self.input_dim)
|
||||
self._set_params(np.hstack((variance,lengthscale.flatten())))
|
||||
|
||||
def _get_params(self):
|
||||
|
|
@ -111,7 +111,7 @@ class Matern32(kernpart):
|
|||
|
||||
def Gram_matrix(self,F,F1,F2,lower,upper):
|
||||
"""
|
||||
Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to D=1.
|
||||
Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1.
|
||||
|
||||
:param F: vector of functions
|
||||
:type F: np.array
|
||||
|
|
@ -122,7 +122,7 @@ class Matern32(kernpart):
|
|||
:param lower,upper: boundaries of the input domain
|
||||
:type lower,upper: floats
|
||||
"""
|
||||
assert self.D == 1
|
||||
assert self.input_dim == 1
|
||||
def L(x,i):
|
||||
return(3./self.lengthscale**2*F[i](x) + 2*np.sqrt(3)/self.lengthscale*F1[i](x) + F2[i](x))
|
||||
n = F.shape[0]
|
||||
|
|
|
|||
|
|
@ -13,10 +13,10 @@ class Matern52(kernpart):
|
|||
|
||||
.. math::
|
||||
|
||||
k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^D \\frac{(x_i-y_i)^2}{\ell_i^2} }
|
||||
k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }
|
||||
|
||||
:param D: the number of input dimensions
|
||||
:type D: int
|
||||
:param input_dim: the number of input dimensions
|
||||
:type input_dim: int
|
||||
:param variance: the variance :math:`\sigma^2`
|
||||
:type variance: float
|
||||
:param lengthscale: the vector of lengthscale :math:`\ell_i`
|
||||
|
|
@ -26,8 +26,8 @@ class Matern52(kernpart):
|
|||
:rtype: kernel object
|
||||
|
||||
"""
|
||||
def __init__(self,D,variance=1.,lengthscale=None,ARD=False):
|
||||
self.D = D
|
||||
def __init__(self,input_dim,variance=1.,lengthscale=None,ARD=False):
|
||||
self.input_dim = input_dim
|
||||
self.ARD = ARD
|
||||
if ARD == False:
|
||||
self.Nparam = 2
|
||||
|
|
@ -38,13 +38,13 @@ class Matern52(kernpart):
|
|||
else:
|
||||
lengthscale = np.ones(1)
|
||||
else:
|
||||
self.Nparam = self.D + 1
|
||||
self.Nparam = self.input_dim + 1
|
||||
self.name = 'Mat52'
|
||||
if lengthscale is not None:
|
||||
lengthscale = np.asarray(lengthscale)
|
||||
assert lengthscale.size == self.D, "bad number of lengthscales"
|
||||
assert lengthscale.size == self.input_dim, "bad number of lengthscales"
|
||||
else:
|
||||
lengthscale = np.ones(self.D)
|
||||
lengthscale = np.ones(self.input_dim)
|
||||
self._set_params(np.hstack((variance,lengthscale.flatten())))
|
||||
|
||||
def _get_params(self):
|
||||
|
|
@ -109,7 +109,7 @@ class Matern52(kernpart):
|
|||
|
||||
def Gram_matrix(self,F,F1,F2,F3,lower,upper):
|
||||
"""
|
||||
Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to D=1.
|
||||
Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1.
|
||||
|
||||
:param F: vector of functions
|
||||
:type F: np.array
|
||||
|
|
@ -122,7 +122,7 @@ class Matern52(kernpart):
|
|||
:param lower,upper: boundaries of the input domain
|
||||
:type lower,upper: floats
|
||||
"""
|
||||
assert self.D == 1
|
||||
assert self.input_dim == 1
|
||||
def L(x,i):
|
||||
return(5*np.sqrt(5)/self.lengthscale**3*F[i](x) + 15./self.lengthscale**2*F1[i](x)+ 3*np.sqrt(5)/self.lengthscale*F2[i](x) + F3[i](x))
|
||||
n = F.shape[0]
|
||||
|
|
|
|||
|
|
@ -9,14 +9,14 @@ from GPy.util.decorators import silence_errors
|
|||
|
||||
class periodic_Matern32(kernpart):
|
||||
"""
|
||||
Kernel of the periodic subspace (up to a given frequency) of a Matern 3/2 RKHS. Only defined for D=1.
|
||||
Kernel of the periodic subspace (up to a given frequency) of a Matern 3/2 RKHS. Only defined for input_dim=1.
|
||||
|
||||
:param D: the number of input dimensions
|
||||
:type D: int
|
||||
:param input_dim: the number of input dimensions
|
||||
:type input_dim: int
|
||||
:param variance: the variance of the Matern kernel
|
||||
:type variance: float
|
||||
:param lengthscale: the lengthscale of the Matern kernel
|
||||
:type lengthscale: np.ndarray of size (D,)
|
||||
:type lengthscale: np.ndarray of size (input_dim,)
|
||||
:param period: the period
|
||||
:type period: float
|
||||
:param n_freq: the number of frequencies considered for the periodic subspace
|
||||
|
|
@ -25,10 +25,10 @@ class periodic_Matern32(kernpart):
|
|||
|
||||
"""
|
||||
|
||||
def __init__(self,D=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
|
||||
assert D==1, "Periodic kernels are only defined for D=1"
|
||||
def __init__(self,input_dim=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
|
||||
assert input_dim==1, "Periodic kernels are only defined for input_dim=1"
|
||||
self.name = 'periodic_Mat32'
|
||||
self.D = D
|
||||
self.input_dim = input_dim
|
||||
if lengthscale is not None:
|
||||
lengthscale = np.asarray(lengthscale)
|
||||
assert lengthscale.size == 1, "Wrong size: only one lengthscale needed"
|
||||
|
|
|
|||
|
|
@ -9,14 +9,14 @@ from GPy.util.decorators import silence_errors
|
|||
|
||||
class periodic_Matern52(kernpart):
|
||||
"""
|
||||
Kernel of the periodic subspace (up to a given frequency) of a Matern 5/2 RKHS. Only defined for D=1.
|
||||
Kernel of the periodic subspace (up to a given frequency) of a Matern 5/2 RKHS. Only defined for input_dim=1.
|
||||
|
||||
:param D: the number of input dimensions
|
||||
:type D: int
|
||||
:param input_dim: the number of input dimensions
|
||||
:type input_dim: int
|
||||
:param variance: the variance of the Matern kernel
|
||||
:type variance: float
|
||||
:param lengthscale: the lengthscale of the Matern kernel
|
||||
:type lengthscale: np.ndarray of size (D,)
|
||||
:type lengthscale: np.ndarray of size (input_dim,)
|
||||
:param period: the period
|
||||
:type period: float
|
||||
:param n_freq: the number of frequencies considered for the periodic subspace
|
||||
|
|
@ -25,10 +25,10 @@ class periodic_Matern52(kernpart):
|
|||
|
||||
"""
|
||||
|
||||
def __init__(self,D=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
|
||||
assert D==1, "Periodic kernels are only defined for D=1"
|
||||
def __init__(self,input_dim=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
|
||||
assert input_dim==1, "Periodic kernels are only defined for input_dim=1"
|
||||
self.name = 'periodic_Mat52'
|
||||
self.D = D
|
||||
self.input_dim = input_dim
|
||||
if lengthscale is not None:
|
||||
lengthscale = np.asarray(lengthscale)
|
||||
assert lengthscale.size == 1, "Wrong size: only one lengthscale needed"
@ -9,14 +9,14 @@ from GPy.util.decorators import silence_errors
|
|||
|
||||
class periodic_exponential(kernpart):
|
||||
"""
|
||||
Kernel of the periodic subspace (up to a given frequency) of an exponential (Matern 1/2) RKHS. Only defined for D=1.
|
||||
Kernel of the periodic subspace (up to a given frequency) of an exponential (Matern 1/2) RKHS. Only defined for input_dim=1.
|
||||
|
||||
:param D: the number of input dimensions
|
||||
:type D: int
|
||||
:param input_dim: the number of input dimensions
|
||||
:type input_dim: int
|
||||
:param variance: the variance of the Matern kernel
|
||||
:type variance: float
|
||||
:param lengthscale: the lengthscale of the Matern kernel
|
||||
:type lengthscale: np.ndarray of size (D,)
|
||||
:type lengthscale: np.ndarray of size (input_dim,)
|
||||
:param period: the period
|
||||
:type period: float
|
||||
:param n_freq: the number of frequencies considered for the periodic subspace
|
||||
|
|
@ -25,10 +25,10 @@ class periodic_exponential(kernpart):
|
|||
|
||||
"""
|
||||
|
||||
def __init__(self,D=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
|
||||
assert D==1, "Periodic kernels are only defined for D=1"
|
||||
def __init__(self,input_dim=1,variance=1.,lengthscale=None,period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
|
||||
assert input_dim==1, "Periodic kernels are only defined for input_dim=1"
|
||||
self.name = 'periodic_exp'
|
||||
self.D = D
|
||||
self.input_dim = input_dim
|
||||
if lengthscale is not None:
|
||||
lengthscale = np.asarray(lengthscale)
|
||||
assert lengthscale.size == 1, "Wrong size: only one lengthscale needed"
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@ class prod_orthogonal(kernpart):
|
|||
|
||||
"""
|
||||
def __init__(self,k1,k2):
|
||||
self.D = k1.D + k2.D
|
||||
self.input_dim = k1.input_dim + k2.input_dim
|
||||
self.Nparam = k1.Nparam + k2.Nparam
|
||||
self.name = k1.name + '<times>' + k2.name
|
||||
self.k1 = k1
|
||||
|
|
@ -45,42 +45,42 @@ class prod_orthogonal(kernpart):
|
|||
"""derivative of the covariance matrix with respect to the parameters."""
|
||||
self._K_computations(X,X2)
|
||||
if X2 is None:
|
||||
self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.D], None, target[:self.k1.Nparam])
|
||||
self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.D:], None, target[self.k1.Nparam:])
|
||||
self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.input_dim], None, target[:self.k1.Nparam])
|
||||
self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.input_dim:], None, target[self.k1.Nparam:])
|
||||
else:
|
||||
self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.D], X2[:,:self.k1.D], target[:self.k1.Nparam])
|
||||
self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.D:], X2[:,self.k1.D:], target[self.k1.Nparam:])
|
||||
self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.input_dim], X2[:,:self.k1.input_dim], target[:self.k1.Nparam])
|
||||
self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.input_dim:], X2[:,self.k1.input_dim:], target[self.k1.Nparam:])
|
||||
|
||||
def Kdiag(self,X,target):
|
||||
"""Compute the diagonal of the covariance matrix associated to X."""
|
||||
target1 = np.zeros(X.shape[0])
|
||||
target2 = np.zeros(X.shape[0])
|
||||
self.k1.Kdiag(X[:,:self.k1.D],target1)
|
||||
self.k2.Kdiag(X[:,self.k1.D:],target2)
|
||||
self.k1.Kdiag(X[:,:self.k1.input_dim],target1)
|
||||
self.k2.Kdiag(X[:,self.k1.input_dim:],target2)
|
||||
target += target1 * target2
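# i.e. the product kernel acts on disjoint column blocks:
#   k((x_a, x_b), (y_a, y_b)) = k1(x_a, y_a) * k2(x_b, y_b),
# where x_a holds the first k1.input_dim columns and x_b the remaining columns.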
|
||||
|
||||
def dKdiag_dtheta(self,dL_dKdiag,X,target):
|
||||
K1 = np.zeros(X.shape[0])
|
||||
K2 = np.zeros(X.shape[0])
|
||||
self.k1.Kdiag(X[:,:self.k1.D],K1)
|
||||
self.k2.Kdiag(X[:,self.k1.D:],K2)
|
||||
self.k1.dKdiag_dtheta(dL_dKdiag*K2,X[:,:self.k1.D],target[:self.k1.Nparam])
|
||||
self.k2.dKdiag_dtheta(dL_dKdiag*K1,X[:,self.k1.D:],target[self.k1.Nparam:])
|
||||
self.k1.Kdiag(X[:,:self.k1.input_dim],K1)
|
||||
self.k2.Kdiag(X[:,self.k1.input_dim:],K2)
|
||||
self.k1.dKdiag_dtheta(dL_dKdiag*K2,X[:,:self.k1.input_dim],target[:self.k1.Nparam])
|
||||
self.k2.dKdiag_dtheta(dL_dKdiag*K1,X[:,self.k1.input_dim:],target[self.k1.Nparam:])
|
||||
|
||||
def dK_dX(self,dL_dK,X,X2,target):
|
||||
"""derivative of the covariance matrix with respect to X."""
|
||||
self._K_computations(X,X2)
|
||||
self.k1.dK_dX(dL_dK*self._K2, X[:,:self.k1.D], X2[:,:self.k1.D], target)
|
||||
self.k2.dK_dX(dL_dK*self._K1, X[:,self.k1.D:], X2[:,self.k1.D:], target)
|
||||
self.k1.dK_dX(dL_dK*self._K2, X[:,:self.k1.input_dim], X2[:,:self.k1.input_dim], target)
|
||||
self.k2.dK_dX(dL_dK*self._K1, X[:,self.k1.input_dim:], X2[:,self.k1.input_dim:], target)
|
||||
|
||||
def dKdiag_dX(self, dL_dKdiag, X, target):
|
||||
K1 = np.zeros(X.shape[0])
|
||||
K2 = np.zeros(X.shape[0])
|
||||
self.k1.Kdiag(X[:,0:self.k1.D],K1)
|
||||
self.k2.Kdiag(X[:,self.k1.D:],K2)
|
||||
self.k1.Kdiag(X[:,0:self.k1.input_dim],K1)
|
||||
self.k2.Kdiag(X[:,self.k1.input_dim:],K2)
|
||||
|
||||
self.k1.dK_dX(dL_dKdiag*K2, X[:,:self.k1.D], target)
|
||||
self.k2.dK_dX(dL_dKdiag*K1, X[:,self.k1.D:], target)
|
||||
self.k1.dK_dX(dL_dKdiag*K2, X[:,:self.k1.input_dim], target)
|
||||
self.k2.dK_dX(dL_dKdiag*K1, X[:,self.k1.input_dim:], target)
|
||||
|
||||
def _K_computations(self,X,X2):
|
||||
if not (np.array_equal(X,self._X) and np.array_equal(X2,self._X2) and np.array_equal(self._params , self._get_params())):
|
||||
|
|
@ -90,12 +90,12 @@ class prod_orthogonal(kernpart):
|
|||
self._X2 = None
|
||||
self._K1 = np.zeros((X.shape[0],X.shape[0]))
|
||||
self._K2 = np.zeros((X.shape[0],X.shape[0]))
|
||||
self.k1.K(X[:,:self.k1.D],None,self._K1)
|
||||
self.k2.K(X[:,self.k1.D:],None,self._K2)
|
||||
self.k1.K(X[:,:self.k1.input_dim],None,self._K1)
|
||||
self.k2.K(X[:,self.k1.input_dim:],None,self._K2)
|
||||
else:
|
||||
self._X2 = X2.copy()
|
||||
self._K1 = np.zeros((X.shape[0],X2.shape[0]))
|
||||
self._K2 = np.zeros((X.shape[0],X2.shape[0]))
|
||||
self.k1.K(X[:,:self.k1.D],X2[:,:self.k1.D],self._K1)
|
||||
self.k2.K(X[:,self.k1.D:],X2[:,self.k1.D:],self._K2)
|
||||
self.k1.K(X[:,:self.k1.input_dim],X2[:,:self.k1.input_dim],self._K1)
|
||||
self.k2.K(X[:,self.k1.input_dim:],X2[:,self.k1.input_dim:],self._K2)
|
||||
|
||||
|
|
|
|||
|
|
@ -13,8 +13,8 @@ class rational_quadratic(kernpart):
|
|||
|
||||
k(r) = \sigma^2 \\bigg( 1 + \\frac{r^2}{2 \ell^2} \\bigg)^{- \\alpha} \ \ \ \ \ \\text{ where } r^2 = (x-y)^2
|
||||
|
||||
:param D: the number of input dimensions
|
||||
:type D: int (D=1 is the only value currently supported)
|
||||
:param input_dim: the number of input dimensions
|
||||
:type input_dim: int (input_dim=1 is the only value currently supported)
|
||||
:param variance: the variance :math:`\sigma^2`
|
||||
:type variance: float
|
||||
:param lengthscale: the lengthscale :math:`\ell`
|
||||
|
|
@ -24,9 +24,9 @@ class rational_quadratic(kernpart):
|
|||
:rtype: kernpart object
|
||||
|
||||
"""
|
||||
def __init__(self,D,variance=1.,lengthscale=1.,power=1.):
|
||||
assert D == 1, "For this kernel we assume D=1"
|
||||
self.D = D
|
||||
def __init__(self,input_dim,variance=1.,lengthscale=1.,power=1.):
|
||||
assert input_dim == 1, "For this kernel we assume input_dim=1"
|
||||
self.input_dim = input_dim
|
||||
self.Nparam = 3
|
||||
self.name = 'rat_quad'
|
||||
self.variance = variance
|
||||
|
|
|
|||
187
GPy/kern/rbf.py
|
|
@ -31,7 +31,7 @@ class rbf(kernpart):
|
|||
.. Note: this object implements both the ARD and 'spherical' version of the function
|
||||
"""
|
||||
|
||||
def __init__(self,input_dim,variance=1.,lengthscale=None,ARD=False):
|
||||
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False):
|
||||
self.input_dim = input_dim
|
||||
self.name = 'rbf'
|
||||
self.ARD = ARD
|
||||
|
|
@ -50,52 +50,52 @@ class rbf(kernpart):
|
|||
else:
|
||||
lengthscale = np.ones(self.input_dim)
|
||||
|
||||
self._set_params(np.hstack((variance,lengthscale.flatten())))
|
||||
self._set_params(np.hstack((variance, lengthscale.flatten())))
|
||||
|
||||
#initialize cache
|
||||
self._Z, self._mu, self._S = np.empty(shape=(3,1))
|
||||
self._X, self._X2, self._params = np.empty(shape=(3,1))
|
||||
# initialize cache
|
||||
self._Z, self._mu, self._S = np.empty(shape=(3, 1))
|
||||
self._X, self._X2, self._params = np.empty(shape=(3, 1))
|
||||
|
||||
#a set of optional args to pass to weave
|
||||
# a set of optional args to pass to weave
|
||||
self.weave_options = {'headers' : ['<omp.h>'],
|
||||
'extra_compile_args': ['-fopenmp -O3'], #-march=native'],
|
||||
'extra_compile_args': ['-fopenmp -O3'], # -march=native'],
|
||||
'extra_link_args' : ['-lgomp']}
|
||||
|
||||
|
||||
|
||||
def _get_params(self):
|
||||
return np.hstack((self.variance,self.lengthscale))
|
||||
return np.hstack((self.variance, self.lengthscale))
|
||||
|
||||
def _set_params(self,x):
|
||||
assert x.size==(self.Nparam)
|
||||
def _set_params(self, x):
|
||||
assert x.size == (self.Nparam)
|
||||
self.variance = x[0]
|
||||
self.lengthscale = x[1:]
|
||||
self.lengthscale2 = np.square(self.lengthscale)
|
||||
#reset cached results
|
||||
self._X, self._X2, self._params = np.empty(shape=(3,1))
|
||||
self._Z, self._mu, self._S = np.empty(shape=(3,1)) # cached versions of Z,mu,S
|
||||
# reset cached results
|
||||
self._X, self._X2, self._params = np.empty(shape=(3, 1))
|
||||
self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S
|
||||
|
||||
def _get_param_names(self):
|
||||
if self.Nparam == 2:
|
||||
return ['variance','lengthscale']
|
||||
return ['variance', 'lengthscale']
|
||||
else:
|
||||
return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscale.size)]
|
||||
return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)]
|
||||
|
||||
def K(self,X,X2,target):
|
||||
self._K_computations(X,X2)
|
||||
target += self.variance*self._K_dvar
|
||||
def K(self, X, X2, target):
|
||||
self._K_computations(X, X2)
|
||||
target += self.variance * self._K_dvar
|
||||
|
||||
def Kdiag(self,X,target):
|
||||
np.add(target,self.variance,target)
|
||||
def Kdiag(self, X, target):
|
||||
np.add(target, self.variance, target)
|
||||
|
||||
def dK_dtheta(self,dL_dK,X,X2,target):
|
||||
self._K_computations(X,X2)
|
||||
target[0] += np.sum(self._K_dvar*dL_dK)
|
||||
def dK_dtheta(self, dL_dK, X, X2, target):
|
||||
self._K_computations(X, X2)
|
||||
target[0] += np.sum(self._K_dvar * dL_dK)
|
||||
if self.ARD:
|
||||
dvardLdK = self._K_dvar*dL_dK
|
||||
var_len3 = self.variance/np.power(self.lengthscale,3)
|
||||
dvardLdK = self._K_dvar * dL_dK
|
||||
var_len3 = self.variance / np.power(self.lengthscale, 3)
|
||||
if X2 is None:
|
||||
#save computation for the symmetrical case
|
||||
# save computation for the symmetrical case
|
||||
dvardLdK += dvardLdK.T
|
||||
code = """
|
||||
int q,i,j;
|
||||
|
|
@ -126,23 +126,23 @@ class rbf(kernpart):
|
|||
}
|
||||
"""
|
||||
N, num_inducing, input_dim = X.shape[0], X2.shape[0], self.input_dim
|
||||
#[np.add(target[1+q:2+q],var_len3[q]*np.sum(dvardLdK*np.square(X[:,q][:,None]-X2[:,q][None,:])),target[1+q:2+q]) for q in range(self.input_dim)]
|
||||
# [np.add(target[1+q:2+q],var_len3[q]*np.sum(dvardLdK*np.square(X[:,q][:,None]-X2[:,q][None,:])),target[1+q:2+q]) for q in range(self.input_dim)]
|
||||
weave.inline(code, arg_names=['N','num_inducing','input_dim','X','X2','target','dvardLdK','var_len3'],
|
||||
type_converters=weave.converters.blitz,**self.weave_options)
|
||||
type_converters=weave.converters.blitz, **self.weave_options)
|
||||
else:
|
||||
target[1] += (self.variance/self.lengthscale)*np.sum(self._K_dvar*self._K_dist2*dL_dK)
|
||||
target[1] += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK)
|
||||
|
||||
def dKdiag_dtheta(self,dL_dKdiag,X,target):
|
||||
#NB: derivative of diagonal elements wrt lengthscale is 0
|
||||
def dKdiag_dtheta(self, dL_dKdiag, X, target):
|
||||
# NB: derivative of diagonal elements wrt lengthscale is 0
|
||||
target[0] += np.sum(dL_dKdiag)
|
||||
|
||||
def dK_dX(self,dL_dK,X,X2,target):
|
||||
self._K_computations(X,X2)
|
||||
_K_dist = X[:,None,:]-X2[None,:,:] #don't cache this in _K_computations because it is high memory. If this function is being called, chances are we're not in the high memory arena.
|
||||
dK_dX = (-self.variance/self.lengthscale2)*np.transpose(self._K_dvar[:,:,np.newaxis]*_K_dist,(1,0,2))
|
||||
target += np.sum(dK_dX*dL_dK.T[:,:,None],0)
|
||||
def dK_dX(self, dL_dK, X, X2, target):
|
||||
self._K_computations(X, X2)
|
||||
_K_dist = X[:, None, :] - X2[None, :, :] # don't cache this in _K_computations because it is high memory. If this function is being called, chances are we're not in the high memory arena.
|
||||
dK_dX = (-self.variance / self.lengthscale2) * np.transpose(self._K_dvar[:, :, np.newaxis] * _K_dist, (1, 0, 2))
|
||||
target += np.sum(dK_dX * dL_dK.T[:, :, None], 0)
|
||||
|
||||
def dKdiag_dX(self,dL_dKdiag,X,target):
|
||||
def dKdiag_dX(self, dL_dKdiag, X, target):
|
||||
pass
|
||||
|
||||
|
||||
|
|
@ -150,96 +150,95 @@ class rbf(kernpart):
|
|||
# PSI statistics #
|
||||
#---------------------------------------#
|
||||
|
||||
def psi0(self,Z,mu,S,target):
|
||||
def psi0(self, Z, mu, S, target):
|
||||
target += self.variance
|
||||
|
||||
def dpsi0_dtheta(self,dL_dpsi0,Z,mu,S,target):
|
||||
def dpsi0_dtheta(self, dL_dpsi0, Z, mu, S, target):
|
||||
target[0] += np.sum(dL_dpsi0)
|
||||
|
||||
def dpsi0_dmuS(self,dL_dpsi0,Z,mu,S,target_mu,target_S):
|
||||
def dpsi0_dmuS(self, dL_dpsi0, Z, mu, S, target_mu, target_S):
|
||||
pass
|
||||
|
||||
def psi1(self,Z,mu,S,target):
|
||||
self._psi_computations(Z,mu,S)
|
||||
def psi1(self, Z, mu, S, target):
|
||||
self._psi_computations(Z, mu, S)
|
||||
target += self._psi1
|
||||
|
||||
def dpsi1_dtheta(self,dL_dpsi1,Z,mu,S,target):
|
||||
self._psi_computations(Z,mu,S)
|
||||
denom_deriv = S[:,None,:]/(self.lengthscale**3+self.lengthscale*S[:,None,:])
|
||||
d_length = self._psi1[:,:,None]*(self.lengthscale*np.square(self._psi1_dist/(self.lengthscale2+S[:,None,:])) + denom_deriv)
|
||||
target[0] += np.sum(dL_dpsi1*self._psi1/self.variance)
|
||||
dpsi1_dlength = d_length*dL_dpsi1[:,:,None]
|
||||
def dpsi1_dtheta(self, dL_dpsi1, Z, mu, S, target):
|
||||
self._psi_computations(Z, mu, S)
|
||||
denom_deriv = S[:, None, :] / (self.lengthscale ** 3 + self.lengthscale * S[:, None, :])
|
||||
d_length = self._psi1[:, :, None] * (self.lengthscale * np.square(self._psi1_dist / (self.lengthscale2 + S[:, None, :])) + denom_deriv)
|
||||
target[0] += np.sum(dL_dpsi1 * self._psi1 / self.variance)
|
||||
dpsi1_dlength = d_length * dL_dpsi1[:, :, None]
|
||||
if not self.ARD:
|
||||
target[1] += dpsi1_dlength.sum()
|
||||
else:
|
||||
target[1:] += dpsi1_dlength.sum(0).sum(0)
|
||||
|
||||
def dpsi1_dZ(self,dL_dpsi1,Z,mu,S,target):
|
||||
self._psi_computations(Z,mu,S)
|
||||
denominator = (self.lengthscale2*(self._psi1_denom))
|
||||
dpsi1_dZ = - self._psi1[:,:,None] * ((self._psi1_dist/denominator))
|
||||
target += np.sum(dL_dpsi1.T[:,:,None] * dpsi1_dZ, 0)
|
||||
def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, target):
|
||||
self._psi_computations(Z, mu, S)
|
||||
denominator = (self.lengthscale2 * (self._psi1_denom))
|
||||
dpsi1_dZ = -self._psi1[:, :, None] * ((self._psi1_dist / denominator))
|
||||
target += np.sum(dL_dpsi1.T[:, :, None] * dpsi1_dZ, 0)
|
||||
|
||||
def dpsi1_dmuS(self,dL_dpsi1,Z,mu,S,target_mu,target_S):
|
||||
self._psi_computations(Z,mu,S)
|
||||
tmp = self._psi1[:,:,None]/self.lengthscale2/self._psi1_denom
|
||||
target_mu += np.sum(dL_dpsi1.T[:, :, None]*tmp*self._psi1_dist,1)
|
||||
target_S += np.sum(dL_dpsi1.T[:, :, None]*0.5*tmp*(self._psi1_dist_sq-1),1)
|
||||
def dpsi1_dmuS(self, dL_dpsi1, Z, mu, S, target_mu, target_S):
|
||||
self._psi_computations(Z, mu, S)
|
||||
tmp = self._psi1[:, :, None] / self.lengthscale2 / self._psi1_denom
|
||||
target_mu += np.sum(dL_dpsi1.T[:, :, None] * tmp * self._psi1_dist, 1)
|
||||
target_S += np.sum(dL_dpsi1.T[:, :, None] * 0.5 * tmp * (self._psi1_dist_sq - 1), 1)
|
||||
|
||||
def psi2(self,Z,mu,S,target):
|
||||
self._psi_computations(Z,mu,S)
|
||||
def psi2(self, Z, mu, S, target):
|
||||
self._psi_computations(Z, mu, S)
|
||||
target += self._psi2
|
||||
|
||||
def dpsi2_dtheta(self,dL_dpsi2,Z,mu,S,target):
|
||||
def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S, target):
|
||||
"""Shape N,num_inducing,num_inducing,Ntheta"""
|
||||
self._psi_computations(Z,mu,S)
|
||||
d_var = 2.*self._psi2/self.variance
|
||||
d_length = 2.*self._psi2[:,:,:,None]*(self._psi2_Zdist_sq*self._psi2_denom + self._psi2_mudist_sq + S[:,None,None,:]/self.lengthscale2)/(self.lengthscale*self._psi2_denom)
|
||||
self._psi_computations(Z, mu, S)
|
||||
d_var = 2.*self._psi2 / self.variance
|
||||
d_length = 2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] / self.lengthscale2) / (self.lengthscale * self._psi2_denom)
|
||||
|
||||
target[0] += np.sum(dL_dpsi2*d_var)
|
||||
dpsi2_dlength = d_length*dL_dpsi2[:,:,:,None]
|
||||
target[0] += np.sum(dL_dpsi2 * d_var)
|
||||
dpsi2_dlength = d_length * dL_dpsi2[:, :, :, None]
|
||||
if not self.ARD:
|
||||
target[1] += dpsi2_dlength.sum()
|
||||
else:
|
||||
target[1:] += dpsi2_dlength.sum(0).sum(0).sum(0)
|
||||
|
||||
def dpsi2_dZ(self,dL_dpsi2,Z,mu,S,target):
|
||||
self._psi_computations(Z,mu,S)
|
||||
term1 = self._psi2_Zdist/self.lengthscale2 # num_inducing, num_inducing, input_dim
|
||||
term2 = self._psi2_mudist/self._psi2_denom/self.lengthscale2 # N, num_inducing, num_inducing, input_dim
|
||||
dZ = self._psi2[:,:,:,None] * (term1[None] + term2)
|
||||
target += (dL_dpsi2[:,:,:,None]*dZ).sum(0).sum(0)
|
||||
def dpsi2_dZ(self, dL_dpsi2, Z, mu, S, target):
|
||||
self._psi_computations(Z, mu, S)
|
||||
term1 = self._psi2_Zdist / self.lengthscale2 # num_inducing, num_inducing, input_dim
|
||||
term2 = self._psi2_mudist / self._psi2_denom / self.lengthscale2 # N, num_inducing, num_inducing, input_dim
|
||||
dZ = self._psi2[:, :, :, None] * (term1[None] + term2)
|
||||
target += (dL_dpsi2[:, :, :, None] * dZ).sum(0).sum(0)
|
||||
|
||||
def dpsi2_dmuS(self,dL_dpsi2,Z,mu,S,target_mu,target_S):
|
||||
def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, target_mu, target_S):
|
||||
"""Think N,num_inducing,num_inducing,input_dim """
|
||||
self._psi_computations(Z,mu,S)
|
||||
tmp = self._psi2[:,:,:,None]/self.lengthscale2/self._psi2_denom
|
||||
target_mu += -2.*(dL_dpsi2[:,:,:,None]*tmp*self._psi2_mudist).sum(1).sum(1)
|
||||
target_S += (dL_dpsi2[:,:,:,None]*tmp*(2.*self._psi2_mudist_sq-1)).sum(1).sum(1)
|
||||
|
||||
self._psi_computations(Z, mu, S)
|
||||
tmp = self._psi2[:, :, :, None] / self.lengthscale2 / self._psi2_denom
|
||||
target_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * self._psi2_mudist).sum(1).sum(1)
|
||||
target_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*self._psi2_mudist_sq - 1)).sum(1).sum(1)
|
||||
|
||||
#---------------------------------------#
|
||||
# Precomputations #
|
||||
#---------------------------------------#
|
||||
|
||||
def _K_computations(self,X,X2):
|
||||
if not (np.array_equal(X,self._X) and np.array_equal(X2,self._X2) and np.array_equal(self._params , self._get_params())):
|
||||
def _K_computations(self, X, X2):
|
||||
if not (np.array_equal(X, self._X) and np.array_equal(X2, self._X2) and np.array_equal(self._params , self._get_params())):
|
||||
self._X = X.copy()
|
||||
self._params = self._get_params().copy()
|
||||
if X2 is None:
|
||||
self._X2 = None
|
||||
X = X/self.lengthscale
|
||||
Xsquare = np.sum(np.square(X),1)
|
||||
self._K_dist2 = -2.*tdot(X) + (Xsquare[:,None] + Xsquare[None,:])
|
||||
X = X / self.lengthscale
|
||||
Xsquare = np.sum(np.square(X), 1)
|
||||
self._K_dist2 = -2.*tdot(X) + (Xsquare[:, None] + Xsquare[None, :])
|
||||
else:
|
||||
self._X2 = X2.copy()
|
||||
X = X/self.lengthscale
|
||||
X2 = X2/self.lengthscale
|
||||
self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X),1)[:,None] + np.sum(np.square(X2),1)[None,:])
|
||||
self._K_dvar = np.exp(-0.5*self._K_dist2)
|
||||
X = X / self.lengthscale
|
||||
X2 = X2 / self.lengthscale
|
||||
self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :])
|
||||
self._K_dvar = np.exp(-0.5 * self._K_dist2)
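# _K_dist2 uses the standard expansion ||x - x'||^2 = ||x||^2 + ||x'||^2 - 2 x.x',
# applied after dividing the inputs by the lengthscale(s), so that
#   _K_dvar[i, j] = exp(-0.5 * ||(X[i] - X2[j]) / lengthscale||^2)
# is the unit-variance RBF correlation between the i-th and j-th inputs.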
|
||||
|
||||
def _psi_computations(self,Z,mu,S):
|
||||
#here are the "statistics" for psi1 and psi2
|
||||
def _psi_computations(self, Z, mu, S):
|
||||
# here are the "statistics" for psi1 and psi2
|
||||
if not np.array_equal(Z, self._Z):
|
||||
#Z has changed, compute Z specific stuff
|
||||
self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # num_inducing,num_inducing,input_dim
|
||||
|
|
@ -278,13 +277,13 @@ class rbf(kernpart):
|
|||
psi2 = np.empty((N,num_inducing,num_inducing))
|
||||
|
||||
psi2_Zdist_sq = self._psi2_Zdist_sq
|
||||
_psi2_denom = self._psi2_denom.squeeze().reshape(N,self.input_dim)
|
||||
half_log_psi2_denom = 0.5*np.log(self._psi2_denom).squeeze().reshape(N,self.input_dim)
|
||||
_psi2_denom = self._psi2_denom.squeeze().reshape(N, self.input_dim)
|
||||
half_log_psi2_denom = 0.5 * np.log(self._psi2_denom).squeeze().reshape(N, self.input_dim)
|
||||
variance_sq = float(np.square(self.variance))
|
||||
if self.ARD:
|
||||
lengthscale2 = self.lengthscale2
|
||||
else:
|
||||
lengthscale2 = np.ones(input_dim)*self.lengthscale2
|
||||
lengthscale2 = np.ones(input_dim) * self.lengthscale2
|
||||
code = """
|
||||
double tmp;
|
||||
|
||||
|
|
@ -326,6 +325,6 @@ class rbf(kernpart):
|
|||
"""
|
||||
weave.inline(code, support_code=support_code, libraries=['gomp'],
|
||||
arg_names=['N','num_inducing','input_dim','mu','Zhat','mudist_sq','mudist','lengthscale2','_psi2_denom','psi2_Zdist_sq','psi2_exponent','half_log_psi2_denom','psi2','variance_sq'],
|
||||
type_converters=weave.converters.blitz,**self.weave_options)
|
||||
type_converters=weave.converters.blitz, **self.weave_options)
|
||||
|
||||
return mudist,mudist_sq, psi2_exponent, psi2
|
||||
return mudist, mudist_sq, psi2_exponent, psi2
|
||||
|
|
|
|||
|
|
@ -7,26 +7,26 @@ from kernpart import kernpart
|
|||
import numpy as np
|
||||
|
||||
class rbfcos(kernpart):
|
||||
def __init__(self,D,variance=1.,frequencies=None,bandwidths=None,ARD=False):
|
||||
self.D = D
|
||||
def __init__(self,input_dim,variance=1.,frequencies=None,bandwidths=None,ARD=False):
|
||||
self.input_dim = input_dim
|
||||
self.name = 'rbfcos'
|
||||
if self.D>10:
|
||||
if self.input_dim>10:
|
||||
print "Warning: the rbfcos kernel requires a lot of memory for high dimensional inputs"
|
||||
self.ARD = ARD
|
||||
|
||||
#set the default frequencies and bandwidths, appropriate Nparam
|
||||
if ARD:
|
||||
self.Nparam = 2*self.D + 1
|
||||
self.Nparam = 2*self.input_dim + 1
|
||||
if frequencies is not None:
|
||||
frequencies = np.asarray(frequencies)
|
||||
assert frequencies.size == self.D, "bad number of frequencies"
|
||||
assert frequencies.size == self.input_dim, "bad number of frequencies"
|
||||
else:
|
||||
frequencies = np.ones(self.D)
|
||||
frequencies = np.ones(self.input_dim)
|
||||
if bandwidths is not None:
|
||||
bandwidths = np.asarray(bandwidths)
|
||||
assert bandwidths.size == self.D, "bad number of bandwidths"
|
||||
assert bandwidths.size == self.input_dim, "bad number of bandwidths"
|
||||
else:
|
||||
bandwidths = np.ones(self.D)
|
||||
bandwidths = np.ones(self.input_dim)
|
||||
else:
|
||||
self.Nparam = 3
|
||||
if frequencies is not None:
|
||||
|
|
@ -54,8 +54,8 @@ class rbfcos(kernpart):
|
|||
assert x.size==(self.Nparam)
|
||||
if self.ARD:
|
||||
self.variance = x[0]
|
||||
self.frequencies = x[1:1+self.D]
|
||||
self.bandwidths = x[1+self.D:]
|
||||
self.frequencies = x[1:1+self.input_dim]
|
||||
self.bandwidths = x[1+self.input_dim:]
|
||||
else:
|
||||
self.variance, self.frequencies, self.bandwidths = x
|
||||
|
||||
|
|
@ -63,7 +63,7 @@ class rbfcos(kernpart):
|
|||
if self.Nparam == 3:
|
||||
return ['variance','frequency','bandwidth']
|
||||
else:
|
||||
return ['variance']+['frequency_%i'%i for i in range(self.D)]+['bandwidth_%i'%i for i in range(self.D)]
|
||||
return ['variance']+['frequency_%i'%i for i in range(self.input_dim)]+['bandwidth_%i'%i for i in range(self.input_dim)]
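# with ARD there are 1 + 2*input_dim parameters: one variance plus a frequency
# and a bandwidth per input dimension, matching Nparam = 2*input_dim + 1 above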
|
||||
|
||||
def K(self,X,X2,target):
|
||||
self._K_computations(X,X2)
|
||||
|
|
@ -76,9 +76,9 @@ class rbfcos(kernpart):
|
|||
self._K_computations(X,X2)
|
||||
target[0] += np.sum(dL_dK*self._dvar)
|
||||
if self.ARD:
|
||||
for q in xrange(self.D):
|
||||
for q in xrange(self.input_dim):
|
||||
target[q+1] += -2.*np.pi*self.variance*np.sum(dL_dK*self._dvar*np.tan(2.*np.pi*self._dist[:,:,q]*self.frequencies[q])*self._dist[:,:,q])
|
||||
target[q+1+self.D] += -2.*np.pi**2*self.variance*np.sum(dL_dK*self._dvar*self._dist2[:,:,q])
|
||||
target[q+1+self.input_dim] += -2.*np.pi**2*self.variance*np.sum(dL_dK*self._dvar*self._dist2[:,:,q])
|
||||
else:
|
||||
target[1] += -2.*np.pi*self.variance*np.sum(dL_dK*self._dvar*np.sum(np.tan(2.*np.pi*self._dist*self.frequencies)*self._dist,-1))
|
||||
target[2] += -2.*np.pi**2*self.variance*np.sum(dL_dK*self._dvar*self._dist2.sum(-1))
|
||||
|
|
@ -100,7 +100,7 @@ class rbfcos(kernpart):
|
|||
self._X = X.copy()
|
||||
self._X2 = X2.copy()
|
||||
|
||||
#do the distances: this will be high memory for large D
|
||||
#do the distances: this will be high memory for large input_dim
|
||||
#NB: we don't take the abs of the dist because cos is symmetric
|
||||
self._dist = X[:,None,:] - X2[None,:,:]
|
||||
self._dist2 = np.square(self._dist)
|
||||
|
|
|
|||
|
|
@ -13,16 +13,16 @@ class spline(kernpart):
|
|||
"""
|
||||
Spline kernel
|
||||
|
||||
:param D: the number of input dimensions (fixed to 1 right now TODO)
|
||||
:type D: int
|
||||
:param input_dim: the number of input dimensions (fixed to 1 right now TODO)
|
||||
:type input_dim: int
|
||||
:param variance: the variance of the kernel
|
||||
:type variance: float
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self,D,variance=1.,lengthscale=1.):
|
||||
self.D = D
|
||||
assert self.D==1
|
||||
def __init__(self,input_dim,variance=1.,lengthscale=1.):
|
||||
self.input_dim = input_dim
|
||||
assert self.input_dim==1
|
||||
self.Nparam = 1
|
||||
self.name = 'spline'
|
||||
self._set_params(np.squeeze(variance))
|
||||
|
|
|
|||
|
|
@ -11,16 +11,16 @@ class symmetric(kernpart):
|
|||
:param k: the kernel to symmetrify
|
||||
:type k: kernpart
|
||||
:param transform: the transform to use in symmetrification (allows symmetry on specified axes)
|
||||
:type transform: A numpy array (D x D) specifying the transform
|
||||
:type transform: A numpy array (input_dim x input_dim) specifying the transform
|
||||
:rtype: kernpart
|
||||
|
||||
"""
|
||||
def __init__(self,k,transform=None):
|
||||
if transform is None:
|
||||
transform = np.eye(k.D)*-1.
|
||||
assert transform.shape == (k.D, k.D)
|
||||
transform = np.eye(k.input_dim)*-1.
|
||||
assert transform.shape == (k.input_dim, k.input_dim)
|
||||
self.transform = transform
|
||||
self.D = k.D
|
||||
self.input_dim = k.input_dim
|
||||
self.Nparam = k.Nparam
|
||||
self.name = k.name + '_symm'
|
||||
self.k = k
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ class spkern(kernpart):
|
|||
- to handle multiple inputs, call them x1, z1, etc
|
||||
- to handle multiple correlated outputs, you'll need to define each covariance function and 'cross' variance function. TODO
|
||||
"""
|
||||
def __init__(self,D,k,param=None):
|
||||
def __init__(self,input_dim,k,param=None):
|
||||
self.name='sympykern'
|
||||
self._sp_k = k
|
||||
sp_vars = [e for e in k.atoms() if e.is_Symbol]
|
||||
|
|
@ -35,8 +35,8 @@ class spkern(kernpart):
|
|||
assert all([x.name=='x%i'%i for i,x in enumerate(self._sp_x)])
|
||||
assert all([z.name=='z%i'%i for i,z in enumerate(self._sp_z)])
|
||||
assert len(self._sp_x)==len(self._sp_z)
|
||||
self.D = len(self._sp_x)
|
||||
assert self.D == D
|
||||
self.input_dim = len(self._sp_x)
|
||||
assert self.input_dim == input_dim
|
||||
self._sp_theta = sorted([e for e in sp_vars if not (e.name[0]=='x' or e.name[0]=='z')],key=lambda e:e.name)
|
||||
self.Nparam = len(self._sp_theta)
|
||||
|
||||
|
|
@ -69,15 +69,15 @@ class spkern(kernpart):
|
|||
|
||||
def compute_psi_stats(self):
|
||||
#define some normal distributions
|
||||
mus = [sp.var('mu%i'%i,real=True) for i in range(self.D)]
|
||||
Ss = [sp.var('S%i'%i,positive=True) for i in range(self.D)]
|
||||
mus = [sp.var('mu%i'%i,real=True) for i in range(self.input_dim)]
|
||||
Ss = [sp.var('S%i'%i,positive=True) for i in range(self.input_dim)]
|
||||
normals = [(2*sp.pi*Si)**(-0.5)*sp.exp(-0.5*(xi-mui)**2/Si) for xi, mui, Si in zip(self._sp_x, mus, Ss)]
|
||||
|
||||
#do some integration!
|
||||
#self._sp_psi0 = ??
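# The quantities assembled below are the standard psi statistics, e.g.
#   psi1(z)     = \int k(x, z)          N(x | mu, S) dx
#   psi2(z, z') = \int k(x, z) k(x, z') N(x | mu, S) dx
# computed symbolically by integrating out one input dimension at a time.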
|
||||
self._sp_psi1 = self._sp_k
|
||||
for i in range(self.D):
|
||||
print 'perfoming integrals %i of %i'%(i+1,2*self.D)
|
||||
for i in range(self.input_dim):
|
||||
print 'perfoming integrals %i of %i'%(i+1,2*self.input_dim)
|
||||
sys.stdout.flush()
|
||||
self._sp_psi1 *= normals[i]
|
||||
self._sp_psi1 = sp.integrate(self._sp_psi1,(self._sp_x[i],-sp.oo,sp.oo))
|
||||
|
|
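What compute_psi_stats computes for psi1 is the Gaussian expectation of the kernel, integrating k(x,z) against N(x|mu,S) one dimension at a time. A standalone 1-d sketch with the toy kernel from earlier; sympy should reduce this closed-form Gaussian integral, though it can be slow:

    import sympy as sp

    x0, z0 = sp.symbols('x0 z0', real=True)
    mu0 = sp.Symbol('mu0', real=True)
    S0 = sp.Symbol('S0', positive=True)
    theta = sp.Symbol('theta', positive=True)

    k = sp.exp(-theta * (x0 - z0)**2)
    normal = (2*sp.pi*S0)**sp.Rational(-1, 2) * sp.exp(-(x0 - mu0)**2 / (2*S0))
    psi1 = sp.integrate(k * normal, (x0, -sp.oo, sp.oo)).simplify()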
@@ -85,10 +85,10 @@ class spkern(kernpart):
         self._sp_psi1 = self._sp_psi1.simplify()

         #and here's psi2 (eek!)
-        zprime = [sp.Symbol('zp%i'%i) for i in range(self.D)]
+        zprime = [sp.Symbol('zp%i'%i) for i in range(self.input_dim)]
         self._sp_psi2 = self._sp_k.copy()*self._sp_k.copy().subs(zip(self._sp_z,zprime))
-        for i in range(self.D):
-            print 'performing integrals %i of %i'%(self.D+i+1,2*self.D)
+        for i in range(self.input_dim):
+            print 'performing integrals %i of %i'%(self.input_dim+i+1,2*self.input_dim)
             sys.stdout.flush()
             self._sp_psi2 *= normals[i]
             self._sp_psi2 = sp.integrate(self._sp_psi2,(self._sp_x[i],-sp.oo,sp.oo))
@@ -113,8 +113,8 @@ class spkern(kernpart):
         self._function_code = re.sub('DiracDelta\(.+?,.+?\)','0.0',self._function_code)

         #Here's some code to do the looping for K
-        arglist = ", ".join(["X[i*D+%s]"%x.name[1:] for x in self._sp_x]\
-                + ["Z[j*D+%s]"%z.name[1:] for z in self._sp_z]\
+        arglist = ", ".join(["X[i*input_dim+%s]"%x.name[1:] for x in self._sp_x]\
+                + ["Z[j*input_dim+%s]"%z.name[1:] for z in self._sp_z]\
                 + ["param[%i]"%i for i in range(self.Nparam)])

         self._K_code =\
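To make the generated string concrete, here is what arglist expands to for input_dim = 2 and two parameters (a sketch mirroring the list comprehension above, with numeric indices in place of the symbol-name slicing):

    input_dim, Nparam = 2, 2
    arglist = ", ".join(["X[i*input_dim+%i]" % q for q in range(input_dim)]
                        + ["Z[j*input_dim+%i]" % q for q in range(input_dim)]
                        + ["param[%i]" % i for i in range(Nparam)])
    print(arglist)
    # X[i*input_dim+0], X[i*input_dim+1], Z[j*input_dim+0], Z[j*input_dim+1], param[0], param[1]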
@@ -123,7 +123,7 @@ class spkern(kernpart):
         int j;
         int N = target_array->dimensions[0];
         int num_inducing = target_array->dimensions[1];
-        int D = X_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
         //#pragma omp parallel for private(j)
         for (i=0;i<N;i++){
           for (j=0;j<num_inducing;j++){
@@ -140,7 +140,7 @@ class spkern(kernpart):
         """
         int i;
         int N = target_array->dimensions[0];
-        int D = X_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
         //#pragma omp parallel for
         for (i=0;i<N;i++){
           target[i] = k(%s);
@@ -156,7 +156,7 @@ class spkern(kernpart):
         int j;
         int N = partial_array->dimensions[0];
         int num_inducing = partial_array->dimensions[1];
-        int D = X_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
         //#pragma omp parallel for private(j)
         for (i=0;i<N;i++){
           for (j=0;j<num_inducing;j++){
@@ -174,7 +174,7 @@ class spkern(kernpart):
         """
         int i;
         int N = partial_array->dimensions[0];
-        int D = X_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
         for (i=0;i<N;i++){
             %s
         }
@@ -182,20 +182,20 @@ class spkern(kernpart):
         """%(diag_funclist,"/*"+str(self._sp_k)+"*/") #adding a string representation forces recompile when needed

         #Here's some code to do gradients wrt x
-        gradient_funcs = "\n".join(["target[i*D+%i] += partial[i*num_inducing+j]*dk_dx%i(%s);"%(q,q,arglist) for q in range(self.D)])
+        gradient_funcs = "\n".join(["target[i*input_dim+%i] += partial[i*num_inducing+j]*dk_dx%i(%s);"%(q,q,arglist) for q in range(self.input_dim)])
         self._dK_dX_code = \
         """
         int i;
         int j;
         int N = partial_array->dimensions[0];
         int num_inducing = partial_array->dimensions[1];
-        int D = X_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
         //#pragma omp parallel for private(j)
         for (i=0;i<N; i++){
           for (j=0; j<num_inducing; j++){
             %s
-            //if(isnan(target[i*D+2])){printf("%%f\\n",dk_dx2(X[i*D+0], X[i*D+1], X[i*D+2], Z[j*D+0], Z[j*D+1], Z[j*D+2], param[0], param[1], param[2], param[3], param[4], param[5]));}
-            //if(isnan(target[i*D+2])){printf("%%f,%%f,%%i,%%i\\n", X[i*D+2], Z[j*D+2],i,j);}
+            //if(isnan(target[i*input_dim+2])){printf("%%f\\n",dk_dx2(X[i*input_dim+0], X[i*input_dim+1], X[i*input_dim+2], Z[j*input_dim+0], Z[j*input_dim+1], Z[j*input_dim+2], param[0], param[1], param[2], param[3], param[4], param[5]));}
+            //if(isnan(target[i*input_dim+2])){printf("%%f,%%f,%%i,%%i\\n", X[i*input_dim+2], Z[j*input_dim+2],i,j);}

           }
         }
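Likewise, gradient_funcs unrolls one C accumulation statement per input dimension; for input_dim = 2 (arglist elided):

    input_dim = 2
    arglist = "..."  # as generated earlier; elided here
    print("\n".join(
        ["target[i*input_dim+%i] += partial[i*num_inducing+j]*dk_dx%i(%s);" % (q, q, arglist)
         for q in range(input_dim)]))
    # target[i*input_dim+0] += partial[i*num_inducing+j]*dk_dx0(...);
    # target[i*input_dim+1] += partial[i*num_inducing+j]*dk_dx1(...);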
@@ -209,7 +209,7 @@ class spkern(kernpart):
         int j;
         int N = partial_array->dimensions[0];
         int num_inducing = 0;
-        int D = X_array->dimensions[1];
+        int input_dim = X_array->dimensions[1];
         for (i=0;i<N; i++){
           j = i;
             %s
@@ -17,7 +17,7 @@ class EP(likelihood):
         self.epsilon = epsilon
         self.eta, self.delta = power_ep
         self.data = data
-        self.N, self.input_dim = self.data.shape
+        self.N, self.output_dim = self.data.shape
         self.is_heteroscedastic = True
         self.Nparams = 0
         self._transf_data = self.LikelihoodFunction._preprocess_values(data)
@@ -15,7 +15,7 @@ class Gaussian(likelihood):
         self.is_heteroscedastic = False
         self.Nparams = 1
         self.Z = 0. # a correction factor which accounts for the approximation made
-        N, self.input_dim = data.shape
+        N, self.output_dim = data.shape

         # normalization
         if normalize:
@@ -24,8 +24,8 @@ class Gaussian(likelihood):
             # Don't scale outputs which have zero variance to zero.
             self._scale[np.nonzero(self._scale == 0.)] = 1.0e-3
         else:
-            self._offset = np.zeros((1, self.input_dim))
-            self._scale = np.ones((1, self.input_dim))
+            self._offset = np.zeros((1, self.output_dim))
+            self._scale = np.ones((1, self.output_dim))

         self.set_data(data)

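A sketch of the per-output normalization these shapes imply, assuming offset and scale are the column mean and standard deviation (the usual convention; the normalize branch itself is not shown in this hunk):

    import numpy as np

    data = np.random.randn(50, 3)            # (N, output_dim)
    offset = data.mean(axis=0)[None, :]      # (1, output_dim)
    scale = data.std(axis=0)[None, :]        # (1, output_dim)
    scale[scale == 0.] = 1.0e-3              # don't scale zero-variance outputs to zero
    Y = (data - offset) / scale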
@@ -35,7 +35,7 @@ class Gaussian(likelihood):
     def set_data(self, data):
         self.data = data
         self.N, D = data.shape
-        assert D == self.input_dim
+        assert D == self.output_dim
         self.Y = (self.data - self._offset) / self._scale
         if D > self.N:
             self.YYT = np.dot(self.Y, self.Y.T)
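The YYT branch caches the (N, N) Gram matrix when output_dim exceeds N, since it is then the smaller object to carry through the likelihood computations:

    import numpy as np

    N, output_dim = 10, 50
    Y = np.random.randn(N, output_dim)
    YYT = np.dot(Y, Y.T)                     # (10, 10), smaller than Y's (10, 50)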
@@ -54,7 +54,7 @@ class Gaussian(likelihood):
         x = np.float64(x)
         if np.all(self._variance != x):
             if x == 0.:
-                self.precision = None
+                self.precision = np.inf
                 self.V = None
             else:
                 self.precision = 1. / x
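The None to np.inf change gives the correct zero-noise limit: precision is the reciprocal noise variance, and np.inf keeps downstream arithmetic well defined where None would raise. A one-line sketch:

    import numpy as np

    variance = 0.
    precision = np.inf if variance == 0. else 1. / variance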
@@ -68,9 +68,9 @@ class Gaussian(likelihood):
         """
         mean = mu * self._scale + self._offset
         if full_cov:
-            if self.input_dim > 1:
+            if self.output_dim > 1:
                 raise NotImplementedError, "TODO"
-            # Note. for input_dim>1, we need to re-normalise all the outputs independently.
+            # Note. for output_dim>1, we need to re-normalise all the outputs independently.
             # This will mess up computations of diag(true_var), below.
             # note that the upper, lower quantiles should be the same shape as mean
             # Augment the output variance with the likelihood variance and rescale.