diff --git a/.gitignore b/.gitignore
index d2d6f360..73431343 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,3 +33,6 @@ nosetests.xml
 .mr.developer.cfg
 .project
 .pydevproject
+
+#vim
+*.swp
diff --git a/GPy/examples/uncertain_input_GP_regression_demo.py b/GPy/examples/uncertain_input_GP_regression_demo.py
new file mode 100644
index 00000000..3d2c51f0
--- /dev/null
+++ b/GPy/examples/uncertain_input_GP_regression_demo.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+
+import pylab as pb
+import numpy as np
+import GPy
+pb.ion()
+pb.close('all')
+
+
+######################################
+## 1 dimensional example
+
+# sample inputs and outputs
+S = np.ones((20,1))
+X = np.random.uniform(-3.,3.,(20,1))
+Y = np.sin(X)+np.random.randn(20,1)*0.05
+
+k = GPy.kern.bias(1) + GPy.kern.white(1)
+
+# create simple GP model
+m = GPy.models.uncertain_input_GP_regression(X,Y,S,kernel=k)
+
+# constrain all parameters to be positive
+m.constrain_positive('(variance|prec)')
+
+# optimize and plot
+m.optimize('tnc', max_f_eval = 1000, messages=1)
+m.plot()
+print(m)
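+
+# The RBF kernel also gains psi statistics in this change, so a kernel such as
+# the following should work here too (assuming the constructor is exposed as
+# GPy.kern.rbf, analogous to bias and white above):
+# k = GPy.kern.rbf(1) + GPy.kern.white(1)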
diff --git a/GPy/kern/bias.py b/GPy/kern/bias.py
index 004205fb..a89f56c5 100644
--- a/GPy/kern/bias.py
+++ b/GPy/kern/bias.py
@@ -46,3 +46,40 @@ class bias(kernpart):
 
     def dKdiag_dX(self,partial,X,target):
         pass
+
+    def psi0(self, Z, mu, S, target):
+        target += self.variance
+
+    def psi1(self, Z, mu, S, target):
+        target += self.variance
+
+    def psi2(self, Z, mu, S, target):
+        target += self.variance**2
+
+    def dpsi0_dtheta(self, partial, Z, mu, S, target):
+        target += partial.sum()
+
+    def dpsi0_dZ(self, partial, Z, mu, S, target):
+        pass
+
+    def dpsi0_dmuS(self, partial, Z, mu, S, target_mu, target_S):
+        pass
+
+    def dpsi1_dtheta(self, partial, Z, mu, S, target):
+        target += partial.sum()
+
+    def dpsi1_dZ(self, partial, Z, mu, S, target):
+        pass
+
+    def dpsi1_dmuS(self, partial, Z, mu, S, target_mu, target_S):
+        pass
+
+    def dpsi2_dtheta(self, partial, Z, mu, S, target):
+        target += 2.*self.variance*partial.sum()
+
+    def dpsi2_dZ(self, partial, Z, mu, S, target):
+        pass
+
+    def dpsi2_dmuS(self, partial, Z, mu, S, target_mu, target_S):
+        pass
+
diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py
index cc63dffc..158913ff 100644
--- a/GPy/kern/kern.py
+++ b/GPy/kern/kern.py
@@ -201,70 +201,80 @@ class kern(parameterised):
         [p.dKdiag_dX(partial[s],X[s,i_s],target[s,i_s]) for p,i_s,s in zip(self.parts,self.input_slices,slices)]
         return target
 
-    def psi0(self,Z,mu,S,slices_mu=None,slices_Z=None):
+    def psi0(self,Z,mu,S,slices=None):
+        slices = self._process_slices(slices,False)
         target = np.zeros(mu.shape[0])
-        [p.psi0(Z,mu,S,target) for p in self.parts]
+        [p.psi0(Z,mu[s],S[s],target[s]) for p,s in zip(self.parts,slices)]
         return target
 
-    def dpsi0_dtheta(self,Z,mu,S):
-        target = np.zeros((mu.shape[0],self.Nparam))
-        [p.dpsi0_dtheta(Z,mu,S,target[s]) for p,s in zip(self.parts, self.param_slices)]
+    def dpsi0_dtheta(self,partial,Z,mu,S,slices=None):
+        slices = self._process_slices(slices,False)
+        target = np.zeros(self.Nparam)
+        [p.dpsi0_dtheta(partial[s],Z,mu[s],S[s],target[ps]) for p,ps,s in zip(self.parts, self.param_slices,slices)]
         return target
 
-    def dpsi0_dmuS(self,Z,mu,S):
+    def dpsi0_dmuS(self,partial,Z,mu,S,slices=None):
+        slices = self._process_slices(slices,False)
         target_mu,target_S = np.zeros_like(mu),np.zeros_like(S)
-        [p.dpsi0_dmuS(Z,mu,S,target_mu,target_S) for p in self.parts]
+        [p.dpsi0_dmuS(partial,Z,mu[s],S[s],target_mu[s],target_S[s]) for p,s in zip(self.parts,slices)]
         return target_mu,target_S
 
-    def psi1(self,Z,mu,S):
+    def psi1(self,Z,mu,S,slices1=None,slices2=None):
         """Think N,M,Q """
+        slices1, slices2 = self._process_slices(slices1,slices2)
         target = np.zeros((mu.shape[0],Z.shape[0]))
-        [p.psi1(Z,mu,S,target=target) for p in self.parts]
+        [p.psi1(Z[s2],mu[s1],S[s1],target[s1,s2]) for p,s1,s2 in zip(self.parts,slices1,slices2)]
         return target
 
-    def dpsi1_dtheta(self,Z,mu,S):
+    def dpsi1_dtheta(self,partial,Z,mu,S,slices1=None,slices2=None):
         """N,M,(Ntheta)"""
-        target = np.zeros((mu.shape[0],Z.shape[0],self.Nparam))
-        [p.dpsi1_dtheta(Z,mu,S,target[:,:,s]) for p,s in zip(self.parts, self.param_slices)]
+        slices1, slices2 = self._process_slices(slices1,slices2)
+        target = np.zeros((self.Nparam))
+        [p.dpsi1_dtheta(partial[s2,s1],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[ps]) for p,ps,s1,s2,i_s in zip(self.parts, self.param_slices,slices1,slices2,self.input_slices)]
         return target
 
-    def dpsi1_dZ(self,Z,mu,S):
+    def dpsi1_dZ(self,partial,Z,mu,S,slices1=None,slices2=None):
         """N,M,Q"""
-        target = np.zeros((mu.shape[0],Z.shape[0],Z.shape[1]))
-        [p.dpsi1_dZ(Z,mu,S,target) for p in self.parts]
+        slices1, slices2 = self._process_slices(slices1,slices2)
+        target = np.zeros_like(Z)
+        [p.dpsi1_dZ(partial[s2,s1],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[s2,i_s]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
         return target
 
-    def dpsi1_dmuS(self,Z,mu,S):
+    def dpsi1_dmuS(self,partial,Z,mu,S,slices1=None,slices2=None):
         """return shapes are N,M,Q"""
-        target_mu, target_S = np.zeros((2,mu.shape[0],Z.shape[0],Z.shape[1]))
-        [p.dpsi1_dmuS(Z,mu,S,target_mu=target_mu,target_S = target_S) for p in self.parts]
+        slices1, slices2 = self._process_slices(slices1,slices2)
+        target_mu, target_S = np.zeros((2,mu.shape[0],mu.shape[1]))
+        [p.dpsi1_dmuS(partial[s2,s1],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target_mu[s1,i_s],target_S[s1,i_s]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
         return target_mu, target_S
 
-    def psi2(self,Z,mu,S):
+    def psi2(self,Z,mu,S,slices1=None,slices2=None):
         """
         :Z: np.ndarray of inducing inputs (M x Q)
         : mu, S: np.ndarrays of means and variacnes (each N x Q)
         :returns psi2: np.ndarray (N,M,M,Q)
         """
-        target = np.zeros((mu.shape[0],Z.shape[0],Z.shape[0]))
-        [p.psi2(Z,mu,S,target=target) for p in self.parts]
+        target = np.zeros((Z.shape[0],Z.shape[0]))
+        slices1, slices2 = self._process_slices(slices1,slices2)
+        [p.psi2(Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[s2,s2]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
         return target
 
-    def dpsi2_dtheta(self,Z,mu,S):
+    def dpsi2_dtheta(self,partial,Z,mu,S,slices1=None,slices2=None):
         """Returns shape (N,M,M,Ntheta)"""
-        target = np.zeros((Z.shape[0],Z.shape[0],self.Nparam))
-        [p.dpsi2_dtheta(Z,mu,S,target[:,:,s]) for p,s in zip(self.parts, self.param_slices)]
+        slices1, slices2 = self._process_slices(slices1,slices2)
+        target = np.zeros(self.Nparam)
+        [p.dpsi2_dtheta(partial[s2,s2],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[ps]) for p,i_s,s1,s2,ps in zip(self.parts,self.input_slices,slices1,slices2,self.param_slices)]
         return target
 
-    def dpsi2_dZ(self,Z,mu,S):
-        """N,M,M,Q"""
-        target = np.zeros((mu.shape[0],Z.shape[0],Z.shape[0],Z.shape[1]))
-        [p.dpsi2_dZ(Z,mu,S,target) for p in self.parts]
+    def dpsi2_dZ(self,partial,Z,mu,S,slices1=None,slices2=None):
+        slices1, slices2 = self._process_slices(slices1,slices2)
+        target = np.zeros_like(Z)
+        [p.dpsi2_dZ(partial[s2,s2],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[s2,i_s]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
         return target
 
-    def dpsi2_dmuS(self,Z,mu,S):
+    def dpsi2_dmuS(self,partial,Z,mu,S,slices1=None,slices2=None):
         """return shapes are N,M,M,Q"""
-        target_mu, target_S = np.zeros((2,mu.shape[0],Z.shape[0],Z.shape[0],Z.shape[1]))
-        [p.dpsi2_dmuS(Z,mu,S,target_mu=target_mu,target_S = target_S) for p in self.parts]
+        slices1, slices2 = self._process_slices(slices1,slices2)
+        target_mu, target_S = np.zeros((2,mu.shape[0],mu.shape[1]))
+        [p.dpsi2_dmuS(partial[s2,s2],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target_mu[s1,i_s],target_S[s1,i_s]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
         #TODO: there are some extra terms to compute here!
         return target_mu, target_S
diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py
index a9abf09d..c300ae7c 100644
--- a/GPy/kern/rbf.py
+++ b/GPy/kern/rbf.py
@@ -17,7 +17,7 @@ class rbf(kernpart):
     :param lengthscale: the lengthscale of the kernel
     :type lengthscale: float
 
-    .. Note: for rbf with different lengthscales on each dimension, see rbf_ARD
+    .. Note: for rbf with different lengthscale on each dimension, see rbf_ARD
 
     """
     def __init__(self,D,variance=1.,lengthscale=1.):
@@ -81,80 +81,88 @@ class rbf(kernpart):
         self._K_exponent = -0.5*self._K_dist2
         self._K_dvar = np.exp(-0.5*self._K_dist2)
 
-if __name__=='__main__':
-    #run some simple tests on the kernel (TODO:move these to unititest)
-    #TODO: these are broken in this new structure!
-    N = 10
-    M = 5
-    Q = 3
+    def psi0(self,Z,mu,S,target):
+        target += self.variance
 
-    Z = np.random.randn(M,Q)
-    mu = np.random.randn(N,Q)
-    S = np.random.rand(N,Q)
+    def dpsi0_dtheta(self,partial,Z,mu,S,target):
+        target[0] += 1.
 
-    var = 2.5
-    lengthscales = np.ones(Q)*0.7
+    def dpsi0_dmuS(self,partial,Z,mu,S,target_mu,target_S):
+        pass
 
-    k = rbf(Q,var,lengthscales)
+    def psi1(self,Z,mu,S,target):
+        self._psi_computations(Z,mu,S)
+        target += self._psi1
 
-    from checkgrad import checkgrad
+    def dpsi1_dtheta(self,partial,Z,mu,S,target):
+        self._psi_computations(Z,mu,S)
+        denom_deriv = S[:,None,:]/(self.lengthscale**3+self.lengthscale*S[:,None,:])
+        d_length = self._psi1[:,:,None]*(self.lengthscale*np.square(self._psi1_dist/(self.lengthscale2+S[:,None,:])) + denom_deriv)
+        target[0] += np.sum(partial*self._psi1/self.variance)
+        target[1] += np.sum(d_length*partial[:,:,None])
 
-    def k_theta_test(param,k):
-        k.set_param(param)
-        K = k.K(Z)
-        dK_dtheta = k.dK_dtheta(Z)
-        f = np.sum(K)
-        df = dK_dtheta.sum(0).sum(0)
-        return f,np.array(df)
-    print "dk_dtheta_test"
-    checkgrad(k_theta_test,np.random.randn(1+Q),args=(k,))
+    def dpsi1_dZ(self,partial,Z,mu,S,target):
+        self._psi_computations(Z,mu,S)
+        target += np.sum(partial[:,:,None]*-self._psi1[:,:,None]*self._psi1_dist/self.lengthscale2/self._psi1_denom,0)
+
+    def dpsi1_dmuS(self,partial,Z,mu,S,target_mu,target_S):
+        self._psi_computations(Z,mu,S)
+        tmp = self._psi1[:,:,None]/self.lengthscale2/self._psi1_denom
+        target_mu += np.sum(partial*tmp*self._psi1_dist,1)
+        target_S += np.sum(partial*0.5*tmp*(self._psi1_dist_sq-1),1)
 
-    def psi1_mu_test(mu,k):
-        mu = mu.reshape(N,Q)
-        f = np.sum(k.psi1(Z,mu,S))
-        df = k.dpsi1_dmuS(Z,mu,S)[0].sum(1)
-        return f,df.flatten()
-    print "psi1_mu_test"
-    checkgrad(psi1_mu_test,np.random.randn(N*Q),args=(k,))
+    def psi2(self,Z,mu,S,target):
+        self._psi_computations(Z,mu,S)
+        target += self._psi2.sum(0) #TODO: psi2 should be NxMxM (for het. noise)
 
-    def psi1_S_test(S,k):
-        S = S.reshape(N,Q)
-        f = np.sum(k.psi1(Z,mu,S))
-        df = k.dpsi1_dmuS(Z,mu,S)[1].sum(1)
-        return f,df.flatten()
-    print "psi1_S_test"
-    checkgrad(psi1_S_test,np.random.rand(N*Q),args=(k,))
+    def dpsi2_dtheta(self,partial,Z,mu,S,target):
+        """Shape N,M,M,Ntheta"""
+        self._psi_computations(Z,mu,S)
+        d_var = np.sum(2.*self._psi2/self.variance,0)
+        d_length = self._psi2[:,:,:,None]*(0.5*self._psi2_Zdist_sq*self._psi2_denom + 2.*self._psi2_mudist_sq + 2.*S[:,None,None,:]/self.lengthscale2)/(self.lengthscale*self._psi2_denom)
+        d_length = d_length.sum(0)
+        target[0] += np.sum(partial*d_var)
+        target[1] += np.sum(d_length*partial)
 
-    def psi1_theta_test(theta,k):
-        k.set_param(theta)
-        f = np.sum(k.psi1(Z,mu,S))
-        df = np.array([np.sum(grad) for grad in k.dpsi1_dtheta(Z,mu,S)])
-        return f,df
-    print "psi1_theta_test"
-    checkgrad(psi1_theta_test,np.random.rand(1+Q),args=(k,))
+    def dpsi2_dZ(self,partial,Z,mu,S,target):
+        """Returns shape N,M,M,Q"""
+        self._psi_computations(Z,mu,S)
+        dZ = self._psi2[:,:,:,None]/self.lengthscale2*(-0.5*self._psi2_Zdist + self._psi2_mudist/self._psi2_denom)
+        target += np.sum(partial[None,:,:,None]*dZ,0).sum(1)
+
+    def dpsi2_dmuS(self,partial,Z,mu,S,target_mu,target_S):
+        """Think N,M,M,Q """
+        self._psi_computations(Z,mu,S)
+        tmp = self._psi2[:,:,:,None]/self.lengthscale2/self._psi2_denom
+        target_mu += (partial*-tmp*2.*self._psi2_mudist).sum(1).sum(1)
+        target_S += (partial*tmp*(2.*self._psi2_mudist_sq-1)).sum(1).sum(1)
 
-    def psi2_mu_test(mu,k):
-        mu = mu.reshape(N,Q)
-        f = np.sum(k.psi2(Z,mu,S))
-        df = k.dpsi2_dmuS(Z,mu,S)[0].sum(1).sum(1)
-        return f,df.flatten()
-    print "psi2_mu_test"
-    checkgrad(psi2_mu_test,np.random.randn(N*Q),args=(k,))
+    def _psi_computations(self,Z,mu,S):
+        #here are the "statistics" for psi1 and psi2
+        if not np.all(Z==self._Z):
+            #Z has changed, compute Z specific stuff
+            self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # M,M,Q
+            self._psi2_Zdist = Z[:,None,:]-Z[None,:,:] # M,M,Q
+            self._psi2_Zdist_sq = np.square(self._psi2_Zdist)/self.lengthscale2 # M,M,Q
+            self._Z = Z
 
-    def psi2_S_test(S,k):
-        S = S.reshape(N,Q)
-        f = np.sum(k.psi2(Z,mu,S))
-        df = k.dpsi2_dmuS(Z,mu,S)[1].sum(1).sum(1)
-        return f,df.flatten()
-    print "psi2_S_test"
-    checkgrad(psi2_S_test,np.random.rand(N*Q),args=(k,))
+        if not (np.all(Z==self._Z) and np.all(mu==self._mu) and np.all(S==self._S)):
+            #something's changed. recompute EVERYTHING
+
+            #TODO: make more efficient for large Q (using NDL's dot product trick)
+            #psi1
+            self._psi1_denom = S[:,None,:]/self.lengthscale2 + 1.
+            self._psi1_dist = Z[None,:,:]-mu[:,None,:]
+            self._psi1_dist_sq = np.square(self._psi1_dist)/self.lengthscale2/self._psi1_denom
+            self._psi1_exponent = -0.5*np.sum(self._psi1_dist_sq+np.log(self._psi1_denom),-1)
+            self._psi1 = self.variance*np.exp(self._psi1_exponent)
+
+            #psi2
+            self._psi2_denom = 2.*S[:,None,None,:]/self.lengthscale2+1. # N,M,M,Q
+            self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,M,M,Q
+            self._psi2_mudist_sq = np.square(self._psi2_mudist)/(self.lengthscale2*self._psi2_denom)
+            self._psi2_exponent = np.sum(-self._psi2_Zdist_sq/4. -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,M,M
+            self._psi2 = np.square(self.variance)*np.exp(self._psi2_exponent) # N,M,M
+
+            self._Z, self._mu, self._S = Z, mu,S
 
-    def psi2_theta_test(theta,k):
-        k.set_param(theta)
-        f = np.sum(k.psi2(Z,mu,S))
-        df = np.array([np.sum(grad) for grad in k.dpsi2_dtheta(Z,mu,S)])
-        return f,df
-    print "psi2_theta_test"
-    checkgrad(psi2_theta_test,np.random.rand(1+Q),args=(k,))
diff --git a/GPy/kern/white.py b/GPy/kern/white.py
index df62d4b6..587a2b4a 100644
--- a/GPy/kern/white.py
+++ b/GPy/kern/white.py
@@ -54,33 +54,33 @@ class white(kernpart):
     def psi0(self,Z,mu,S,target):
         target += self.variance
 
-    def dpsi0_dtheta(self,Z,mu,S,target):
-        target += 1.
+    def dpsi0_dtheta(self,partial,Z,mu,S,target):
+        target += partial.sum()
 
-    def dpsi0_dmuS(self,Z,mu,S,target_mu,target_S):
+    def dpsi0_dmuS(self,partial,Z,mu,S,target_mu,target_S):
         pass
 
     def psi1(self,Z,mu,S,target):
         pass
 
-    def dpsi1_dtheta(self,Z,mu,S,target):
+    def dpsi1_dtheta(self,partial,Z,mu,S,target):
         pass
 
-    def dpsi1_dZ(self,Z,mu,S,target):
+    def dpsi1_dZ(self,partial,Z,mu,S,target):
         pass
 
-    def dpsi1_dmuS(self,Z,mu,S,target_mu,target_S):
+    def dpsi1_dmuS(self,partial,Z,mu,S,target_mu,target_S):
         pass
 
     def psi2(self,Z,mu,S,target):
         pass
 
-    def dpsi2_dZ(self,Z,mu,S,target):
+    def dpsi2_dZ(self,partial,Z,mu,S,target):
         pass
 
-    def dpsi2_dtheta(self,Z,mu,S,target):
+    def dpsi2_dtheta(self,partial,Z,mu,S,target):
         pass
 
-    def dpsi2_dmuS(self,Z,mu,S,target_mu,target_S):
+    def dpsi2_dmuS(self,partial,Z,mu,S,target_mu,target_S):
         pass
diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index dd721559..d176e7b6 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -9,3 +9,4 @@ from warped_GP import warpedGP
 from GP_EP import GP_EP
 from generalized_FITC import generalized_FITC
 from sparse_GPLVM import sparse_GPLVM
+from uncertain_input_GP_regression import uncertain_input_GP_regression
diff --git a/GPy/models/sparse_GP_regression.py b/GPy/models/sparse_GP_regression.py
index 39a38214..a221ad31 100644
--- a/GPy/models/sparse_GP_regression.py
+++ b/GPy/models/sparse_GP_regression.py
@@ -52,7 +52,6 @@ class sparse_GP_regression(GP_regression):
         self._compute_kernel_matrices()
         self._computations()
 
-
     def _compute_kernel_matrices(self):
         # kernel computations, using BGPLVM notation
         #TODO: the following can be switched out in the case of uncertain inputs (or the BGPLVM!)
@@ -63,16 +62,6 @@ class sparse_GP_regression(GP_regression):
         self.psi1 = self.kern.K(self.Z,self.X)
         self.psi2 = np.dot(self.psi1,self.psi1.T)
 
-        #self.dKmm_dtheta = self.kern.dK_dtheta(self.Z)
-        #self.dpsi0_dtheta = self.kern.dKdiag_dtheta(self.X).sum(0)
-        #self.dpsi1_dtheta = self.kern.dK_dtheta(self.Z,self.X)
-        #tmp = np.dot(self.psi1, self.dpsi1_dtheta)
-        #self.dpsi2_dtheta = tmp + tmp.transpose(1,0,2)
-
-        #self.dpsi1_dZ = self.kern.dK_dX(self.Z,self.X)
-        #self.dpsi2_dZ = np.tensordot(self.psi1,self.dpsi1_dZ,((1),(0)))*2.0
-        #self.dKmm_dZ = self.kern.dK_dX(self.Z)
-
     def _computations(self):
         # TODO find routine to multiply triangular matrices
         self.psi1Y = np.dot(self.psi1, self.Y)
@@ -101,13 +90,6 @@ class sparse_GP_regression(GP_regression):
 
         # Computes dL_dKmm TODO: nicer precomputations
-        # tmp = self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi)
-        # self.dL_dKmm = -self.beta * self.D * 0.5 * mdot(self.Lmi.T, self.A, self.Lmi) # dB
-        # self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - tmp - tmp.T + self.Kmmi) # dC
-        # tmp = (mdot(self.LBL_inv, self.psi1YYpsi1, self.Kmmi)
-        #        - self.beta*mdot(self.G, self.psi2, self.Kmmi))
-        # self.dL_dKmm += -0.5*self.beta2*(tmp + tmp.T - self.G)
-
         tmp = self.beta*mdot(self.LBL_inv, self.psi2, self.Kmmi)
         self.dL_dKmm = -self.beta * self.D * 0.5 * mdot(self.Lmi.T, self.A, self.Lmi) # dB
         self.dL_dKmm += -0.5 * self.D * (- self.LBL_inv - tmp - tmp.T + self.Kmmi) # dC
diff --git a/GPy/models/uncertain_input_GP_regression.py b/GPy/models/uncertain_input_GP_regression.py
new file mode 100644
index 00000000..66724b07
--- /dev/null
+++ b/GPy/models/uncertain_input_GP_regression.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+import numpy as np
+import pylab as pb
+from ..util.linalg import mdot, jitchol, chol_inv, pdinv
+from ..util.plot import gpplot
+from .. import kern
+from ..inference.likelihoods import likelihood
+from sparse_GP_regression import sparse_GP_regression
+
+class uncertain_input_GP_regression(sparse_GP_regression):
+    """
+    Variational sparse GP model (Regression) with uncertainty on the inputs
+
+    :param X: inputs
+    :type X: np.ndarray (N x Q)
+    :param X_uncertainty: uncertainty on X (Gaussian variances, assumed isotropic)
+    :type X_uncertainty: np.ndarray (N x Q)
+    :param Y: observed data
+    :type Y: np.ndarray of observations (N x D)
+    :param kernel: the kernel/covariance function. See link kernels
+    :type kernel: a GPy kernel
+    :param Z: inducing inputs (optional, see note)
+    :type Z: np.ndarray (M x Q) | None
+    :param Zslices: slices for the inducing inputs (see slicing TODO: link)
+    :param M: Number of inducing points (optional, default 10. Ignored if Z is not None)
+    :type M: int
+    :param beta: noise precision. TODO: ignore beta if doing EP
+    :type beta: float
+    :param normalize_(X|Y): whether to normalize the data before computing (predictions will be in original scales)
+    :type normalize_(X|Y): bool
+    """
+
+    def __init__(self,X,Y,X_uncertainty,kernel=None, beta=100., Z=None,Zslices=None,M=10,normalize_X=False,normalize_Y=False):
+        self.X_uncertainty = X_uncertainty
+        sparse_GP_regression.__init__(self, X, Y, kernel = kernel, beta = beta, normalize_X = normalize_X, normalize_Y = normalize_Y)
+        self.trYYT = np.sum(np.square(self.Y))
+
+    def _compute_kernel_matrices(self):
+        # kernel computations, using BGPLVM notation
+        #TODO: slices for psi statistics (easy enough)
+        self.Kmm = self.kern.K(self.Z)
+        self.psi0 = self.kern.psi0(self.Z,self.X, self.X_uncertainty).sum()
+        self.psi1 = self.kern.psi1(self.Z,self.X, self.X_uncertainty).T
+        self.psi2 = self.kern.psi2(self.Z,self.X, self.X_uncertainty)
+
+    def dL_dtheta(self):
+        #re-cast computations in psi2 back to psi1:
+        dL_dtheta = self.kern.dK_dtheta(self.dL_dKmm,self.Z)
+        dL_dtheta += self.kern.dpsi0_dtheta(self.dL_dpsi0, self.Z,self.X,self.X_uncertainty)
+        dL_dtheta += self.kern.dpsi1_dtheta(self.dL_dpsi1.T,self.Z,self.X, self.X_uncertainty)
+        dL_dtheta += self.kern.dpsi2_dtheta(self.dL_dpsi2,self.Z,self.X, self.X_uncertainty) # for multiple_beta, dL_dpsi2 will be a different shape
+        return dL_dtheta
+
+    def dL_dZ(self):
+        dL_dZ = 2.*self.kern.dK_dX(self.dL_dKmm,self.Z) # factor of two because of vertical and horizontal 'stripes' in dKmm_dZ
+        dL_dZ += self.kern.dpsi1_dZ(self.dL_dpsi1.T,self.Z,self.X, self.X_uncertainty)
+        dL_dZ += self.kern.dpsi2_dZ(self.dL_dpsi2,self.Z,self.X, self.X_uncertainty)
+        return dL_dZ
+
+    def plot(self,*args,**kwargs):
+        """
+        Plot the fitted model: just call the sparse GP_regression plot function and then add
+        markers to represent uncertainty on the inputs
+        """
+        sparse_GP_regression.plot(self,*args,**kwargs)
+        if self.Q==1:
+            pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_uncertainty.flatten()))
+
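The inline checkgrad tests removed from rbf.py above are not replaced here (the TODO suggests moving them to unit tests). As a rough, illustrative sketch only, not part of the patch, the new psi-statistic gradients could be exercised end-to-end through the new model, assuming GPy.kern.rbf is exposed like bias/white and that the model inherits checkgrad() from the GPy model base class:

    import numpy as np
    import GPy

    np.random.seed(0)
    X = np.random.uniform(-3., 3., (20, 1))
    S = 0.05 * np.ones((20, 1))                  # input variances (X_uncertainty)
    Y = np.sin(X) + np.random.randn(20, 1) * 0.05
    k = GPy.kern.rbf(1) + GPy.kern.white(1)      # rbf constructor assumed, analogous to bias/white
    m = GPy.models.uncertain_input_GP_regression(X, Y, S, kernel=k)
    m.constrain_positive('(variance|prec)')
    print(m.checkgrad())                         # numerically check the new psi1/psi2 gradients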