From ef15de9411123b936a8fe556e3257970c12a56d0 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Fri, 26 Apr 2013 17:26:43 +0100 Subject: [PATCH 01/11] added a tdot function (thanks Iain) --- GPy/models/sparse_GP.py | 5 +-- GPy/util/linalg.py | 99 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 99 insertions(+), 5 deletions(-) diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py index e158e026..dc77e795 100644 --- a/GPy/models/sparse_GP.py +++ b/GPy/models/sparse_GP.py @@ -108,9 +108,6 @@ class sparse_GP(GP): self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B) self.psi1V = np.dot(self.psi1, self.V) - #tmp = np.dot(self.Lmi.T, self.LBi.T) - #tmp = linalg.lapack.clapack.dtrtrs(self.Lm.T,np.asarray(self.LBi.T,order='C'),lower=0)[0] - #self.C = np.dot(tmp,tmp.T) #TODO: tmp is triangular. replace with dtrmm (blas) when available tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.Bi),lower=1,trans=1)[0] self.C = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0] self.Cpsi1V = np.dot(self.C,self.psi1V) @@ -171,7 +168,7 @@ class sparse_GP(GP): #likelihood is not heterscedatic self.partial_for_likelihood = - 0.5 * self.N*self.D*self.likelihood.precision + 0.5 * np.sum(np.square(self.likelihood.Y))*self.likelihood.precision**2 self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum()*self.likelihood.precision**2 - np.trace(self.A)*self.likelihood.precision*sf2) - self.partial_for_likelihood += 0.5 * self.D * trace_dot(self.Bi,self.A)*self.likelihood.precision + self.partial_for_likelihood += 0.5 * self.D * trace_dot(self.Bi,self.A)*self.likelihood.precision # TODO: unstable? self.partial_for_likelihood += self.likelihood.precision*(0.5*trace_dot(self.psi2_beta_scaled,self.E*sf2) - np.trace(self.Cpsi1VVpsi1)) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index 79025d4f..34e30dca 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -1,9 +1,12 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) +#tdot function courtesy of Ian Murray: +# Iain Murray, April 2013. 
iain contactable via iainmurray.net +# http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot.py import numpy as np -from scipy import linalg, optimize +from scipy import linalg, optimize, weave import pylab as pb import Tango import sys @@ -11,9 +14,17 @@ import re import pdb import cPickle import types +import ctypes +from ctypes import byref, c_char, c_int, c_double # TODO #import scipy.lib.lapack.flapack import scipy as sp +try: + _blaslib = ctypes.cdll.LoadLibrary(np.core._dotblas.__file__) + _blas_available = True +except: + _blas_available = False + def trace_dot(a,b): """ efficiently compute the trace of the matrix product of a and b @@ -175,3 +186,89 @@ def PCA(Y, Q): X /= v; W *= v; return X, W.T + + +def tdot_numpy(mat,out=None): + return np.dot(mat,mat.T,out) + +def tdot_blas(mat, out=None): + """returns np.dot(mat, mat.T), but faster for large 2D arrays of doubles.""" + if (mat.dtype != 'float64') or (len(mat.shape) != 2): + return np.dot(mat, mat.T) + nn = mat.shape[0] + if not out: + out = np.zeros((nn,nn)) + else: + assert(out.dtype == 'float64') + assert(out.shape == (nn,nn)) + # FIXME: should allow non-contiguous out, and copy output into it: + assert(8 in out.strides) + # zeroing needed because of dumb way I copy across triangular answer + out[:] = 0.0 + + ## Call to DSYRK from BLAS + # If already in Fortran order (rare), and has the right sorts of strides I + # could avoid the copy. I also thought swapping to cblas API would allow use + # of C order. However, I tried that and had errors with large matrices: + # http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot_broken.py + mat = mat.copy(order='F') + TRANS = c_char('n') + N = c_int(mat.shape[0]) + K = c_int(mat.shape[1]) + LDA = c_int(mat.shape[0]) + UPLO = c_char('l') + ALPHA = c_double(1.0) + A = mat.ctypes.data_as(ctypes.c_void_p) + BETA = c_double(0.0) + C = out.ctypes.data_as(ctypes.c_void_p) + LDC = c_int(np.max(out.strides) / 8) + _blaslib.dsyrk_(byref(UPLO), byref(TRANS), byref(N), byref(K), + byref(ALPHA), A, byref(LDA), byref(BETA), C, byref(LDC)) + + symmetrify(out.T) + + return out + +def tdot(*args, **kwargs): + if _blas_available: + return tdot_blas(*args,**kwargs) + else: + return tdot_numpy(*args,**kwargs) + +def symmetrify(A): + """ + Take the square matrix A and make it symmetrical by copting elements from the lower half to the upper + + works IN PLACE. 
+ """ + N,M = A.shape + assert N==M + c_contig_code = """ + for (int i=1; i Date: Fri, 26 Apr 2013 19:32:33 +0100 Subject: [PATCH 02/11] James and Nicolos massive Yak shaving session --- GPy/kern/kern.py | 16 +++++++----- GPy/kern/linear.py | 47 +++++++++++++++++++++--------------- GPy/kern/rbf.py | 31 ++++++++++++------------ GPy/kern/white.py | 10 +++----- GPy/likelihoods/Gaussian.py | 2 +- GPy/models/Bayesian_GPLVM.py | 2 +- GPy/models/sparse_GP.py | 20 +++++++-------- GPy/testing/unit_tests.py | 10 ++++++++ GPy/util/linalg.py | 16 +++++++----- 9 files changed, 90 insertions(+), 64 deletions(-) diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py index 2ef07fa5..a6551e11 100644 --- a/GPy/kern/kern.py +++ b/GPy/kern/kern.py @@ -289,9 +289,11 @@ class kern(parameterised): assert X.shape[1] == self.D slices1, slices2 = self._process_slices(slices1, slices2) if X2 is None: - X2 = X - target = np.zeros((X.shape[0], X2.shape[0])) - [p.K(X[s1, i_s], X2[s2, i_s], target=target[s1, s2]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)] + target = np.zeros((X.shape[0], X.shape[0])) + [p.K(X[s1, i_s], None, target=target[s1, s2]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)] + else: + target = np.zeros((X.shape[0], X2.shape[0])) + [p.K(X[s1, i_s], X2[s2, i_s], target=target[s1, s2]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)] return target def dK_dtheta(self, dL_dK, X, X2=None, slices1=None, slices2=None): @@ -308,10 +310,12 @@ class kern(parameterised): """ assert X.shape[1] == self.D slices1, slices2 = self._process_slices(slices1, slices2) - if X2 is None: - X2 = X target = np.zeros(self.Nparam) - [p.dK_dtheta(dL_dK[s1, s2], X[s1, i_s], X2[s2, i_s], target[ps]) for p, i_s, ps, s1, s2 in zip(self.parts, self.input_slices, self.param_slices, slices1, slices2)] + if X2 is None: + [p.dK_dtheta(dL_dK[s1, s2], X[s1, i_s], None, target[ps]) for p, i_s, ps, s1, s2 in zip(self.parts, self.input_slices,self.param_slices, slices1, slices2)] + else: + [p.dK_dtheta(dL_dK[s1, s2], X[s1, i_s], X2[s2, i_s], target[ps]) for p, i_s, ps, s1, s2 in zip(self.parts, self.input_slices,self.param_slices, slices1, slices2)] + return self._transform_gradients(target) diff --git a/GPy/kern/linear.py b/GPy/kern/linear.py index 78a8732a..78dbdf01 100644 --- a/GPy/kern/linear.py +++ b/GPy/kern/linear.py @@ -4,6 +4,7 @@ from kernpart import kernpart import numpy as np +from ..util.linalg import tdot class linear(kernpart): """ @@ -65,8 +66,11 @@ class linear(kernpart): def K(self,X,X2,target): if self.ARD: XX = X*np.sqrt(self.variances) - XX2 = X2*np.sqrt(self.variances) - target += np.dot(XX, XX2.T) + if X2 is None: + target += tdot(XX) + else: + XX2 = X2*np.sqrt(self.variances) + target += np.dot(XX, XX2.T) else: self._K_computations(X, X2) target += self.variances * self._dot_product @@ -76,8 +80,11 @@ class linear(kernpart): def dK_dtheta(self,dL_dK,X,X2,target): if self.ARD: - product = X[:,None,:]*X2[None,:,:] - target += (dL_dK[:,:,None]*product).sum(0).sum(0) + if X2 is None: + [np.add(target[i:i+1],np.sum(dL_dK*tdot(X[:,i:i+1])),target[i:i+1]) for i in range(self.D)] + else: + product = X[:,None,:]*X2[None,:,:] + target += (dL_dK[:,:,None]*product).sum(0).sum(0) else: self._K_computations(X, X2) target += np.sum(self._dot_product*dL_dK) @@ -133,9 +140,9 @@ class linear(kernpart): returns N,M,M matrix """ self._psi_computations(Z,mu,S) - psi2 = self.ZZ*np.square(self.variances)*self.mu2_S[:, None, None, :] - target += psi2.sum(-1) - 
#TODO: this could be faster using np.tensordot + #psi2 = self.ZZ*np.square(self.variances)*self.mu2_S[:, None, None, :] + #target += psi2.sum(-1) + target += np.tensordot(self.ZZ[None,:,:,:]*np.square(self.variances),self.mu2_S[:, None, None, :],((3),(3))).squeeze().T def dpsi2_dtheta(self,dL_dpsi2,Z,mu,S,target): self._psi_computations(Z,mu,S) @@ -156,28 +163,30 @@ class linear(kernpart): self._psi_computations(Z,mu,S) mu2_S = np.sum(self.mu2_S,0)# Q, target += (dL_dpsi2[:,:,:,None] * (self.mu2_S[:,None,None,:]*(Z*np.square(self.variances)[None,:])[None,None,:,:])).sum(0).sum(1) + #TODO: tensordot would gain some time here #---------------------------------------# # Precomputations # #---------------------------------------# def _K_computations(self,X,X2): - if X2 is None: - X2 = X - if not (np.all(X==self._Xcache) and np.all(X2==self._X2cache)): - self._Xcache = X - self._X2cache = X2 - self._dot_product = np.dot(X,X2.T) - else: - # print "Cache hit!" - pass # TODO: insert debug message here (logging framework) + if not (np.array_equal(X, self._Xcache) and np.array_equal(X2, self._X2cache)): + self._Xcache = X.copy() + if X2 is None: + self._dot_product = tdot(X) + self._X2cache = None + else: + self._X2cache = X2.copy() + self._dot_product = np.dot(X,X2.T) def _psi_computations(self,Z,mu,S): #here are the "statistics" for psi1 and psi2 if not np.all(Z==self._Z): #Z has changed, compute Z specific stuff - self.ZZ = Z[:,None,:]*Z[None,:,:] # M,M,Q - self._Z = Z + #self.ZZ = Z[:,None,:]*Z[None,:,:] # M,M,Q + self.ZZ = np.empty((Z.shape[0],Z.shape[0],Z.shape[1]),order='F') + [tdot(Z[:,i:i+1],self.ZZ[:,:,i].T) for i in xrange(Z.shape[1])] + self._Z = Z.copy() if not (np.all(mu==self._mu) and np.all(S==self._S)): self.mu2_S = np.square(mu)+S - self._mu, self._S = mu, S + self._mu, self._S = mu.copy(), S.copy() diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py index 9ff7a93e..027e5e9e 100644 --- a/GPy/kern/rbf.py +++ b/GPy/kern/rbf.py @@ -6,6 +6,7 @@ from kernpart import kernpart import numpy as np import hashlib from scipy import weave +from ..util.linalg import tdot class rbf(kernpart): """ @@ -74,11 +75,8 @@ class rbf(kernpart): return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscale.size)] def K(self,X,X2,target): - if X2 is None: - X2 = X - self._K_computations(X,X2) - np.add(self.variance*self._K_dvar, target,target) + target += self.variance*self._K_dvar def Kdiag(self,X,target): np.add(target,self.variance,target) @@ -87,6 +85,7 @@ class rbf(kernpart): self._K_computations(X,X2) target[0] += np.sum(self._K_dvar*dL_dK) if self.ARD: + if X2 is None: X2 = X [np.add(target[1+q:2+q],(self.variance/self.lengthscale[q]**3)*np.sum(self._K_dvar*dL_dK*np.square(X[:,q][:,None]-X2[:,q][None,:])),target[1+q:2+q]) for q in range(self.D)] else: target[1] += (self.variance/self.lengthscale)*np.sum(self._K_dvar*self._K_dist2*dL_dK) @@ -182,29 +181,31 @@ class rbf(kernpart): #---------------------------------------# def _K_computations(self,X,X2): - if not (np.all(X==self._X) and np.all(X2==self._X2) and np.all(self._params == self._get_params())): + if not (np.array_equal(X,self._X) and np.array_equal(X2,self._X2) and np.array_equal(self._params , self._get_params())): self._X = X.copy() - self._X2 = X2.copy() self._params == self._get_params().copy() - if X2 is None: X2 = X - #never do this: self._K_dist = X[:,None,:]-X2[None,:,:] # this can be computationally heavy - #_K_dist = X[:,None,:]-X2[None,:,:] - #_K_dist2 = np.square(_K_dist/self.lengthscale) - X = X/self.lengthscale - X2 = 
X2/self.lengthscale - self._K_dist2 = (-2.*np.dot(X, X2.T) + np.sum(np.square(X),1)[:,None] + np.sum(np.square(X2),1)[None,:]) + if X2 is None: + self._X2 = None + X = X/self.lengthscale + Xsquare = np.sum(np.square(X),1) + self._K_dist2 = (-2.*tdot(X) + Xsquare[:,None] + Xsquare[None,:]) + else: + self._X2 = X2.copy() + X = X/self.lengthscale + X2 = X2/self.lengthscale + self._K_dist2 = (-2.*np.dot(X, X2.T) + np.sum(np.square(X),1)[:,None] + np.sum(np.square(X2),1)[None,:]) self._K_dvar = np.exp(-0.5*self._K_dist2) def _psi_computations(self,Z,mu,S): #here are the "statistics" for psi1 and psi2 - if not np.all(Z==self._Z): + if not np.array_equal(Z, self._Z): #Z has changed, compute Z specific stuff self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # M,M,Q self._psi2_Zdist = 0.5*(Z[:,None,:]-Z[None,:,:]) # M,M,Q self._psi2_Zdist_sq = np.square(self._psi2_Zdist/self.lengthscale) # M,M,Q self._Z = Z - if not (np.all(Z==self._Z) and np.all(mu==self._mu) and np.all(S==self._S)): + if not (np.array_equal(Z, self._Z) and np.array_equal(mu, self._mu) and np.array_equal(S, self._S)): #something's changed. recompute EVERYTHING #psi1 diff --git a/GPy/kern/white.py b/GPy/kern/white.py index f5d6894a..be6aad45 100644 --- a/GPy/kern/white.py +++ b/GPy/kern/white.py @@ -30,17 +30,15 @@ class white(kernpart): return ['variance'] def K(self,X,X2,target): - if X.shape==X2.shape: - if np.all(X==X2): - np.add(target,np.eye(X.shape[0])*self.variance,target) + if X2 is None: + target += np.eye(X.shape[0])*self.variance def Kdiag(self,X,target): target += self.variance def dK_dtheta(self,dL_dK,X,X2,target): - if X.shape==X2.shape: - if np.all(X==X2): - target += np.trace(dL_dK) + if X2 is None: + target += np.trace(dL_dK) def dKdiag_dtheta(self,dL_dKdiag,X,target): target += np.sum(dL_dKdiag) diff --git a/GPy/likelihoods/Gaussian.py b/GPy/likelihoods/Gaussian.py index 25d12491..d3696fa6 100644 --- a/GPy/likelihoods/Gaussian.py +++ b/GPy/likelihoods/Gaussian.py @@ -30,7 +30,7 @@ class Gaussian(likelihood): self.trYYT = np.trace(self.YYT) else: self.YYT = None - self.trYYT = None + self.trYYT = np.sum(np.square(self.Y)) def _get_params(self): return np.asarray(self._variance) diff --git a/GPy/models/Bayesian_GPLVM.py b/GPy/models/Bayesian_GPLVM.py index 0d4cf91e..6333fb1c 100644 --- a/GPy/models/Bayesian_GPLVM.py +++ b/GPy/models/Bayesian_GPLVM.py @@ -33,7 +33,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM): X = self.initialise_latent(init, Q, Y) if X_variance is None: - X_variance = np.clip((np.ones_like(X) * 0.5) + .01 * np.random.randn(*X.shape), 0, 1) + X_variance = np.clip((np.ones_like(X) * 0.5) + .01 * np.random.randn(*X.shape), 0.001, 1) if Z is None: Z = np.random.permutation(X.copy())[:M] diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py index dc77e795..697a9978 100644 --- a/GPy/models/sparse_GP.py +++ b/GPy/models/sparse_GP.py @@ -3,7 +3,7 @@ import numpy as np import pylab as pb -from ..util.linalg import mdot, jitchol, chol_inv, pdinv, trace_dot +from ..util.linalg import mdot, jitchol, chol_inv, pdinv, trace_dot, tdot from ..util.plot import gpplot from .. import kern from GP import GP @@ -50,9 +50,6 @@ class sparse_GP(GP): self.has_uncertain_inputs=True self.X_variance = X_variance - if not self.likelihood.is_heteroscedastic: - self.likelihood.trYYT = np.trace(np.dot(self.likelihood.Y, self.likelihood.Y.T)) # TODO: something more elegant here? 
-
     GP.__init__(self, X, likelihood, kernel=kernel, normalize_X=normalize_X, Xslices=Xslices)

     #normalize X uncertainty also
@@ -86,13 +83,15 @@ class sparse_GP(GP):
                 self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision.flatten().reshape(self.N,1,1)/sf2)).sum(0)
             else:
                 tmp = self.psi1*(np.sqrt(self.likelihood.precision.flatten().reshape(1,self.N))/sf)
-                self.psi2_beta_scaled = np.dot(tmp,tmp.T)
+                #self.psi2_beta_scaled = np.dot(tmp,tmp.T)
+                self.psi2_beta_scaled = tdot(tmp)
         else:
             if self.has_uncertain_inputs:
                 self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision/sf2)).sum(0)
             else:
                 tmp = self.psi1*(np.sqrt(self.likelihood.precision)/sf)
-                self.psi2_beta_scaled = np.dot(tmp,tmp.T)
+                #self.psi2_beta_scaled = np.dot(tmp,tmp.T)
+                self.psi2_beta_scaled = tdot(tmp)

         self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)

@@ -110,10 +109,11 @@ class sparse_GP(GP):
         self.psi1V = np.dot(self.psi1, self.V)
         tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.Bi),lower=1,trans=1)[0]
         self.C = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0]
+        #TODO: can we multiply in C by forward substitution?
         self.Cpsi1V = np.dot(self.C,self.psi1V)
         self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T)
-        #self.E = np.dot(self.Cpsi1VVpsi1,self.C)/sf2
-        self.E = np.dot(self.Cpsi1V/sf,self.Cpsi1V.T/sf)
+        #self.E = np.dot(self.Cpsi1V/sf,self.Cpsi1V.T/sf)
+        self.E = tdot(self.Cpsi1V/sf)

         # Compute dL_dpsi
         # FIXME: this is untested for the heteroscedastic + uncertain inputs case
         self.dL_dpsi0 = - 0.5 * self.D * (self.likelihood.precision * np.ones([self.N,1])).flatten()
@@ -166,9 +166,9 @@ class sparse_GP(GP):
             #self.partial_for_likelihood += -np.diag(np.dot((self.C - 0.5 * mdot(self.C,self.psi2_beta_scaled,self.C) ) , self.psi1VVpsi1 ))*self.likelihood.precision #dD
         else:
             #likelihood is not heteroscedastic
-            self.partial_for_likelihood = - 0.5 * self.N*self.D*self.likelihood.precision + 0.5 * np.sum(np.square(self.likelihood.Y))*self.likelihood.precision**2
+            self.partial_for_likelihood = - 0.5 * self.N*self.D*self.likelihood.precision + 0.5 * self.likelihood.trYYT*self.likelihood.precision**2
             self.partial_for_likelihood += 0.5 * self.D * (self.psi0.sum()*self.likelihood.precision**2 - np.trace(self.A)*self.likelihood.precision*sf2)
-            self.partial_for_likelihood += 0.5 * self.D * trace_dot(self.Bi,self.A)*self.likelihood.precision # TODO: unstable?
+ self.partial_for_likelihood += 0.5 * self.D * trace_dot(self.Bi,self.A)*self.likelihood.precision self.partial_for_likelihood += self.likelihood.precision*(0.5*trace_dot(self.psi2_beta_scaled,self.E*sf2) - np.trace(self.Cpsi1VVpsi1)) diff --git a/GPy/testing/unit_tests.py b/GPy/testing/unit_tests.py index 55a1fb65..ee8368ac 100644 --- a/GPy/testing/unit_tests.py +++ b/GPy/testing/unit_tests.py @@ -112,6 +112,16 @@ class GradientTests(unittest.TestCase): bias = GPy.kern.bias(2) self.check_model_with_white(bias, model_type='GP_regression', dimension=2) + def test_GP_regression_linear_kern_1D_ARD(self): + ''' Testing the GP regression with linear kernel on 1d data ''' + linear = GPy.kern.linear(1,ARD=True) + self.check_model_with_white(linear, model_type='GP_regression', dimension=1) + + def test_GP_regression_linear_kern_2D_ARD(self): + ''' Testing the GP regression with linear kernel on 2d data ''' + linear = GPy.kern.linear(2,ARD=True) + self.check_model_with_white(linear, model_type='GP_regression', dimension=2) + def test_GP_regression_linear_kern_1D(self): ''' Testing the GP regression with linear kernel on 1d data ''' linear = GPy.kern.linear(1) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index 34e30dca..b19aa2b6 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -196,7 +196,7 @@ def tdot_blas(mat, out=None): if (mat.dtype != 'float64') or (len(mat.shape) != 2): return np.dot(mat, mat.T) nn = mat.shape[0] - if not out: + if out is None: out = np.zeros((nn,nn)) else: assert(out.dtype == 'float64') @@ -211,7 +211,7 @@ def tdot_blas(mat, out=None): # could avoid the copy. I also thought swapping to cblas API would allow use # of C order. However, I tried that and had errors with large matrices: # http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot_broken.py - mat = mat.copy(order='F') + mat = np.asfortranarray(mat) TRANS = c_char('n') N = c_int(mat.shape[0]) K = c_int(mat.shape[1]) @@ -225,7 +225,7 @@ def tdot_blas(mat, out=None): _blaslib.dsyrk_(byref(UPLO), byref(TRANS), byref(N), byref(K), byref(ALPHA), A, byref(LDA), byref(BETA), C, byref(LDC)) - symmetrify(out.T) + symmetrify(out,upper=True) return out @@ -235,7 +235,7 @@ def tdot(*args, **kwargs): else: return tdot_numpy(*args,**kwargs) -def symmetrify(A): +def symmetrify(A,upper=False): """ Take the square matrix A and make it symmetrical by copting elements from the lower half to the upper @@ -257,9 +257,13 @@ def symmetrify(A): } } """ - if A.flags['C_CONTIGUOUS']: + if A.flags['C_CONTIGUOUS'] and upper: + weave.inline(f_contig_code,['A','N']) + elif A.flags['C_CONTIGUOUS'] and not upper: weave.inline(c_contig_code,['A','N']) - elif A.flags['F_CONTIGUOUS']: + elif A.flags['F_CONTIGUOUS'] and upper: + weave.inline(c_contig_code,['A','N']) + elif A.flags['F_CONTIGUOUS'] and not upper: weave.inline(f_contig_code,['A','N']) else: tmp = np.tril(A) From 8306bb652ccd26d818f102a6aa35a84e01cea9c3 Mon Sep 17 00:00:00 2001 From: Neil Lawrence Date: Fri, 26 Apr 2013 21:35:15 +0100 Subject: [PATCH 03/11] Added first draft of acclaim mocap functionality. 
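A rough usage sketch of what this patch introduces (acclaim_skeleton, load_channels and to_xyz are the names added in the diff below; the file paths are hypothetical):

    from GPy.util import mocap

    skel = mocap.acclaim_skeleton('35.asf')       # parse an acclaim skeleton file
    channels = skel.load_channels('35_01.amc')    # per-frame joint-angle channels
    xyz = skel.to_xyz(channels[0, :])             # one frame of angles -> 3D points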
--- GPy/util/mocap.py | 606 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 606 insertions(+) diff --git a/GPy/util/mocap.py b/GPy/util/mocap.py index e66a36b9..0cc2f20b 100644 --- a/GPy/util/mocap.py +++ b/GPy/util/mocap.py @@ -1,6 +1,611 @@ import os import numpy as np +import math +class vertex: + def __init__(self, name, id, parents=[], children=[], meta = {}): + self.name = name + self.id = id + self.parents = parents + self.children = children + self.meta = meta + + def __str__(self): + return self.name + '(' + str(self.id) + ').' + +class tree: + def __init__(self): + self.vertices = [] + self.vertices.append(vertex(name='root', id=0)) + + def __str__(self): + index = self.find_root() + return self.branch_str(index) + + def branch_str(self, index, indent=''): + out = indent + str(self.vertices[index]) + '\n' + for child in self.vertices[index].children: + out+=self.branch_str(child, indent+' ') + return out + + def find_children(self): + """Take a tree and set the children according to the parents. + + Takes a tree structure which lists the parents of each vertex + and computes the children for each vertex and places them in.""" + for i in range(len(self.vertices)): + self.vertices[i].children = [] + for i in range(len(self.vertices)): + for parent in self.vertices[i].parents: + if i not in self.vertices[parent].children: + self.vertices[parent].children.append(i) + + def find_parents(self): + """Take a tree and set the parents according to the children + + Takes a tree structure which lists the children of each vertex + and computes the parents for each vertex and places them in.""" + for i in range(len(self.vertices)): + self.vertices[i].parents = [] + for i in range(len(self.vertices)): + for child in self.vertices[i].children: + if i not in self.vertices[child].parents: + self.vertices[child].parents.append(i) + + def find_root(self): + """Finds the index of the root node of the tree.""" + self.find_parents() + index = 0 + while len(self.vertices[index].parents)>0: + index = self.vertices[index].parents[0] + return index + + def get_index_by_id(self, id): + """Give the index associated with a given vertex id.""" + for i in range(len(self.vertices)): + if self.vertices[i].id == id: + return i + raise Error, 'Reverse look up of id failed.' + + def get_index_by_name(self, name): + """Give the index associated with a given vertex name.""" + for i in range(len(self.vertices)): + if self.vertices[i].name == name: + return i + raise Error, 'Reverse look up of name failed.' + + def order_vertices(self): + """Order vertices in the graph such that parents always have a lower index than children.""" + + ordered = False + while ordered == False: + for i in range(len(self.vertices)): + ordered = True + for parent in self.vertices[i].parents: + if parent>i: + ordered = False + self.swap_vertices(i, parent) + + + + + def swap_vertices(self, i, j): + """Swap two vertices in the tree structure array. + swap_vertex swaps the location of two vertices in a tree structure array. + ARG tree : the tree for which two vertices are to be swapped. + ARG i : the index of the first vertex to be swapped. + ARG j : the index of the second vertex to be swapped. + RETURN tree : the tree structure with the two vertex locations + swapped. 
+ """ + store_vertex_i = self.vertices[i] + store_vertex_j = self.vertices[j] + self.vertices[j] = store_vertex_i + self.vertices[i] = store_vertex_j + for k in range(len(self.vertices)): + for swap_list in [self.vertices[k].children, self.vertices[k].parents]: + if i in swap_list: + swap_list[swap_list.index(i)] = -1 + if j in swap_list: + swap_list[swap_list.index(j)] = i + if -1 in swap_list: + swap_list[swap_list.index(-1)] = j + + + +def rotation_matrix(xangle, yangle, zangle, order='zxy', degrees=False): + + """Compute the rotation matrix for an angle in each direction. + This is a helper function for computing the rotation matrix for a given set of angles in a given order. + ARG xangle : rotation for x-axis. + ARG yangle : rotation for y-axis. + ARG zangle : rotation for z-axis. + ARG order : the order for the rotations.""" + if degrees: + xangle = math.radians(xangle) + yangle = math.radians(yangle) + zangle = math.radians(zangle) + + # Here we assume we rotate z, then x then y. + c1 = math.cos(xangle) # The x angle + c2 = math.cos(yangle) # The y angle + c3 = math.cos(zangle) # the z angle + s1 = math.sin(xangle) + s2 = math.sin(yangle) + s3 = math.sin(zangle) + + # see http://en.wikipedia.org/wiki/Rotation_matrix for + # additional info. + + if order=='zxy': + rot_mat = np.array([[c2*c3-s1*s2*s3, c2*s3+s1*s2*c3, -s2*c1],[-c1*s3, c1*c3, s1],[s2*c3+c2*s1*s3, s2*s3-c2*s1*c3, c2*c1]]) + else: + rot_mat = np.eye(3) + for i in range(len(order)): + if order[i]=='x': + rot_mat = np.dot(np.array([[1, 0, 0], [0, c1, s1], [0, -s1, c1]]),rot_mat) + elif order[i] == 'y': + rot_mat = np.dot(np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]]),rot_mat) + elif order[i] == 'z': + rot_mat = np.dot(np.array([[c3, s3, 0], [-s3, c3, 0], [0, 0, 1]]),rot_mat) + + return rot_mat + + +# Motion capture data routines. 
+class skeleton(tree): + def __init__(self): + tree.__init__(self) + + def to_xyz(self, channels): + raise NotImplementedError, "this needs to be implemented to use the skeleton class" + + + def finalize(self): + """After loading in a skeleton ensure parents are correct, vertex orders are correct and rotation matrices are correct.""" + + self.find_parents() + self.order_vertices() + self.set_rotation_matrices() + + def smooth_angle_channels(self, channels): + """Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions.""" + for vertex in self.vertices: + for col in vertex.meta['rot_ind']: + if col: + for k in range(1, channels.shape[0]): + diff=channels[k, col]-channels[k-1, col] + if abs(diff+360.)0: + start_val = end_val + end_val = end_val + len(vertex.meta['channels']) + for j in range(num_frames): + channels[j, start_val:end_val] = bones[i][j] + self.resolve_indices(i, start_val) + + self.smooth_angle_channels(channels) + return channels + + + def read_documentation(self, fid): + """Read documentation from an acclaim skeleton file stream.""" + + lin = self.read_line(fid) + while lin[0] != ':': + self.documentation.append(lin) + lin = self.read_line(fid) + return lin + + def read_hierarchy(self, fid): + """Read hierarchy information from acclaim skeleton file stream.""" + + lin = self.read_line(fid) + + while lin != 'end': + parts = lin.split() + if lin != 'begin': + ind = self.get_index_by_name(parts[0]) + for i in range(1, len(parts)): + self.vertices[ind].children.append(self.get_index_by_name(parts[i])) + lin = self.read_line(fid) + lin = self.read_line(fid) + return lin + + def read_line(self, fid): + """Read a line from a file string and check it isn't either empty or commented before returning.""" + lin = '#' + while lin[0] == '#': + lin = fid.readline().strip() + if lin == '': + return lin + return lin + + + def read_root(self, fid): + """Read the root node from an acclaim skeleton file stream.""" + lin = self.read_line(fid) + while lin[0] != ':': + parts = lin.split() + if parts[0]=='order': + order = [] + for i in range(1, len(parts)): + if parts[i].lower()=='rx': + chan = 'Xrotation' + order.append('x') + elif parts[i].lower()=='ry': + chan = 'Yrotation' + order.append('y') + elif parts[i].lower()=='rz': + chan = 'Zrotation' + order.append('z') + elif parts[i].lower()=='tx': + chan = 'Xposition' + elif parts[i].lower()=='ty': + chan = 'Yposition' + elif parts[i].lower()=='tz': + chan = 'Zposition' + elif parts[i].lower()=='l': + chan = 'length' + self.vertices[0].meta['channels'].append(chan) + # order is reversed compared to bvh + self.vertices[0].meta['order'] = order[::-1] + + elif parts[0]=='axis': + # order is reversed compared to bvh + self.vertices[0].meta['axis_order'] = parts[1][::-1].lower() + elif parts[0]=='position': + self.vertices[0].meta['offset'] = [float(parts[1]), + float(parts[2]), + float(parts[3])] + elif parts[0]=='orientation': + self.vertices[0].meta['orientation'] = [float(parts[1]), + float(parts[2]), + float(parts[3])] + lin = self.read_line(fid) + return lin + + def read_skel(self, fid): + """Loads an acclaim skeleton format from a file stream.""" + lin = self.read_line(fid) + while lin: + if lin[0]==':': + if lin[1:]== 'name': + lin = self.read_line(fid) + self.name = lin + elif lin[1:]=='units': + lin = self.read_units(fid) + elif lin[1:]=='documentation': + lin = self.read_documentation(fid) + elif lin[1:]=='root': + lin = self.read_root(fid) + elif 
lin[1:]=='bonedata': + lin = self.read_bonedata(fid) + elif lin[1:]=='hierarchy': + lin = self.read_hierarchy(fid) + elif lin[1:8]=='version': + lin = self.read_line(fid) + continue + else: + if not lin: + self.finalize() + return + lin = self.read_line(fid) + else: + raise Error, 'Unrecognised file format' + + def read_units(self, fid): + """Read units from an acclaim skeleton file stream.""" + lin = self.read_line(fid) + while lin[0] != ':': + parts = lin.split() + if parts[0]=='mass': + self.mass = float(parts[1]) + elif parts[0]=='length': + self.length = float(parts[1]) + elif parts[0]=='angle': + self.angle = parts[1] + lin = self.read_line(fid) + return lin + + def resolve_indices(self, index, start_val): + """Get indices for the skeleton from the channels when loading in channel data.""" + + channels = self.vertices[index].meta['channels'] + base_channel = start_val - 1 + rot_ind = np.zeros(3) + pos_ind = np.zeros(3) + for i in range(len(channels)): + if channels[i]== 'Xrotation': + rot_ind[0] = base_channel + i + elif channels[i]=='Yrotation': + rot_ind[1] = base_channel + i + elif channels[i]=='Zrotation': + rot_ind[2] = base_channel + i + elif channels[i]=='Xposition': + pos_ind[0] = base_channel + i + elif channels[i]=='Yposition': + pos_ind[1] = base_channel + i + elif channels[i]=='Zposition': + pos_ind[2] = base_channel + i + self.vertices[index].meta['rot_ind'] = list(rot_ind) + self.vertices[index].meta['pos_ind'] = list(pos_ind) + + def set_rotation_matrices(self): + """Set the meta information at each vertex to contain the correct matrices C and Cinv as prescribed by the rotations and rotation orders.""" + for i in range(len(self.vertices)): + self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0], + self.vertices[i].meta['axis'][1], + self.vertices[i].meta['axis'][2], + self.vertices[i].meta['axis_order'], + degrees=True) + # Todo: invert this by applying angle operations in reverse order + self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C']) + + +# Utilities for loading in x,y,z data. def load_text_data(dataset, directory, centre=True): """Load in a data set of marker points from the Ohio State University C3D motion capture files (http://accad.osu.edu/research/mocap/mocap_data.htm).""" @@ -72,3 +677,4 @@ def read_connections(file_name, point_names): +skel = acclaim_skeleton() From 8b00c5a8279c5d10f7caefafc587a8ec243e01d4 Mon Sep 17 00:00:00 2001 From: Neil Lawrence Date: Fri, 26 Apr 2013 23:37:48 +0100 Subject: [PATCH 04/11] Fixed two bugs in to_xyz, checked on a test version of MATLAB code. 
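Among the fixes visible in the diff below: an aliasing error, where the root's meta['orientation'] and meta['offset'] lists were bound by reference and then modified in place, so each call to to_xyz corrupted the stored values (the fix copies them with list(...) first); and the 'missing channel' sentinel, which changes from 0, a valid channel index, to -1. A minimal sketch of the aliasing failure mode, with made-up values:

    offset = [1.0, 2.0, 3.0]
    xyz = offset              # binds the same list object...
    xyz[0] += 10.0            # ...so this silently corrupts offset too
    assert offset[0] == 11.0

    offset = [1.0, 2.0, 3.0]
    xyz = list(offset)        # the fix: take a copy before modifying
    xyz[0] += 10.0
    assert offset[0] == 1.0   # the stored offset is left untouched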
--- GPy/util/mocap.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/GPy/util/mocap.py b/GPy/util/mocap.py index 0cc2f20b..2eec687d 100644 --- a/GPy/util/mocap.py +++ b/GPy/util/mocap.py @@ -215,10 +215,10 @@ class acclaim_skeleton(skeleton): self.load_skel(file_name) def to_xyz(self, channels): - rot_val = self.vertices[0].meta['orientation'] + rot_val = list(self.vertices[0].meta['orientation']) for i in range(len(self.vertices[0].meta['rot_ind'])): rind = self.vertices[0].meta['rot_ind'][i] - if rind != 0: + if rind != -1: rot_val[i] += channels[rind] self.vertices[0].meta['rot'] = rotation_matrix(rot_val[0], @@ -227,11 +227,11 @@ class acclaim_skeleton(skeleton): self.vertices[0].meta['axis_order'], degrees=True) # vertex based store of the xyz location - self.vertices[0].meta['xyz'] = self.vertices[0].meta['offset'] + self.vertices[0].meta['xyz'] = list(self.vertices[0].meta['offset']) for i in range(len(self.vertices[0].meta['pos_ind'])): pind = self.vertices[0].meta['pos_ind'][i] - if pind != 0: + if pind != -1: self.vertices[0].meta['xyz'][i] += channels[pind] @@ -253,7 +253,7 @@ class acclaim_skeleton(skeleton): rot_val = np.zeros(3) for j in range(len(self.vertices[ind].meta['rot_ind'])): rind = self.vertices[ind].meta['rot_ind'][j] - if rind != 0: + if rind != -1: rot_val[j] = channels[rind] else: rot_val[j] = 0 @@ -275,7 +275,8 @@ class acclaim_skeleton(skeleton): self.vertices[ind].meta['rot'] = np.dot(np.dot(np.dot(torient_inv,tdof),torient),self.vertices[parent].meta['rot']) - self.vertices[ind].meta['xyz'] += np.dot(self.vertices[ind].meta['offset'],self.vertices[ind].meta['rot']) + + self.vertices[ind].meta['xyz'] = self.vertices[parent].meta['xyz'] + np.dot(self.vertices[ind].meta['offset'],self.vertices[ind].meta['rot']) for i in range(len(children)): cind = children[i] @@ -524,6 +525,7 @@ class acclaim_skeleton(skeleton): self.vertices[0].meta['orientation'] = [float(parts[1]), float(parts[2]), float(parts[3])] + print self.vertices[0].meta['orientation'] lin = self.read_line(fid) return lin @@ -574,9 +576,9 @@ class acclaim_skeleton(skeleton): """Get indices for the skeleton from the channels when loading in channel data.""" channels = self.vertices[index].meta['channels'] - base_channel = start_val - 1 - rot_ind = np.zeros(3) - pos_ind = np.zeros(3) + base_channel = start_val + rot_ind = -np.ones(3, dtype=int) + pos_ind = -np.ones(3, dtype=int) for i in range(len(channels)): if channels[i]== 'Xrotation': rot_ind[0] = base_channel + i From d7ac1d025b6c384e12e44e3a8d43c8801be3d971 Mon Sep 17 00:00:00 2001 From: Neil Lawrence Date: Sat, 27 Apr 2013 00:52:10 +0100 Subject: [PATCH 05/11] Added CMU 35 motion capture data. 
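A sketch of how the new loader is meant to be called (assuming the CMU subject-35 .asf/.amc files are available under the package data path):

    import GPy

    data = GPy.util.datasets.cmu_35_walk_jog()
    Y, lbls = data['Y'], data['lbls']                  # training frames and one-hot sequence labels
    Ytest, lblstest = data['Ytest'], data['lblstest']  # held-out sequences 18 and 29
    print data['info']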
--- GPy/util/datasets.py | 47 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 0e0929c7..d326f31b 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -217,7 +217,6 @@ def crescent_data(num_data=200, seed=default_seed): Y = np.vstack((np.ones((num_data_part[0] + num_data_part[1], 1)), -np.ones((num_data_part[2] + num_data_part[3], 1)))) return {'X':X, 'Y':Y, 'info': "Two separate classes of data formed approximately in the shape of two crescents."} - def creep_data(): all_data = np.loadtxt(os.path.join(data_path, 'creep', 'taka')) y = all_data[:, 1:2].copy() @@ -226,3 +225,49 @@ def creep_data(): X = all_data[:, features].copy() return {'X': X, 'y' : y} +def cmu_35_walk_jog(): + + skel = GPy.util.mocap.acclaim_skeleton(os.path.join(data_path, 'mocap', 'cmu', '35', '35.asf')) + examples = ['01', '02', '03', '04', '05', '06', + '07', '08', '09', '10', '11', '12', + '13', '14', '15', '16', '17', '19', + '20', '21', '22', '23', '24', '25', + '26', '28', '30', '31', '32', '33', '34'] + test_examples = ['18', '29'] + # Label differently for each sequence + exlbls = np.eye(31) + testexlbls = np.eye(2) + tot_length = 0 + tot_test_length = 0 + tY = [] + tlbls = [] + for i in range(len(examples)): + tmpchan = skel.load_channels(os.path.join(data_path, 'mocap', 'cmu', '35', '35_' + examples[i] + '.amc')) + tY.append(tmpchan[::4, :]) + tlbls.append(np.tile(exlbls[i, :], (tY[i].shape[0], 1))) + tot_length += tY[i].shape[0] + Y = np.zeros((tot_length, tY[0].shape[1])) + lbls = np.zeros((tot_length, tlbls[0].shape[1])) + endInd = 0 + for i in range(len(tY)): + startInd = endInd + endInd += tY[i].shape[0] + Y[startInd:endInd, :] = tY[i] + lbls[startInd:endInd, :] = tlbls[i] + tYtest = [] + tlblstest = [] + for i in range(len(test_examples)): + tmpchan = skel.load_channels(os.path.join(data_path, 'mocap', 'cmu', '35', '35_' + test_examples[i] + '.amc')) + tYtest.append(tmpchan[::4, :]) + tlblstest.append(np.tile(testexlbls[i, :], (tYtest[i].shape[0], 1))) + tot_test_length += tYtest[i].shape[0] + + Ytest = np.zeros((tot_test_length, tYtest[0].shape[1])) + lblstest = np.zeros((tot_test_length, tlblstest[0].shape[1])) + endInd = 0 + for i in range(len(tYtest)): + startInd = endInd + endInd += tYtest[i].shape[0] + Ytest[startInd:endInd, :] = tYtest[i] + lblstest[startInd:endInd, :] = tlblstest[i] + return {'Y': Y, 'lbls' : lbls, 'Ytest': Ytest, 'lblstest' : lblstest, 'info': "Walk and jog data from CMU data base subject 35."} From ac842d51e6e68cf8eac3bb7c4fb8268d1ec3f301 Mon Sep 17 00:00:00 2001 From: Neil Lawrence Date: Sat, 27 Apr 2013 10:39:55 +0100 Subject: [PATCH 06/11] cmu_mocap() example mostly working except some fiddling with axes for visualization. Also changes to naming of scaling and offset parameters in GP.py and deal with the case where the scale parameter is zero. 
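The zero-scale handling matters because normalization divides each output column by its standard deviation, and a column that is constant (a mocap channel that never moves, say) has standard deviation exactly zero. A minimal sketch of the problem and of the guard used in the diff below:

    import numpy as np

    Y = np.ones((10, 2))
    Y[:, 1] = np.random.randn(10)
    scale = Y.std(0)[None, :]                # column 0 is constant, so scale[0, 0] == 0.
    scale[np.nonzero(scale == 0.)] = 1.0e-3  # the guard added to the Gaussian likelihood
    Ynorm = (Y - Y.mean(0)[None, :]) / scale # now safe to normalize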
--- GPy/likelihoods/Gaussian.py | 33 ++++++--- GPy/models/GP.py | 2 - GPy/models/GPLVM.py | 4 +- GPy/util/datasets.py | 123 +++++++++++++++++++++++----------- GPy/util/mocap.py | 8 +++ GPy/util/visualize.py | 130 ++++++++++++++++++++++++------------ 6 files changed, 202 insertions(+), 98 deletions(-) diff --git a/GPy/likelihoods/Gaussian.py b/GPy/likelihoods/Gaussian.py index d3696fa6..23ab216e 100644 --- a/GPy/likelihoods/Gaussian.py +++ b/GPy/likelihoods/Gaussian.py @@ -2,19 +2,30 @@ import numpy as np from likelihood import likelihood class Gaussian(likelihood): + """ + Likelihood class for doing Expectation propagation + + :param Y: observed output (Nx1 numpy.darray) + ..Note:: Y values allowed depend on the likelihood_function used + :param variance : + :param normalize: whether to normalize the data before computing (predictions will be in original scales) + :type normalize: False|True + """ def __init__(self,data,variance=1.,normalize=False): self.is_heteroscedastic = False self.Nparams = 1 self.Z = 0. # a correction factor which accounts for the approximation made N, self.D = data.shape - #normaliztion + #normalization if normalize: - self._mean = data.mean(0)[None,:] - self._std = data.std(0)[None,:] + self._bias = data.mean(0)[None,:] + self._scale = data.std(0)[None,:] + # Don't scale outputs which have zero variance to zero. + self._scale[np.nonzero(self._scale==0.)] = 1.0e-3 else: - self._mean = np.zeros((1,self.D)) - self._std = np.ones((1,self.D)) + self._bias = np.zeros((1,self.D)) + self._scale = np.ones((1,self.D)) self.set_data(data) @@ -24,7 +35,7 @@ class Gaussian(likelihood): self.data = data self.N,D = data.shape assert D == self.D - self.Y = (self.data - self._mean)/self._std + self.Y = (self.data - self._bias)/self._scale if D > self.N: self.YYT = np.dot(self.Y,self.Y.T) self.trYYT = np.trace(self.YYT) @@ -47,19 +58,19 @@ class Gaussian(likelihood): """ Un-normalize the prediction and add the likelihood variance, then return the 5%, 95% interval """ - mean = mu*self._std + self._mean + mean = mu*self._scale + self._bias if full_cov: if self.D >1: raise NotImplementedError, "TODO" #Note. for D>1, we need to re-normalise all the outputs independently. # This will mess up computations of diag(true_var), below. #note that the upper, lower quantiles should be the same shape as mean - true_var = (var + np.eye(var.shape[0])*self._variance)*self._std**2 - _5pc = mean + - 2.*np.sqrt(np.diag(true_var)) + true_var = (var + np.eye(var.shape[0])*self._variance)*self._scale**2 + _5pc = mean - 2.*np.sqrt(np.diag(true_var)) _95pc = mean + 2.*np.sqrt(np.diag(true_var)) else: - true_var = (var + self._variance)*self._std**2 - _5pc = mean + - 2.*np.sqrt(true_var) + true_var = (var + self._variance)*self._scale**2 + _5pc = mean - 2.*np.sqrt(true_var) _95pc = mean + 2.*np.sqrt(true_var) return mean, true_var, _5pc, _95pc diff --git a/GPy/models/GP.py b/GPy/models/GP.py index 74bb5915..c6e46bea 100644 --- a/GPy/models/GP.py +++ b/GPy/models/GP.py @@ -19,8 +19,6 @@ class GP(model): :parm likelihood: a GPy likelihood :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_X: False|True - :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales) - :type normalize_Y: False|True :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). 
See (link:slicing) :rtype: model object :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1 diff --git a/GPy/models/GPLVM.py b/GPy/models/GPLVM.py index bd56ff12..c0d9429a 100644 --- a/GPy/models/GPLVM.py +++ b/GPy/models/GPLVM.py @@ -24,12 +24,12 @@ class GPLVM(GP): :type init: 'PCA'|'random' """ - def __init__(self, Y, Q, init='PCA', X = None, kernel=None, **kwargs): + def __init__(self, Y, Q, init='PCA', X = None, kernel=None, normalize_Y=False, **kwargs): if X is None: X = self.initialise_latent(init, Q, Y) if kernel is None: kernel = kern.rbf(Q) + kern.bias(Q) - likelihood = Gaussian(Y) + likelihood = Gaussian(Y, normalize=normalize_Y) GP.__init__(self, X, likelihood, kernel, **kwargs) def initialise_latent(self, init, Q, Y): diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index d326f31b..ab290dd8 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -225,49 +225,92 @@ def creep_data(): X = all_data[:, features].copy() return {'X': X, 'y' : y} -def cmu_35_walk_jog(): +def cmu_mocap_49_balance(): + """Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009.""" + train_motions = ['18', '19'] + test_motions = ['20'] + data = cmu_mocap('49', train_motions, test_motions, sample_every=4) + data['info'] = "One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of " + data['info'] + return data - skel = GPy.util.mocap.acclaim_skeleton(os.path.join(data_path, 'mocap', 'cmu', '35', '35.asf')) - examples = ['01', '02', '03', '04', '05', '06', +def cmu_mocap_35_walk_jog(): + """Load CMU subject 35's walking and jogging motions, the same data that was used by Taylor, Roweis and Hinton at NIPS 2007. but without their preprocessing. Also used by Lawrence at AISTATS 2007.""" + train_motions = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '19', '20', '21', '22', '23', '24', '25', '26', '28', '30', '31', '32', '33', '34'] - test_examples = ['18', '29'] - # Label differently for each sequence - exlbls = np.eye(31) - testexlbls = np.eye(2) - tot_length = 0 - tot_test_length = 0 - tY = [] - tlbls = [] - for i in range(len(examples)): - tmpchan = skel.load_channels(os.path.join(data_path, 'mocap', 'cmu', '35', '35_' + examples[i] + '.amc')) - tY.append(tmpchan[::4, :]) - tlbls.append(np.tile(exlbls[i, :], (tY[i].shape[0], 1))) - tot_length += tY[i].shape[0] - Y = np.zeros((tot_length, tY[0].shape[1])) - lbls = np.zeros((tot_length, tlbls[0].shape[1])) - endInd = 0 - for i in range(len(tY)): - startInd = endInd - endInd += tY[i].shape[0] - Y[startInd:endInd, :] = tY[i] - lbls[startInd:endInd, :] = tlbls[i] - tYtest = [] - tlblstest = [] - for i in range(len(test_examples)): - tmpchan = skel.load_channels(os.path.join(data_path, 'mocap', 'cmu', '35', '35_' + test_examples[i] + '.amc')) - tYtest.append(tmpchan[::4, :]) - tlblstest.append(np.tile(testexlbls[i, :], (tYtest[i].shape[0], 1))) - tot_test_length += tYtest[i].shape[0] + test_motions = ['18', '29'] + data = cmu_mocap('35', train_motions, test_motions, sample_every=4) + data['info'] = "Walk and jog data from CMU data base subject 35. As used in Tayor, Roweis and Hinton at NIPS 2007, but without their pre-processing (i.e. as used by Lawrence at AISTATS 2007). 
It consists of " + data['info'] + return data - Ytest = np.zeros((tot_test_length, tYtest[0].shape[1])) - lblstest = np.zeros((tot_test_length, tlblstest[0].shape[1])) - endInd = 0 - for i in range(len(tYtest)): - startInd = endInd - endInd += tYtest[i].shape[0] - Ytest[startInd:endInd, :] = tYtest[i] - lblstest[startInd:endInd, :] = tlblstest[i] - return {'Y': Y, 'lbls' : lbls, 'Ytest': Ytest, 'lblstest' : lblstest, 'info': "Walk and jog data from CMU data base subject 35."} +def cmu_mocap(subject, train_motions, test_motions=[], sample_every=4): + """Load a given subject's training and test motions from the CMU motion capture data.""" + + # Load in subject skeleton. + subject_dir = os.path.join(data_path, 'mocap', 'cmu', subject) + skel = GPy.util.mocap.acclaim_skeleton(os.path.join(subject_dir, subject + '.asf')) + + # Set up labels for each sequence + exlbls = np.eye(len(train_motions)) + + # Load sequences + tot_length = 0 + temp_Y = [] + temp_lbls = [] + for i in range(len(train_motions)): + temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + train_motions[i] + '.amc')) + temp_Y.append(temp_chan[::sample_every, :]) + temp_lbls.append(np.tile(exlbls[i, :], (temp_Y[i].shape[0], 1))) + tot_length += temp_Y[i].shape[0] + + Y = np.zeros((tot_length, temp_Y[0].shape[1])) + lbls = np.zeros((tot_length, temp_lbls[0].shape[1])) + + end_ind = 0 + for i in range(len(temp_Y)): + start_ind = end_ind + end_ind += temp_Y[i].shape[0] + Y[start_ind:end_ind, :] = temp_Y[i] + lbls[start_ind:end_ind, :] = temp_lbls[i] + if len(test_motions)>0: + temp_Ytest = [] + temp_lblstest = [] + + testexlbls = np.eye(len(test_motions)) + tot_test_length = 0 + for i in range(len(test_motions)): + temp_chan = skel.load_channels(os.path.join(subject_dir, subject + '_' + test_motions[i] + '.amc')) + temp_Ytest.append(temp_chan[::sample_every, :]) + temp_lblstest.append(np.tile(testexlbls[i, :], (temp_Ytest[i].shape[0], 1))) + tot_test_length += temp_Ytest[i].shape[0] + + # Load test data + Ytest = np.zeros((tot_test_length, temp_Ytest[0].shape[1])) + lblstest = np.zeros((tot_test_length, temp_lblstest[0].shape[1])) + + end_ind = 0 + for i in range(len(temp_Ytest)): + start_ind = end_ind + end_ind += temp_Ytest[i].shape[0] + Ytest[start_ind:end_ind, :] = temp_Ytest[i] + lblstest[start_ind:end_ind, :] = temp_lblstest[i] + else: + Ytest = None + lblstest = None + + info = 'Subject: ' + subject + '. Training motions: ' + for motion in train_motions: + info += motion + ', ' + info = info[:-2] + if len(test_motions)>0: + info += '. Test motions: ' + for motion in test_motions: + info += motion + ', ' + info = info[:-2] + '.' + else: + info += '.' + if sample_every != 1: + info += ' Data is sub-sampled to every ' + str(sample_every) + ' frames.' 
+ return {'Y': Y, 'lbls' : lbls, 'Ytest': Ytest, 'lblstest' : lblstest, 'info': info, 'skel': skel} diff --git a/GPy/util/mocap.py b/GPy/util/mocap.py index 2eec687d..76650086 100644 --- a/GPy/util/mocap.py +++ b/GPy/util/mocap.py @@ -157,6 +157,13 @@ class skeleton(tree): def __init__(self): tree.__init__(self) + def connection_matrix(self): + connection = np.zeros((len(self.vertices), len(self.vertices)), dtype=bool) + for i in range(len(self.vertices)): + for j in range(len(self.vertices[i].children)): + connection[i, self.vertices[i].children[j]] = True + return connection + def to_xyz(self, channels): raise NotImplementedError, "this needs to be implemented to use the skeleton class" @@ -557,6 +564,7 @@ class acclaim_skeleton(skeleton): lin = self.read_line(fid) else: raise Error, 'Unrecognised file format' + self.finalize() def read_units(self, fid): """Read units from an acclaim skeleton file stream.""" diff --git a/GPy/util/visualize.py b/GPy/util/visualize.py index 482cc687..9754db63 100644 --- a/GPy/util/visualize.py +++ b/GPy/util/visualize.py @@ -184,71 +184,115 @@ class image_show(data_show): #if self.invert: # self.vals = -self.vals -class stick_show(data_show): - """Show a three dimensional point cloud as a figure. Connect elements of the figure together using the matrix connect.""" + +class mocap_data_show(data_show): + """Base class for visualizing motion capture data.""" def __init__(self, vals, axes=None, connect=None): if axes==None: fig = plt.figure() axes = fig.add_subplot(111, projection='3d') data_show.__init__(self, vals, axes) - self.vals = vals.reshape((3, vals.shape[1]/3)).T - self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()]) - self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()]) - self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()]) - self.points_handle = self.axes.scatter(self.vals[:, 0], self.vals[:, 1], self.vals[:, 2]) - self.axes.set_xlim(self.x_lim) - self.axes.set_ylim(self.y_lim) - self.axes.set_zlim(self.z_lim) - self.axes.set_aspect(1) - self.axes.autoscale(enable=False) self.connect = connect - if not self.connect==None: - x = [] - y = [] - z = [] - self.I, self.J = np.nonzero(self.connect) - for i in range(len(self.I)): - x.append(self.vals[self.I[i], 0]) - x.append(self.vals[self.J[i], 0]) - x.append(np.NaN) - y.append(self.vals[self.I[i], 1]) - y.append(self.vals[self.J[i], 1]) - y.append(np.NaN) - z.append(self.vals[self.I[i], 2]) - z.append(self.vals[self.J[i], 2]) - z.append(np.NaN) - self.line_handle = self.axes.plot(np.array(x), np.array(y), np.array(z), 'b-') + self.process_values(vals) + self.initialize_axes() + self.draw_vertices() + self.finalize_axes() + self.draw_edges() self.axes.figure.canvas.draw() - def modify(self, vals): - self.points_handle.remove() - self.line_handle[0].remove() - self.vals = vals.reshape((3, vals.shape[1]/3)).T + def draw_vertices(self): self.points_handle = self.axes.scatter(self.vals[:, 0], self.vals[:, 1], self.vals[:, 2]) - self.axes.set_xlim(self.x_lim) - self.axes.set_ylim(self.y_lim) - self.axes.set_zlim(self.z_lim) + + def draw_edges(self): self.line_handle = [] if not self.connect==None: x = [] y = [] z = [] self.I, self.J = np.nonzero(self.connect) - for i in range(len(self.I)): - x.append(self.vals[self.I[i], 0]) - x.append(self.vals[self.J[i], 0]) + for i, j in zip(self.I, self.J): + x.append(self.vals[i, 0]) + x.append(self.vals[j, 0]) x.append(np.NaN) - y.append(self.vals[self.I[i], 1]) - y.append(self.vals[self.J[i], 1]) + 
y.append(self.vals[i, 1])
+                y.append(self.vals[j, 1])
                 y.append(np.NaN)
-                z.append(self.vals[self.I[i], 2])
-                z.append(self.vals[self.J[i], 2])
+                z.append(self.vals[i, 2])
+                z.append(self.vals[j, 2])
                 z.append(np.NaN)
             self.line_handle = self.axes.plot(np.array(x), np.array(y), np.array(z), 'b-')
-
+
+    def modify(self, vals):
+        self.process_values(vals)
+        self.initialize_axes_modify()
+        self.draw_vertices()
+        self.finalize_axes_modify()
+        self.draw_edges()
         self.axes.figure.canvas.draw()

+    def process_values(self, vals):
+        raise NotImplementedError, "this needs to be implemented to use the data_show class"
+
+    def initialize_axes(self):
+        """Set up the axes with the right limits and scaling."""
+        self.x_lim = np.array([self.vals[:, 0].min(), self.vals[:, 0].max()])
+        self.y_lim = np.array([self.vals[:, 1].min(), self.vals[:, 1].max()])
+        self.z_lim = np.array([self.vals[:, 2].min(), self.vals[:, 2].max()])
+
+    def initialize_axes_modify(self):
+        self.points_handle.remove()
+        self.line_handle[0].remove()
+
+    def finalize_axes(self):
+        self.axes.set_xlim(self.x_lim)
+        self.axes.set_ylim(self.y_lim)
+        self.axes.set_zlim(self.z_lim)
+        self.axes.set_aspect(1)
+        self.axes.autoscale(enable=False)
+
+    def finalize_axes_modify(self):
+        self.axes.set_xlim(self.x_lim)
+        self.axes.set_ylim(self.y_lim)
+        self.axes.set_zlim(self.z_lim)

+class stick_show(mocap_data_show):
+    """Show a three dimensional point cloud as a figure. Connect elements of the figure together using the matrix connect."""
+    def __init__(self, vals, axes=None, connect=None):
+        mocap_data_show.__init__(self, vals, axes, connect)
+
+    def process_values(self, vals):
+        self.vals = vals.reshape((3, vals.shape[1]/3)).T
+
+class skeleton_show(mocap_data_show):
+    """data_show class for visualizing motion capture data encoded as a skeleton with angles."""
+    def __init__(self, vals, skel, padding=0, axes=None):
+        self.skel = skel
+        self.padding = padding
+        connect = skel.connection_matrix()
+        mocap_data_show.__init__(self, vals, axes, connect)
+
+    def process_values(self, vals):
+        if self.padding>0:
+            channels = np.zeros((vals.shape[0], vals.shape[1]+self.padding))
+            channels[:, 0:vals.shape[0]] = vals
+        else:
+            channels = vals
+        vals_mat = self.skel.to_xyz(channels.flatten())
+        # copy before re-ordering the columns, so the swap below doesn't
+        # read values it has already overwritten
+        self.vals = vals_mat.copy()
+        # Flip the Y and Z axes
+        self.vals[:, 0] = vals_mat[:, 0]
+        self.vals[:, 1] = vals_mat[:, 2]
+        self.vals[:, 2] = vals_mat[:, 1]
+
+def wrap_around(vals, lim, connect):
+    # Wrap values into the given limits and drop connections that cross
+    # the wrap boundary.
+    quot = lim[1] - lim[0]
+    vals = np.remainder(vals, quot) + lim[0]
+    nVals = np.floor(vals/quot)
+    for i in range(connect.shape[0]):
+        for j in np.nonzero(connect[i, :])[0]:
+            if nVals[i] != nVals[j]:
+                connect[i, j] = False
+    return vals, connect

From 52ba8e4ba36fdfbcb0f0e643c7e1a366065fe250 Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Sun, 28 Apr 2013 17:22:04 +0100
Subject: [PATCH 07/11] removed slices from models

slices are now handled by special indexing kern parts, such as
coregionalisation and independent_outputs. The old slicing functionality has
been removed simply to clean up the code a little. Note that input_slices
still exist (and will continue to be useful) in kern.py.
They do need a little work though, for the psi-statistics
---
 GPy/kern/kern.py                   | 152 ++++++++++-------------------
 GPy/models/GP.py                   |  63 +++++-------
 GPy/models/GP_regression.py        |  10 +-
 GPy/models/generalized_FITC.py     |  17 ++--
 GPy/models/sparse_GP.py            |  20 ++--
 GPy/models/sparse_GP_regression.py |  12 +--
 GPy/models/warped_GP.py            |   4 +-
 7 files changed, 103 insertions(+), 175 deletions(-)

diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py
index a6551e11..4547fadc 100644
--- a/GPy/kern/kern.py
+++ b/GPy/kern/kern.py
@@ -13,15 +13,9 @@ from prod import prod
 class kern(parameterised):
     def __init__(self, D, parts=[], input_slices=None):
         """
-        This kernel does 'compound' structures.
+        This is the main kernel class for GPy. It handles multiple (additive) kernel functions, and keeps track of various things like which parameters live where.
 
-        The compund structure enables many features of GPy, including
-          - Hierarchical models
-          - Correleated output models
-          - multi-view learning
-
-        Hadamard product and outer-product kernels will require a new class.
-          This feature is currently WONTFIX. for small number sof inputs, you can use the sympy kernel for this.
+        The technical code for kernels is divided into _parts_ (see e.g. rbf.py). This object contains a list of parts, which are computed additively. For multiplication, special _prod_ parts are used.
 
         :param D: The dimensionality of the kernel's input space
         :type D: int
@@ -94,34 +88,6 @@ class kern(parameterised):
             self.param_slices.append(slice(count, count + p.Nparam))
             count += p.Nparam
 
-    def _process_slices(self, slices1=None, slices2=None):
-        """
-        Format the slices so that they can easily be used.
-        Both slices can be any of three things:
-         - If None, the new points covary through every kernel part (default)
-         - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
-         - If a list of booleans, specifying which kernel parts are active
-
-        if the second arg is False, return only slices1
-
-        returns actual lists of slice objects
-        """
-        if slices1 is None:
-            slices1 = [slice(None)] * self.Nparts
-        elif all([type(s_i) is bool for s_i in slices1]):
-            slices1 = [slice(None) if s_i else slice(0) for s_i in slices1]
-        else:
-            assert all([type(s_i) is slice for s_i in slices1]), "invalid slice objects"
-        if slices2 is None:
-            slices2 = [slice(None)] * self.Nparts
-        elif slices2 is False:
-            return slices1
-        elif all([type(s_i) is bool for s_i in slices2]):
-            slices2 = [slice(None) if s_i else slice(0) for s_i in slices2]
-        else:
-            assert all([type(s_i) is slice for s_i in slices2]), "invalid slice objects"
-        return slices1, slices2
-
     def __add__(self, other):
         assert self.D == other.D
         newkern = kern(self.D, self.parts + other.parts, self.input_slices + other.input_slices)
@@ -142,7 +108,7 @@ class kern(parameterised):
         :param other: the other kernel to be added
         :type other: GPy.kern
         """
-        return self +other
+        return self + other
 
     def add_orthogonal(self, other):
         """
@@ -285,18 +251,19 @@ class kern(parameterised):
 
         return sum([[name + '_' + n for n in k._get_param_names()] for name, k in zip(names, self.parts)], [])
 
-    def K(self, X, X2=None, slices1=None, slices2=None):
+    def K(self, X, X2=None, which_parts='all'):
+        if which_parts=='all':
+            which_parts = [True]*self.Nparts
         assert X.shape[1] == self.D
-        slices1, slices2 = self._process_slices(slices1, slices2)
         if X2 is None:
             target = np.zeros((X.shape[0], X.shape[0]))
-            [p.K(X[s1, i_s], None, target=target[s1, s2]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+            [p.K(X[:, i_s], None, target=target) for p, i_s, part_i_used in zip(self.parts, self.input_slices, which_parts) if part_i_used]
         else:
             target = np.zeros((X.shape[0], X2.shape[0]))
-            [p.K(X[s1, i_s], X2[s2, i_s], target=target[s1, s2]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+            [p.K(X[:, i_s], X2[:,i_s], target=target) for p, i_s, part_i_used in zip(self.parts, self.input_slices, which_parts) if part_i_used]
         return target
 
-    def dK_dtheta(self, dL_dK, X, X2=None, slices1=None, slices2=None):
+    def dK_dtheta(self, dL_dK, X, X2=None):
         """
         :param dL_dK: An array of dL_dK derivatives, dL_dK
         :type dL_dK: Np.ndarray (N x M)
         :param X: Observed data inputs
         :type X: np.ndarray (N x D)
         :param X2: Observed data inputs (optional, defaults to X)
         :type X2: np.ndarray (M x D)
-        :param slices1: a slice object for each kernel part, describing which data are affected by each kernel part
-        :type slices1: list of slice objects, or list of booleans
-        :param slices2: slices for X2
         """
         assert X.shape[1] == self.D
-        slices1, slices2 = self._process_slices(slices1, slices2)
         target = np.zeros(self.Nparam)
         if X2 is None:
-            [p.dK_dtheta(dL_dK[s1, s2], X[s1, i_s], None, target[ps]) for p, i_s, ps, s1, s2 in zip(self.parts, self.input_slices,self.param_slices, slices1, slices2)]
+            [p.dK_dtheta(dL_dK, X[:, i_s], None, target[ps]) for p, i_s, ps, in zip(self.parts, self.input_slices, self.param_slices)]
         else:
-            [p.dK_dtheta(dL_dK[s1, s2], X[s1, i_s], X2[s2, i_s], target[ps]) for p, i_s, ps, s1, s2 in zip(self.parts, self.input_slices,self.param_slices, slices1, slices2)]
-
+            [p.dK_dtheta(dL_dK, X[:, i_s], X2[:, i_s], target[ps]) for p, i_s, ps, in zip(self.parts, self.input_slices, self.param_slices)]
         return self._transform_gradients(target)
 
-    def dK_dX(self, dL_dK, X, X2=None, slices1=None, slices2=None):
-        if X2 is None: X2 = X
-        slices1, slices2 = self._process_slices(slices1, slices2)
+    def dK_dX(self, dL_dK, X, X2=None):
         target = np.zeros_like(X)
-        [p.dK_dX(dL_dK[s1, s2], X[s1, i_s], X2[s2, i_s], target[s1, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        if X2 is None:
+            [p.dK_dX(dL_dK, X[:, i_s], None, target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
+        else:
+            [p.dK_dX(dL_dK, X[:, i_s], X2[:, i_s], target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
         return target
 
-    def Kdiag(self, X, slices=None):
+    def Kdiag(self, X, which_parts='all'):
+        if which_parts=='all':
+            which_parts = [True]*self.Nparts
         assert X.shape[1] == self.D
-        slices = self._process_slices(slices, False)
         target = np.zeros(X.shape[0])
-        [p.Kdiag(X[s, i_s], target=target[s]) for p, i_s, s in zip(self.parts, self.input_slices, slices)]
+        [p.Kdiag(X[:, i_s], target=target) for p, i_s in zip(self.parts, self.input_slices)]
         return target
 
-    def dKdiag_dtheta(self, dL_dKdiag, X, slices=None):
+    def dKdiag_dtheta(self, dL_dKdiag, X):
         assert X.shape[1] == self.D
-        assert len(dL_dKdiag.shape) == 1
         assert dL_dKdiag.size == X.shape[0]
-        slices = self._process_slices(slices, False)
         target = np.zeros(self.Nparam)
-        [p.dKdiag_dtheta(dL_dKdiag[s], X[s, i_s], target[ps]) for p, i_s, s, ps in zip(self.parts, self.input_slices, slices, self.param_slices)]
+        [p.dKdiag_dtheta(dL_dKdiag, X[:, i_s], target[ps]) for p, i_s, ps in zip(self.parts, self.input_slices, self.param_slices)]
         return self._transform_gradients(target)
 
-    def dKdiag_dX(self, dL_dKdiag, X, slices=None):
+    def dKdiag_dX(self, dL_dKdiag, X):
         assert X.shape[1] == self.D
-        slices = self._process_slices(slices, False)
         target = np.zeros_like(X)
-        [p.dKdiag_dX(dL_dKdiag[s], X[s, i_s], target[s, i_s]) for p, i_s, s in zip(self.parts, self.input_slices, slices)]
+        [p.dKdiag_dX(dL_dKdiag, X[:, i_s], target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
         return target
 
-    def psi0(self, Z, mu, S, slices=None):
-        slices = self._process_slices(slices, False)
+    def psi0(self, Z, mu, S):
         target = np.zeros(mu.shape[0])
-        [p.psi0(Z, mu[s], S[s], target[s]) for p, s in zip(self.parts, slices)]
+        [p.psi0(Z[:,i_s], mu[:,i_s], S[:,i_s], target) for p, i_s in zip(self.parts, self.input_slices)]
        return target
 
-    def dpsi0_dtheta(self, dL_dpsi0, Z, mu, S, slices=None):
-        slices = self._process_slices(slices, False)
+    def dpsi0_dtheta(self, dL_dpsi0, Z, mu, S):
         target = np.zeros(self.Nparam)
-        [p.dpsi0_dtheta(dL_dpsi0[s], Z, mu[s], S[s], target[ps]) for p, ps, s in zip(self.parts, self.param_slices, slices)]
+        [p.dpsi0_dtheta(dL_dpsi0, Z[:,i_s], mu[:,i_s], S[:,i_s], target[ps]) for p, ps, i_s in zip(self.parts, self.param_slices, self.input_slices)]
         return self._transform_gradients(target)
 
-    def dpsi0_dmuS(self, dL_dpsi0, Z, mu, S, slices=None):
-        slices = self._process_slices(slices, False)
+    def dpsi0_dmuS(self, dL_dpsi0, Z, mu, S):
         target_mu, target_S = np.zeros_like(mu), np.zeros_like(S)
-        [p.dpsi0_dmuS(dL_dpsi0, Z, mu[s], S[s], target_mu[s], target_S[s]) for p, s in zip(self.parts, slices)]
+        [p.dpsi0_dmuS(dL_dpsi0, Z[:,i_s], mu[:,i_s], S[:,i_s], target_mu[:,i_s], target_S[:,i_s]) for p, i_s in zip(self.parts, self.input_slices)]
         return target_mu, target_S
 
-    def psi1(self, Z, mu, S, slices1=None, slices2=None):
-        """Think N,M,Q """
-        slices1, slices2 = self._process_slices(slices1, slices2)
+    def psi1(self, Z, mu, S):
         target = np.zeros((mu.shape[0], Z.shape[0]))
-        [p.psi1(Z[s2], mu[s1], S[s1], target[s1, s2]) for p, s1, s2 in zip(self.parts, slices1, slices2)]
+        [p.psi1(Z[:,i_s], mu[:,i_s], S[:,i_s], target) for p, i_s in zip(self.parts, self.input_slices)]
         return target
 
-    def dpsi1_dtheta(self, dL_dpsi1, Z, mu, S, slices1=None, slices2=None):
-        """N,M,(Ntheta)"""
-        slices1, slices2 = self._process_slices(slices1, slices2)
+    def dpsi1_dtheta(self, dL_dpsi1, Z, mu, S):
         target = np.zeros((self.Nparam))
-        [p.dpsi1_dtheta(dL_dpsi1[s2, s1], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[ps]) for p, ps, s1, s2, i_s in zip(self.parts, self.param_slices, slices1, slices2, self.input_slices)]
+        [p.dpsi1_dtheta(dL_dpsi1, Z[:, i_s], mu[:, i_s], S[:, i_s], target[ps]) for p, ps, i_s in zip(self.parts, self.param_slices, self.input_slices)]
         return self._transform_gradients(target)
 
-    def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, slices1=None, slices2=None):
-        """N,M,Q"""
-        slices1, slices2 = self._process_slices(slices1, slices2)
+    def dpsi1_dZ(self, dL_dpsi1, Z, mu, S):
         target = np.zeros_like(Z)
-        [p.dpsi1_dZ(dL_dpsi1[s2, s1], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[s2, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        [p.dpsi1_dZ(dL_dpsi1, Z[:, i_s], mu[:, i_s], S[:, i_s], target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
         return target
 
-    def dpsi1_dmuS(self, dL_dpsi1, Z, mu, S, slices1=None, slices2=None):
+    def dpsi1_dmuS(self, dL_dpsi1, Z, mu, S):
         """return shapes are N,M,Q"""
-        slices1, slices2 = self._process_slices(slices1, slices2)
         target_mu, target_S = np.zeros((2, mu.shape[0], mu.shape[1]))
-        [p.dpsi1_dmuS(dL_dpsi1[s2, s1], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target_mu[s1, i_s], target_S[s1, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        [p.dpsi1_dmuS(dL_dpsi1, Z[:, i_s], mu[:, i_s], S[:, i_s], target_mu[:, i_s], target_S[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
         return target_mu, target_S
 
-    def psi2(self, Z, mu, S, slices1=None, slices2=None):
+    def psi2(self, Z, mu, S):
         """
         :param Z: np.ndarray of inducing inputs (M x Q)
         :param mu, S: np.ndarrays of means and variances (each N x Q)
         :returns psi2: np.ndarray (N,M,M)
         """
         target = np.zeros((mu.shape[0], Z.shape[0], Z.shape[0]))
-        slices1, slices2 = self._process_slices(slices1, slices2)
-        [p.psi2(Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[s1, s2, s2]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        [p.psi2(Z[:, i_s], mu[:, i_s], S[:, i_s], target) for p, i_s in zip(self.parts, self.input_slices)]
 
         # compute the "cross" terms
+        #TODO: input_slices needed
         for p1, p2 in itertools.combinations(self.parts, 2):
             # white doesn't combine with anything
             if p1.name == 'white' or p2.name == 'white':
@@ -434,14 +386,12 @@ class kern(parameterised):
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return target
 
-    def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S, slices1=None, slices2=None):
-        """Returns shape (N,M,M,Ntheta)"""
-        slices1, slices2 = self._process_slices(slices1, slices2)
+    def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S):
         target = np.zeros(self.Nparam)
-        [p.dpsi2_dtheta(dL_dpsi2[s1, s2, s2], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[ps]) for p, i_s, s1, s2, ps in zip(self.parts, self.input_slices, slices1, slices2, self.param_slices)]
+        [p.dpsi2_dtheta(dL_dpsi2, Z[:, i_s], mu[:, i_s], S[:, i_s], target[ps]) for p, i_s, ps in zip(self.parts, self.input_slices, self.param_slices)]
 
         # compute the "cross" terms
-        # TODO: better looping
+        # TODO: better looping, input_slices
         for i1, i2 in itertools.combinations(range(len(self.parts)), 2):
             p1, p2 = self.parts[i1], self.parts[i2]
             # ipsl1, ipsl2 = self.input_slices[i1], self.input_slices[i2]
@@ -478,12 +428,12 @@
 
         return self._transform_gradients(target)
 
-    def dpsi2_dZ(self, dL_dpsi2, Z, mu, S, slices1=None, slices2=None):
-        slices1, slices2 = self._process_slices(slices1, slices2)
+    def dpsi2_dZ(self, dL_dpsi2, Z, mu, S):
         target = np.zeros_like(Z)
-        [p.dpsi2_dZ(dL_dpsi2[s1, s2, s2], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[s2, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        [p.dpsi2_dZ(dL_dpsi2, Z[:, i_s], mu[:, i_s], S[:, i_s], target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
 
         # compute the "cross" terms
+        #TODO: we need input_slices here.
         for p1, p2 in itertools.combinations(self.parts, 2):
             # white doesn't combine with anything
             if p1.name == 'white' or p2.name == 'white':
                 pass
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"
-
         return target * 2.
 
-    def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, slices1=None, slices2=None):
-        """return shapes are N,M,M,Q"""
-        slices1, slices2 = self._process_slices(slices1, slices2)
+    def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S):
         target_mu, target_S = np.zeros((2, mu.shape[0], mu.shape[1]))
-        [p.dpsi2_dmuS(dL_dpsi2[s1, s2, s2], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target_mu[s1, i_s], target_S[s1, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        [p.dpsi2_dmuS(dL_dpsi2, Z[:, i_s], mu[:, i_s], S[:, i_s], target_mu[:, i_s], target_S[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
 
         # compute the "cross" terms
+        #TODO: we need input_slices here.
         for p1, p2 in itertools.combinations(self.parts, 2):
             # white doesn't combine with anything
             if p1.name == 'white' or p2.name == 'white':
diff --git a/GPy/models/GP.py b/GPy/models/GP.py
index c6e46bea..45ed61ca 100644
--- a/GPy/models/GP.py
+++ b/GPy/models/GP.py
@@ -19,7 +19,6 @@ class GP(model):
     :param likelihood: a GPy likelihood
     :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
     :type normalize_X: False|True
-    :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing)
     :rtype: model object
     :param epsilon_ep: convergence criterion for the Expectation Propagation algorithm, defaults to 0.1
     :param powerep: power-EP parameters [$\eta$,$\delta$], defaults to [1.,1.]
@@ -28,10 +27,9 @@ class GP(model):
     .. Note:: Multiple independent outputs are allowed using columns of Y
 
     """
-    def __init__(self, X, likelihood, kernel, normalize_X=False, Xslices=None):
+    def __init__(self, X, likelihood, kernel, normalize_X=False):
 
         # parse arguments
-        self.Xslices = Xslices
         self.X = X
         assert len(self.X.shape) == 2
         self.N, self.Q = self.X.shape
@@ -64,12 +62,12 @@ class GP(model):
         return np.zeros_like(self.Z)
 
     def _set_params(self, p):
-        self.kern._set_params_transformed(p[:self.kern.Nparam])
+        self.kern._set_params_transformed(p[:self.kern.Nparam_transformed()])
         # self.likelihood._set_params(p[self.kern.Nparam:]) # test by Nicolas
         self.likelihood._set_params(p[self.kern.Nparam_transformed():]) # test by Nicolas
 
-        self.K = self.kern.K(self.X, slices1=self.Xslices, slices2=self.Xslices)
+        self.K = self.kern.K(self.X)
         self.K += self.likelihood.covariance_matrix
 
         self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
@@ -92,7 +90,7 @@ class GP(model):
         """
         Approximates a non-gaussian likelihood using Expectation Propagation
 
-        For a Gaussian (or direct: TODO) likelihood, no iteration is required:
+        For a Gaussian likelihood, no iteration is required:
         this function does nothing
         """
         self.likelihood.fit_full(self.kern.K(self.X))
@@ -122,31 +120,33 @@
         """
         The gradient of all parameters.
 
-        For the kernel parameters, use the chain rule via dL_dK
-
-        For the likelihood parameters, pass in alpha = K^-1 y
+        Note, we use the chain rule: dL_dtheta = dL_dK * dK_dtheta
         """
-        return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X, slices1=self.Xslices, slices2=self.Xslices), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
+        return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
 
-    def _raw_predict(self, _Xnew, slices=None, full_cov=False):
+    def _raw_predict(self, _Xnew, which_parts='all', full_cov=False):
         """
         Internal helper function for making predictions, does not account
         for normalization or likelihood
+
+        #TODO: which_parts does nothing
+
         """
-        Kx = self.kern.K(self.X, _Xnew, slices1=self.Xslices, slices2=slices)
+        Kx = self.kern.K(self.X, _Xnew, which_parts=which_parts)
         mu = np.dot(np.dot(Kx.T, self.Ki), self.likelihood.Y)
         KiKx = np.dot(self.Ki, Kx)
         if full_cov:
-            Kxx = self.kern.K(_Xnew, slices1=slices, slices2=slices)
+            Kxx = self.kern.K(_Xnew, which_parts=which_parts)
             var = Kxx - np.dot(KiKx.T, Kx)
         else:
-            Kxx = self.kern.Kdiag(_Xnew, slices=slices)
+            Kxx = self.kern.Kdiag(_Xnew, which_parts=which_parts)
             var = Kxx - np.sum(np.multiply(KiKx, Kx), 0)
             var = var[:, None]
         return mu, var
 
-    def predict(self, Xnew, slices=None, full_cov=False):
+    def predict(self, Xnew, which_parts='all', full_cov=False):
         """
         Predict the function(s) at the new point(s) Xnew.
 
         Arguments
         ---------
         :param Xnew: The points at which to make a prediction
         :type Xnew: np.ndarray, Nnew x self.Q
-        :param slices: specifies which outputs kernel(s) the Xnew correspond to (see below)
-        :type slices: (None, list of slice objects, list of ints)
+        :param which_parts: specifies which kernel parts to use in the prediction
+        :type which_parts: ('all', list of bools)
         :param full_cov: whether to return the full covariance matrix, or just the diagonal
         :type full_cov: bool
         :rtype: posterior mean, a Numpy array, Nnew x self.D
         :rtype: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
         :rtype: lower and upper boundaries of the 95% confidence intervals, Numpy arrays, Nnew x self.D
 
-        .. Note:: "slices" specifies how the the points X_new co-vary wich the training points.
-
-               - If None, the new points covary through every kernel part (default)
-               - If a list of slices, the i^th slice specifies which data are affected by the i^th kernel part
-               - If a list of booleans, specifying which kernel parts are active
 
         If full_cov and self.D > 1, the return shape of var is Nnew x Nnew x self.D. If self.D == 1, the return shape is Nnew x Nnew.
         This is to allow for different normalizations of the output dimensions.
@@ -174,15 +169,15 @@ class GP(model):
         """
         # normalize X values
         Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
-        mu, var = self._raw_predict(Xnew, slices, full_cov)
+        mu, var = self._raw_predict(Xnew, which_parts, full_cov)
 
-        # now push through likelihood TODO
+        # now push through likelihood
         mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
 
         return mean, var, _025pm, _975pm
 
-    def plot_f(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, full_cov=False):
+    def plot_f(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, full_cov=False):
         """
         Plot the GP's view of the world, where the data is normalized and the likelihood is Gaussian
 
         :param which_data: which of the training data to plot (default all)
         :type which_data: 'all' or a slice object to slice self.X, self.Y
         :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
-        :param which_functions: which of the kernel functions to plot (additively)
-        :type which_functions: list of bools
+        :param which_parts: which of the kernel functions to plot (additively)
+        :type which_parts: 'all', or list of bools
         :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
 
         Plot the posterior of the GP.
         Can plot only part of the data and part of the posterior functions using which_data and which_parts
         Plot the data's view of the world, with non-normalized values and GP predictions passed through the likelihood
         """
-        if which_functions == 'all':
-            which_functions = [True] * self.kern.Nparts
         if which_data == 'all':
             which_data = slice(None)
 
         if self.X.shape[1] == 1:
             Xnew, xmin, xmax = x_frame1D(self.X, plot_limits=plot_limits)
             if samples == 0:
-                m, v = self._raw_predict(Xnew, slices=which_functions)
+                m, v = self._raw_predict(Xnew, which_parts=which_parts)
                 gpplot(Xnew, m, m - 2 * np.sqrt(v), m + 2 * np.sqrt(v))
                 pb.plot(self.X[which_data], self.likelihood.Y[which_data], 'kx', mew=1.5)
             else:
-                m, v = self._raw_predict(Xnew, slices=which_functions, full_cov=True)
+                m, v = self._raw_predict(Xnew, which_parts=which_parts, full_cov=True)
                 Ysim = np.random.multivariate_normal(m.flatten(), v, samples)
                 gpplot(Xnew, m, m - 2 * np.sqrt(np.diag(v)[:, None]), m + 2 * np.sqrt(np.diag(v))[:, None])
                 for i in range(samples):
@@ -230,7 +223,7 @@ class GP(model):
         elif self.X.shape[1] == 2:
             resolution = resolution or 50
             Xnew, xmin, xmax, xx, yy = x_frame2D(self.X, plot_limits, resolution)
-            m, v = self._raw_predict(Xnew, slices=which_functions)
+            m, v = self._raw_predict(Xnew, which_parts=which_parts)
             m = m.reshape(resolution, resolution).T
             pb.contour(xx, yy, m, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet)
             pb.scatter(Xorig[:, 0], Xorig[:, 1], 40, Yorig, linewidth=0, cmap=pb.cm.jet, vmin=m.min(), vmax=m.max())
@@ -246,8 +239,6 @@ class GP(model):
         """
         # TODO include samples
-        if which_functions == 'all':
-            which_functions = [True] * self.kern.Nparts
         if which_data == 'all':
             which_data = slice(None)
 
         if self.X.shape[1] == 1:
             Xu = self.X * self._Xstd + self._Xmean # NOTE self.X are the normalized values now
             Xnew, xmin, xmax = x_frame1D(Xu, plot_limits=plot_limits)
-            m, var, lower, upper = self.predict(Xnew, slices=which_functions)
+            m, var, lower, upper = self.predict(Xnew, which_parts=which_parts)
             gpplot(Xnew, m, lower, upper)
             pb.plot(Xu[which_data], self.likelihood.data[which_data], 'kx', mew=1.5)
 
             if self.has_uncertain_inputs:
@@ -277,7 +268,7 @@ class GP(model):
             resolution = resolution or 50
             Xnew, xx, yy, xmin, xmax = x_frame2D(self.X, plot_limits, resolution)
             x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution)
-            m, var, lower, upper = self.predict(Xnew, slices=which_functions)
+            m, var, lower, upper = self.predict(Xnew, which_parts=which_parts)
             m = m.reshape(resolution, resolution).T
             pb.contour(x, y, m, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet)
             Yf = self.likelihood.Y.flatten()
diff --git a/GPy/models/GP_regression.py b/GPy/models/GP_regression.py
index 5f9f9f3e..7f2673a6 100644
--- a/GPy/models/GP_regression.py
+++ b/GPy/models/GP_regression.py
@@ -11,26 +11,24 @@ class GP_regression(GP):
     """
     Gaussian Process model for regression
 
-    This is a thin wrapper around the GP class, with a set of sensible defalts
+    This is a thin wrapper around the models.GP class, with a set of sensible defaults
 
     :param X: input observations
     :param Y: observed values
-    :param kernel: a GPy kernel, defaults to rbf+white
+    :param kernel: a GPy kernel, defaults to rbf
     :param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
     :type normalize_X: False|True
     :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales)
     :type normalize_Y: False|True
-    :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing)
-
     :rtype: model object
 
     .. Note:: Multiple independent outputs are allowed using columns of Y
 
     """
 
-    def __init__(self,X,Y,kernel=None,normalize_X=False,normalize_Y=False, Xslices=None):
+    def __init__(self,X,Y,kernel=None,normalize_X=False,normalize_Y=False):
         if kernel is None:
             kernel = kern.rbf(X.shape[1])
 
         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)
-        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X, Xslices=Xslices)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
diff --git a/GPy/models/generalized_FITC.py b/GPy/models/generalized_FITC.py
index 26875f64..25b6c18f 100644
--- a/GPy/models/generalized_FITC.py
+++ b/GPy/models/generalized_FITC.py
@@ -23,20 +23,19 @@ class generalized_FITC(sparse_GP):
     :type X_variance: np.ndarray (N x Q) | None
     :param Z: inducing inputs (optional, see note)
     :type Z: np.ndarray (M x Q) | None
-    :param Zslices: slices for the inducing inputs (see slicing TODO: link)
     :param M : Number of inducing points (optional, default 10.
Ignored if Z is not None) :type M: int :param normalize_(X|Y) : whether to normalize the data before computing (predictions will be in original scales) :type normalize_(X|Y): bool """ - def __init__(self, X, likelihood, kernel, Z, X_variance=None, Xslices=None,Zslices=None, normalize_X=False): + def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False): self.Z = Z self.M = self.Z.shape[0] self._precision = likelihood.precision - sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, Xslices=None,Zslices=None, normalize_X=False) + sparse_GP.__init__(self, X, likelihood, kernel=kernel, Z=self.Z, X_variance=None, normalize_X=False) def _set_params(self, p): self.Z = p[:self.M*self.Q].reshape(self.M, self.Q) @@ -145,7 +144,7 @@ class generalized_FITC(sparse_GP): D = 0.5*np.trace(self.Cpsi1VVpsi1) return A+C+D - def _raw_predict(self, Xnew, slices, full_cov=False): + def _raw_predict(self, Xnew, which_parts, full_cov=False): if self.likelihood.is_heteroscedastic: """ Make a prediction for the generalized FITC model @@ -174,16 +173,16 @@ class generalized_FITC(sparse_GP): self.mu_H = mu_H Sigma_H = C + np.dot(mu_u,np.dot(self.Sigma,mu_u.T)) # q(f_star|y) = N(f_star|mu_star,sigma2_star) - Kx = self.kern.K(self.Z, Xnew) + Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts) KR0T = np.dot(Kx.T,self.Lmi.T) mu_star = np.dot(KR0T,mu_H) if full_cov: - Kxx = self.kern.K(Xnew) + Kxx = self.kern.K(Xnew,which_parts=which_parts) var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T)) else: - Kxx = self.kern.Kdiag(Xnew) - Kxx_ = self.kern.K(Xnew) - var_ = Kxx_ + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T)) + Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts) + Kxx_ = self.kern.K(Xnew,which_parts=which_parts) # TODO: RA, is this line needed? + var_ = Kxx_ + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T)) # TODO: RA, is this line needed? var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.M),KR0T.T),0))[:,None] return mu_star[:,None],var else: diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py index 697a9978..20caa1a8 100644 --- a/GPy/models/sparse_GP.py +++ b/GPy/models/sparse_GP.py @@ -9,10 +9,6 @@ from .. import kern from GP import GP from scipy import linalg -#Still TODO: -# make use of slices properly (kernel can now do this) -# enable heteroscedatic noise (kernel will need to compute psi2 as a (NxMxM) array) - class sparse_GP(GP): """ Variational sparse GP model @@ -27,19 +23,16 @@ class sparse_GP(GP): :type X_variance: np.ndarray (N x Q) | None :param Z: inducing inputs (optional, see note) :type Z: np.ndarray (M x Q) | None - :param Zslices: slices for the inducing inputs (see slicing TODO: link) :param M : Number of inducing points (optional, default 10. 
Ignored if Z is not None) :type M: int :param normalize_(X|Y) : whether to normalize the data before computing (predictions will be in original scales) :type normalize_(X|Y): bool """ - def __init__(self, X, likelihood, kernel, Z, X_variance=None, Xslices=None,Zslices=None, normalize_X=False): + def __init__(self, X, likelihood, kernel, Z, X_variance=None, normalize_X=False): self.scale_factor = 100.0# a scaling factor to help keep the algorithm stable self.auto_scale_factor = False self.Z = Z - self.Zslices = Zslices - self.Xslices = Xslices self.M = Z.shape[0] self.likelihood = likelihood @@ -50,7 +43,7 @@ class sparse_GP(GP): self.has_uncertain_inputs=True self.X_variance = X_variance - GP.__init__(self, X, likelihood, kernel=kernel, normalize_X=normalize_X, Xslices=Xslices) + GP.__init__(self, X, likelihood, kernel=kernel, normalize_X=normalize_X) #normalize X uncertainty also if self.has_uncertain_inputs: @@ -65,13 +58,12 @@ class sparse_GP(GP): self.psi1 = self.kern.psi1(self.Z,self.X, self.X_variance).T self.psi2 = self.kern.psi2(self.Z,self.X, self.X_variance) else: - self.psi0 = self.kern.Kdiag(self.X,slices=self.Xslices) + self.psi0 = self.kern.Kdiag(self.X) self.psi1 = self.kern.K(self.Z,self.X) self.psi2 = None def _computations(self): #TODO: find routine to multiply triangular matrices - #TODO: slices for psi statistics (easy enough) sf = self.scale_factor sf2 = sf**2 @@ -252,16 +244,16 @@ class sparse_GP(GP): dL_dZ += self.kern.dK_dX(self.dL_dpsi1,self.Z,self.X) return dL_dZ - def _raw_predict(self, Xnew, slices, full_cov=False): + def _raw_predict(self, Xnew, which_parts='all', full_cov=False): """Internal helper function for making predictions, does not account for normalization""" Kx = self.kern.K(self.Z, Xnew) mu = mdot(Kx.T, self.C/self.scale_factor, self.psi1V) if full_cov: - Kxx = self.kern.K(Xnew) + Kxx = self.kern.K(Xnew,which_parts=which_parts) var = Kxx - mdot(Kx.T, (self.Kmmi - self.C/self.scale_factor**2), Kx) #NOTE this won't work for plotting else: - Kxx = self.kern.Kdiag(Xnew) + Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts) var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.C/self.scale_factor**2, Kx),0) return mu,var[:,None] diff --git a/GPy/models/sparse_GP_regression.py b/GPy/models/sparse_GP_regression.py index 0ef78c32..84a5d37c 100644 --- a/GPy/models/sparse_GP_regression.py +++ b/GPy/models/sparse_GP_regression.py @@ -13,7 +13,7 @@ class sparse_GP_regression(sparse_GP): """ Gaussian Process model for regression - This is a thin wrapper around the GP class, with a set of sensible defalts + This is a thin wrapper around the sparse_GP class, with a set of sensible defalts :param X: input observations :param Y: observed values @@ -22,25 +22,25 @@ class sparse_GP_regression(sparse_GP): :type normalize_X: False|True :param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales) :type normalize_Y: False|True - :param Xslices: how the X,Y data co-vary in the kernel (i.e. which "outputs" they correspond to). See (link:slicing) :rtype: model object .. 
Note:: Multiple independent outputs are allowed using columns of Y
 
     """
 
-    def __init__(self,X,Y,kernel=None,normalize_X=False,normalize_Y=False, Xslices=None,Z=None, M=10):
-        #kern defaults to rbf
+    def __init__(self, X, Y, kernel=None, normalize_X=False, normalize_Y=False, Z=None, M=10):
+        #kern defaults to rbf (plus white for stability)
         if kernel is None:
             kernel = kern.rbf(X.shape[1]) + kern.white(X.shape[1],1e-3)
 
         #Z defaults to a subset of the data
         if Z is None:
-            Z = np.random.permutation(X.copy())[:M]
+            i = np.random.permutation(X.shape[0])[:M]
+            Z = X[i].copy()
         else:
             assert Z.shape[1]==X.shape[1]
 
         #likelihood defaults to Gaussian
         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)
 
-        sparse_GP.__init__(self, X, likelihood, kernel, Z, normalize_X=normalize_X, Xslices=Xslices)
+        sparse_GP.__init__(self, X, likelihood, kernel, Z, normalize_X=normalize_X)
diff --git a/GPy/models/warped_GP.py b/GPy/models/warped_GP.py
index 052f8d8e..9c3ce401 100644
--- a/GPy/models/warped_GP.py
+++ b/GPy/models/warped_GP.py
@@ -14,7 +14,7 @@ from .. import likelihoods
 from .. import kern
 
 class warpedGP(GP):
-    def __init__(self, X, Y, kernel=None, warping_function = None, warping_terms = 3, normalize_X=False, normalize_Y=False, Xslices=None):
+    def __init__(self, X, Y, kernel=None, warping_function = None, warping_terms = 3, normalize_X=False, normalize_Y=False):
 
         if kernel is None:
             kernel = kern.rbf(X.shape[1])
@@ -28,7 +28,7 @@ class warpedGP(GP):
         self.predict_in_warped_space = False
 
         likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y)
-        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X, Xslices=Xslices)
+        GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
 
     def _set_params(self, x):
         self.warping_params = x[:self.warping_function.num_parameters]

From 7d9352c7330d9c826c21c9e8f8cb4aee930037b5 Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Sun, 28 Apr 2013 21:37:36 +0100
Subject: [PATCH 08/11] weaved coregionalise. much performance gained

---
 GPy/kern/coregionalise.py   | 62 ++++++++++++++++++++++++++++++++++---
 GPy/kern/kern.py            |  1 +
 GPy/kern/prod.py            |  9 ++++--
 GPy/kern/prod_orthogonal.py |  9 ++++--
 4 files changed, 70 insertions(+), 11 deletions(-)

diff --git a/GPy/kern/coregionalise.py b/GPy/kern/coregionalise.py
index a76bb31e..a4d22c2d 100644
--- a/GPy/kern/coregionalise.py
+++ b/GPy/kern/coregionalise.py
@@ -5,10 +5,11 @@ from kernpart import kernpart
 import numpy as np
 from GPy.util.linalg import mdot, pdinv
 import pdb
+from scipy import weave
 
 class coregionalise(kernpart):
     """
-    Kernel for Intrisec Corregionalization Models
+    Kernel for Intrinsic Coregionalization Models
     """
     def __init__(self,Nout,R=1, W=None, kappa=None):
         self.D = 1
@@ -42,19 +43,70 @@ class coregionalise(kernpart):
 
     def K(self,index,index2,target):
         index = np.asarray(index,dtype=np.int)
+
+        #here's the old code (numpy)
+        #if index2 is None:
+            #index2 = index
+        #else:
+            #index2 = np.asarray(index2,dtype=np.int)
+        #false_target = target.copy()
+        #ii,jj = np.meshgrid(index,index2)
+        #ii,jj = ii.T, jj.T
+        #false_target += self.B[ii,jj]
+
         if index2 is None:
-            index2 = index
+            code="""
+            for(int i=0;i
Date: Sun, 28 Apr 2013 22:32:37 +0100
Subject: [PATCH 09/11] reimplemented caching in prod_orthogonal...
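
The idea of the cache: K, dK_dtheta and dK_dX all need the two factor
covariances K1 and K2, so _K_computations remembers the last X, X2 and
parameter vector and only recomputes K1 and K2 when one of them changes.
A minimal standalone sketch of the same pattern (the class and the toy
'kernels' below are illustrative, not the GPy API):

    import numpy as np

    class cached_product(object):
        def __init__(self, k1, k2):
            self.k1, self.k2 = k1, k2
            self._X = self._X2 = self._params = None

        def _K_computations(self, X, X2, params):
            # recompute only if the inputs or parameters have changed
            if (np.array_equal(X, self._X) and np.array_equal(X2, self._X2)
                    and np.array_equal(params, self._params)):
                return
            self._X = X.copy()
            self._X2 = None if X2 is None else X2.copy()
            self._params = params.copy()
            self._K1 = self.k1(X, X2)
            self._K2 = self.k2(X, X2)

        def K(self, X, X2, params):
            self._K_computations(X, X2, params)
            return self._K1 * self._K2

    # usage with two toy 'kernels' on one-dimensional inputs:
    k1 = lambda X, X2: np.exp(-(X - (X if X2 is None else X2).T)**2)
    k2 = lambda X, X2: 1.0 + 0.0*(X - (X if X2 is None else X2).T)
    kp = cached_product(k1, k2)
    X = np.random.randn(5, 1)
    K = kp.K(X, None, params=np.array([1.0]))

Note that the cached parameter vector must be assigned on a cache miss,
otherwise the equality check never passes and the cache never hits.
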
---
 GPy/kern/prod_orthogonal.py | 59 +++++++++++++++++++------------------
 1 file changed, 31 insertions(+), 28 deletions(-)

diff --git a/GPy/kern/prod_orthogonal.py b/GPy/kern/prod_orthogonal.py
index 6ba9965f..cc15a94e 100644
--- a/GPy/kern/prod_orthogonal.py
+++ b/GPy/kern/prod_orthogonal.py
@@ -21,44 +21,35 @@
         self.name = k1.name + '' + k2.name
         self.k1 = k1
         self.k2 = k2
+        self._X, self._X2, self._params = np.empty(shape=(3,1))
         self._set_params(np.hstack((k1._get_params(),k2._get_params())))
 
     def _get_params(self):
         """return the value of the parameters."""
-        return self.params
+        return np.hstack((self.k1._get_params(), self.k2._get_params()))
 
     def _set_params(self,x):
         """set the value of the parameters."""
         self.k1._set_params(x[:self.k1.Nparam])
         self.k2._set_params(x[self.k1.Nparam:])
-        self.params = x
 
     def _get_param_names(self):
         """return parameter names."""
         return [self.k1.name + '_' + param_name for param_name in self.k1._get_param_names()] + [self.k2.name + '_' + param_name for param_name in self.k2._get_param_names()]
 
     def K(self,X,X2,target):
-        """Compute the covariance matrix between X and X2."""
-        target1 = np.zeros_like(target)
-        target2 = np.zeros_like(target)
-        if X2 is None:
-            self.k1.K(X[:,:self.k1.D],None,target1)
-            self.k2.K(X[:,self.k1.D:],None,target2)
-        else:
-            self.k1.K(X[:,:self.k1.D],X2[:,:self.k1.D],target1)
-            self.k2.K(X[:,self.k1.D:],X2[:,self.k1.D:],target2)
-        target += target1 * target2
+        self._K_computations(X,X2)
+        target += self._K1 * self._K2
 
     def dK_dtheta(self,dL_dK,X,X2,target):
         """derivative of the covariance matrix with respect to the parameters."""
-        if X2 is None: X2 = X
-        K1 = np.zeros((X.shape[0],X2.shape[0]))
-        K2 = np.zeros((X.shape[0],X2.shape[0]))
-        self.k1.K(X[:,:self.k1.D],X2[:,:self.k1.D],K1)
-        self.k2.K(X[:,self.k1.D:],X2[:,self.k1.D:],K2)
-
-        self.k1.dK_dtheta(dL_dK*K2, X[:,:self.k1.D], X2[:,:self.k1.D], target[:self.k1.Nparam])
-        self.k2.dK_dtheta(dL_dK*K1, X[:,self.k1.D:], X2[:,self.k1.D:], target[self.k1.Nparam:])
+        self._K_computations(X,X2)
+        if X2 is None:
+            self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.D], None, target[:self.k1.Nparam])
+            self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.D:], None, target[self.k1.Nparam:])
+        else:
+            self.k1.dK_dtheta(dL_dK*self._K2, X[:,:self.k1.D], X2[:,:self.k1.D], target[:self.k1.Nparam])
+            self.k2.dK_dtheta(dL_dK*self._K1, X[:,self.k1.D:], X2[:,self.k1.D:], target[self.k1.Nparam:])
 
     def Kdiag(self,X,target):
         """Compute the diagonal of the covariance matrix associated to X."""
@@ -78,14 +69,9 @@
 
     def dK_dX(self,dL_dK,X,X2,target):
         """derivative of the covariance matrix with respect to X."""
-        if X2 is None: X2 = X
-        K1 = np.zeros((X.shape[0],X2.shape[0]))
-        K2 = np.zeros((X.shape[0],X2.shape[0]))
-        self.k1.K(X[:,0:self.k1.D],X2[:,0:self.k1.D],K1)
-        self.k2.K(X[:,self.k1.D:],X2[:,self.k1.D:],K2)
-
-        self.k1.dK_dX(dL_dK*K2, X[:,:self.k1.D], X2[:,:self.k1.D], target)
-        self.k2.dK_dX(dL_dK*K1, X[:,self.k1.D:], X2[:,self.k1.D:], target)
+        self._K_computations(X,X2)
+        self.k1.dK_dX(dL_dK*self._K2, X[:,:self.k1.D], X2[:,:self.k1.D], target)
+        self.k2.dK_dX(dL_dK*self._K1, X[:,self.k1.D:], X2[:,self.k1.D:], target)
 
     def dKdiag_dX(self, dL_dKdiag, X, target):
         K1 = np.zeros(X.shape[0])
@@ -96,3 +82,20 @@
         self.k1.dK_dX(dL_dKdiag*K2, X[:,:self.k1.D], target)
         self.k2.dK_dX(dL_dKdiag*K1, X[:,self.k1.D:], target)
 
+    def _K_computations(self,X,X2):
+        if not (np.array_equal(X,self._X) and np.array_equal(X2,self._X2) and np.array_equal(self._params , self._get_params())):
+            self._X = X.copy()
+            self._params = self._get_params().copy()
+            if X2 is None:
+                self._X2 = None
+                self._K1 = np.zeros((X.shape[0],X.shape[0]))
+                self._K2 = np.zeros((X.shape[0],X.shape[0]))
+                self.k1.K(X[:,:self.k1.D],None,self._K1)
+                self.k2.K(X[:,self.k1.D:],None,self._K2)
+            else:
+                self._X2 = X2.copy()
+                self._K1 = np.zeros((X.shape[0],X2.shape[0]))
+                self._K2 = np.zeros((X.shape[0],X2.shape[0]))
+                self.k1.K(X[:,:self.k1.D],X2[:,:self.k1.D],self._K1)
+                self.k2.K(X[:,self.k1.D:],X2[:,self.k1.D:],self._K2)
+

From 23bde6f3ddd56b938279451f1fcb55a84e00ced5 Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Mon, 29 Apr 2013 09:11:36 +0100
Subject: [PATCH 10/11] removed uncollapsed sparse GP. superseded by the forthcoming svigp package

---
 GPy/models/uncollapsed_sparse_GP.py | 151 ----------------------------
 1 file changed, 151 deletions(-)
 delete mode 100644 GPy/models/uncollapsed_sparse_GP.py

diff --git a/GPy/models/uncollapsed_sparse_GP.py b/GPy/models/uncollapsed_sparse_GP.py
deleted file mode 100644
index d2638784..00000000
--- a/GPy/models/uncollapsed_sparse_GP.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright (c) 2012 James Hensman
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-import numpy as np
-import pylab as pb
-from ..util.linalg import mdot, jitchol, chol_inv, pdinv
-from .. import kern
-from ..likelihoods import likelihood
-from sparse_GP import sparse_GP
-
-class uncollapsed_sparse_GP(sparse_GP):
-    """
-    Variational sparse GP model (Regression), where the approximating distribution q(u) is represented explicitly
-
-    :param X: inputs
-    :type X: np.ndarray (N x Q)
-    :param likelihood: GPy likelihood class, containing observed data
-    :param q_u: canonical parameters of the distribution squasehd into a 1D array
-    :type q_u: np.ndarray
-    :param kernel : the kernel/covariance function. See link kernels
See link kernels - :type kernel: a GPy kernel - :param Z: inducing inputs (optional, see note) - :type Z: np.ndarray (M x Q) | None - :param Zslices: slices for the inducing inputs (see slicing TODO: link) - :param normalize_X : whether to normalize the data before computing (predictions will be in original scales) - :type normalize_X: bool - """ - - def __init__(self, X, likelihood, kernel, Z, q_u=None, **kwargs): - self.M = Z.shape[0] - if q_u is None: - q_u = np.hstack((np.random.randn(self.M*likelihood.D),-0.5*np.eye(self.M).flatten())) - self.likelihood = likelihood - self.set_vb_param(q_u) - sparse_GP.__init__(self, X, likelihood, kernel, Z, **kwargs) - - def _computations(self): - # kernel computations, using BGPLVM notation - self.Kmm = self.kern.K(self.Z) - if self.has_uncertain_inputs: - raise NotImplementedError - else: - self.psi0 = self.kern.Kdiag(self.X,slices=self.Xslices) - self.psi1 = self.kern.K(self.Z,self.X) - if self.likelihood.is_heteroscedastic: - raise NotImplementedError - else: - tmp = self.psi1*(np.sqrt(self.likelihood.precision)/sf) - self.psi2_beta_scaled = np.dot(tmp,tmp.T) - self.psi2 = self.psi1.T[:,:,None]*self.psi1.T[:,None,:] - - - self.V = self.likelihood.precision*self.Y - self.VmT = np.dot(self.V,self.q_u_expectation[0].T) - self.psi1V = np.dot(self.psi1, self.V) - self.psi1VVpsi1 = np.dot(self.psi1V, self.psi1V.T) - self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm) - self.A = mdot(self.Lmi, self.beta*self.psi2, self.Lmi.T) - self.B = np.eye(self.M) + self.A - self.Lambda = mdot(self.Lmi.T,self.B,self.Lmi) - self.trace_K = self.psi0 - np.trace(self.A)/self.beta - self.projected_mean = mdot(self.psi1.T,self.Kmmi,self.q_u_expectation[0]) - - # Compute dL_dpsi - self.dL_dpsi0 = - 0.5 * self.likelihood.D * self.beta * np.ones(self.N) - self.dL_dpsi1 = np.dot(self.VmT,self.Kmmi).T # This is the correct term for E I think... 
- self.dL_dpsi2 = 0.5 * self.beta * self.likelihood.D * (self.Kmmi - mdot(self.Kmmi,self.q_u_expectation[1],self.Kmmi)) - - # Compute dL_dKmm - tmp = self.beta*mdot(self.psi2,self.Kmmi,self.q_u_expectation[1]) -np.dot(self.q_u_expectation[0],self.psi1V.T) - tmp += tmp.T - tmp += self.likelihood.D*(-self.beta*self.psi2 - self.Kmm + self.q_u_expectation[1]) - self.dL_dKmm = 0.5*mdot(self.Kmmi,tmp,self.Kmmi) - - #Compute the gradient of the log likelihood wrt noise variance - #TODO: suport heteroscedatic noise - dbeta = 0.5 * self.N*self.likelihood.D/self.beta - dbeta += - 0.5 * self.likelihood.D * self.trace_K - dbeta += - 0.5 * self.likelihood.D * np.sum(self.q_u_expectation[1]*mdot(self.Kmmi,self.psi2,self.Kmmi)) - dbeta += - 0.5 * self.trYYT - dbeta += np.sum(np.dot(self.Y.T,self.projected_mean)) - self.partial_for_likelihood = -dbeta*self.likelihood.precision**2 - - def log_likelihood(self): - """ - Compute the (lower bound on the) log marginal likelihood - """ - A = -0.5*self.N*self.likelihood.D*(np.log(2.*np.pi) - np.log(self.beta)) - B = -0.5*self.beta*self.likelihood.D*self.trace_K - C = -0.5*self.likelihood.D *(self.Kmm_logdet-self.q_u_logdet + np.sum(self.Lambda * self.q_u_expectation[1]) - self.M) - D = -0.5*self.beta*self.trYYT - E = np.sum(np.dot(self.V.T,self.projected_mean)) - return A+B+C+D+E - - def _raw_predict(self, Xnew, slices,full_cov=False): - """Internal helper function for making predictions, does not account for normalization""" - Kx = self.kern.K(Xnew,self.Z) - mu = mdot(Kx,self.Kmmi,self.q_u_expectation[0]) - - tmp = self.Kmmi- mdot(self.Kmmi,self.q_u_cov,self.Kmmi) - if full_cov: - Kxx = self.kern.K(Xnew) - var = Kxx - mdot(Kx,tmp,Kx.T) - else: - Kxx = self.kern.Kdiag(Xnew) - var = (Kxx - np.sum(Kx*np.dot(Kx,tmp),1))[:,None] - return mu,var - - - def set_vb_param(self,vb_param): - """set the distribution q(u) from the canonical parameters""" - self.q_u_prec = -2.*vb_param[-self.M**2:].reshape(self.M, self.M) - self.q_u_cov, q_u_Li, q_u_L, tmp = pdinv(self.q_u_prec) - self.q_u_logdet = -tmp - self.q_u_mean = np.dot(self.q_u_cov,vb_param[:self.M*self.likelihood.D].reshape(self.M,self.likelihood.D)) - - self.q_u_expectation = (self.q_u_mean, np.dot(self.q_u_mean,self.q_u_mean.T)+self.q_u_cov*self.likelihood.D) - - self.q_u_canonical = (np.dot(self.q_u_prec, self.q_u_mean),-0.5*self.q_u_prec) - #TODO: computations now? - - def get_vb_param(self): - """ - Return the canonical parameters of the distribution q(u) - """ - return np.hstack([e.flatten() for e in self.q_u_canonical]) - - def vb_grad_natgrad(self): - """ - Compute the gradients of the lower bound wrt the canonical and - Expectation parameters of u. 
-
-        Note that the natural gradient in either is given by the gradient in the other (See Hensman et al 2012 Fast Variational inference in the conjugate exponential Family)
-        """
-        dL_dmmT_S = -0.5*self.Lambda-self.q_u_canonical[1]
-        dL_dm = np.dot(self.Kmmi,self.psi1V) - np.dot(self.Lambda,self.q_u_mean)
-
-        #dL_dSim =
-        #dL_dmhSi =
-
-        return np.hstack((dL_dm.flatten(),dL_dmmT_S.flatten())) # natgrad only, grad TODO
-
-
-    def plot(self, *args, **kwargs):
-        """
-        add the distribution q(u) to the plot from sparse_GP
-        """
-        sparse_GP.plot(self,*args,**kwargs)
-        if self.Q==1:
-            pb.errorbar(self.Z[:,0],self.q_u_expectation[0][:,0],yerr=2.*np.sqrt(np.diag(self.q_u_cov)),fmt=None,ecolor='b')
-

From 5fca43f980711becc89fc13efde81a753662cf55 Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Mon, 29 Apr 2013 11:37:08 +0100
Subject: [PATCH 11/11] more stabilisation of sparse GP

---
 GPy/models/__init__.py  |  1 -
 GPy/models/sparse_GP.py | 15 ++++++++++-----
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index d63adaf1..4be8d360 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -9,7 +9,6 @@ from sparse_GP_regression import sparse_GP_regression
 from GPLVM import GPLVM
 from warped_GP import warpedGP
 from sparse_GPLVM import sparse_GPLVM
-from uncollapsed_sparse_GP import uncollapsed_sparse_GP
 from Bayesian_GPLVM import Bayesian_GPLVM
 from mrd import MRD
 from generalized_FITC import generalized_FITC
diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py
index 20caa1a8..a085090d 100644
--- a/GPy/models/sparse_GP.py
+++ b/GPy/models/sparse_GP.py
@@ -92,7 +92,7 @@ class sparse_GP(GP):
         #Compute A = L^-1 psi2 beta L^-T
         #self. A = mdot(self.Lmi,self.psi2_beta_scaled,self.Lmi.T)
         tmp = linalg.lapack.flapack.dtrtrs(self.Lm,self.psi2_beta_scaled.T,lower=1)[0]
-        self.A = linalg.lapack.flapack.dtrtrs(self.Lm,np.asarray(tmp.T,order='F'),lower=1)[0]
+        self.A = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1)[0]
 
         self.B = np.eye(self.M)/sf2 + self.A
 
@@ -101,12 +101,17 @@ class sparse_GP(GP):
         self.psi1V = np.dot(self.psi1, self.V)
         tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.Bi),lower=1,trans=1)[0]
         self.C = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0]
-        #TODO: can we multiply in C by forwardsubstitution?
-        self.Cpsi1V = np.dot(self.C,self.psi1V)
-        self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T)
-        #self.E = np.dot(self.Cpsi1V/sf,self.Cpsi1V.T/sf)
+
+        #self.Cpsi1V = np.dot(self.C,self.psi1V)
+        #back-substitute C into psi1V
+        tmp,info1 = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.psi1V),lower=1,trans=0)
+        tmp,info2 = linalg.lapack.flapack.dpotrs(self.LB,tmp,lower=1)
+        self.Cpsi1V,info3 = linalg.lapack.flapack.dtrtrs(self.Lm,tmp,lower=1,trans=1)
+
+        self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T) #TODO: stabilize?
         self.E = tdot(self.Cpsi1V/sf)
+
         # Compute dL_dpsi
         # FIXME: this is untested for the heteroscedastic + uncertain inputs case
         self.dL_dpsi0 = - 0.5 * self.D * (self.likelihood.precision * np.ones([self.N,1])).flatten()
         self.dL_dpsi1 = np.dot(self.Cpsi1V,self.V.T)