diff --git a/GPy/core/FITC.py b/GPy/core/FITC.py
deleted file mode 100644
index a7875dce..00000000
--- a/GPy/core/FITC.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-import numpy as np
-import pylab as pb
-from ..util.linalg import mdot, jitchol, chol_inv, tdot, symmetrify,pdinv
-from ..util.plot import gpplot
-from .. import kern
-from scipy import stats, linalg
-#from ..core import sparse_GP
-from gp_base import GPBase
-
-def backsub_both_sides(L,X):
-    """ Return L^-T * X * L^-1, assumuing X is symmetrical and L is lower cholesky"""
-    tmp,_ = linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(X),lower=1,trans=1)
-    return linalg.lapack.flapack.dtrtrs(L,np.asfortranarray(tmp.T),lower=1,trans=1)[0].T
-
-class FITC(GPBase):
-    """
-    sparse FITC approximation
-
-    :param X: inputs
-    :type X: np.ndarray (N x Q)
-    :param likelihood: a likelihood instance, containing the observed data
-    :type likelihood: GPy.likelihood.(Gaussian | EP)
-    :param kernel : the kernel (covariance function). See link kernels
-    :type kernel: a GPy.kern.kern instance
-    :param Z: inducing inputs (optional, see note)
-    :type Z: np.ndarray (M x Q) | None
-    :param M : Number of inducing points (optional, default 10. Ignored if Z is not None)
-    :type M: int
-    :param normalize_(X|Y) : whether to normalize the data before computing (predictions will be in original scales)
-    :type normalize_(X|Y): bool
-    """
-
-    def __init__(self, X, likelihood, kernel, Z, normalize_X=False):
-        GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
-
-        self.Z = Z
-        self.M = Z.shape[0]
-        self.likelihood = likelihood
-
-        X_variance = None
-        if X_variance is None:
-            self.has_uncertain_inputs = False
-        else:
-            assert X_variance.shape == X.shape
-            self.has_uncertain_inputs = True
-            self.X_variance = X_variance
-
-        if normalize_X:
-            self.Z = (self.Z.copy() - self._Xmean) / self._Xstd
-
-            # normalize X uncertainty also
-            if self.has_uncertain_inputs:
-                self.X_variance /= np.square(self._Xstd)
-
-    def _set_params(self, p):
-        self.Z = p[:self.M * self.input_dim].reshape(self.M, self.input_dim)
-        self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam])
-        self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:])
-        self._compute_kernel_matrices()
-        self._computations()
-
-    def _get_params(self):
-        return np.hstack([self.Z.flatten(), self.kern._get_params_transformed(), self.likelihood._get_params()])
-
-    def _get_param_names(self):
-        return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])],[])\
-            + self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
-
-
-    def update_likelihood_approximation(self):
-        """
-        Approximates a non-Gaussian likelihood using Expectation Propagation
-
-        For a Gaussian likelihood, no iteration is required:
-        this function does nothing
-        """
-        if self.has_uncertain_inputs:
-            raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
-        else:
-            self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
-            self._set_params(self._get_params()) # update the GP
-
-    def _compute_kernel_matrices(self):
-        # kernel computations, using BGPLVM notation
-        self.Kmm = self.kern.K(self.Z)
-        if self.has_uncertain_inputs:
-            self.psi0 = self.kern.psi0(self.Z, self.X, self.X_variance)
-            self.psi1 = self.kern.psi1(self.Z, self.X, self.X_variance).T
-            self.psi2 = self.kern.psi2(self.Z, self.X, self.X_variance)
-        else:
-            self.psi0 = self.kern.Kdiag(self.X)
-            self.psi1 = self.kern.K(self.Z, self.X)
-            self.psi2 = None
-
-
-    def _computations(self):
-        #factor Kmm
-        self.Lm = jitchol(self.Kmm)
-        self.Lmi,info = linalg.lapack.flapack.dtrtrs(self.Lm,np.eye(self.M),lower=1)
-        Lmipsi1 = np.dot(self.Lmi,self.psi1)
-        self.Qnn = np.dot(Lmipsi1.T,Lmipsi1).copy()
-        self.Diag0 = self.psi0 - np.diag(self.Qnn)
-        self.beta_star = self.likelihood.precision/(1. + self.likelihood.precision*self.Diag0[:,None]) #Includes Diag0 in the precision
-        self.V_star = self.beta_star * self.likelihood.Y
-
-        # The rather complex computations of self.A
-        if self.has_uncertain_inputs:
-            raise NotImplementedError
-        else:
-            if self.likelihood.is_heteroscedastic:
-                assert self.likelihood.D == 1
-            tmp = self.psi1 * (np.sqrt(self.beta_star.flatten().reshape(1, self.N)))
-            tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(tmp), lower=1)
-            self.A = tdot(tmp)
-
-        # factor B
-        self.B = np.eye(self.M) + self.A
-        self.LB = jitchol(self.B)
-        self.LBi = chol_inv(self.LB)
-        self.psi1V = np.dot(self.psi1, self.V_star)
-
-        Lmi_psi1V, info = linalg.lapack.flapack.dtrtrs(self.Lm, np.asfortranarray(self.psi1V), lower=1, trans=0)
-        self._LBi_Lmi_psi1V, _ = linalg.lapack.flapack.dtrtrs(self.LB, np.asfortranarray(Lmi_psi1V), lower=1, trans=0)
-
-        Kmmipsi1 = np.dot(self.Lmi.T,Lmipsi1)
-        b_psi1_Ki = self.beta_star * Kmmipsi1.T
-        Ki_pbp_Ki = np.dot(Kmmipsi1,b_psi1_Ki)
-        Kmmi = np.dot(self.Lmi.T,self.Lmi)
-        LBiLmi = np.dot(self.LBi,self.Lmi)
-        LBL_inv = np.dot(LBiLmi.T,LBiLmi)
-        VVT = np.outer(self.V_star,self.V_star)
-        VV_p_Ki = np.dot(VVT,Kmmipsi1.T)
-        Ki_pVVp_Ki = np.dot(Kmmipsi1,VV_p_Ki)
-        psi1beta = self.psi1*self.beta_star.T
-        H = self.Kmm + mdot(self.psi1,psi1beta.T)
-        LH = jitchol(H)
-        LHi = chol_inv(LH)
-        Hi = np.dot(LHi.T,LHi)
-
-        betapsi1TLmiLBi = np.dot(psi1beta.T,LBiLmi.T)
-        alpha = np.array([np.dot(a.T,a) for a in betapsi1TLmiLBi])[:,None]
-        gamma_1 = mdot(VVT,self.psi1.T,Hi)
-        pHip = mdot(self.psi1.T,Hi,self.psi1)
-        gamma_2 = mdot(self.beta_star*pHip,self.V_star)
-        gamma_3 = self.V_star * gamma_2
-
-        self._dL_dpsi0 = -0.5 * self.beta_star#dA_dpsi0: logdet(self.beta_star)
-        self._dL_dpsi0 += .5 * self.V_star**2 #dA_psi0: yT*beta_star*y
-        self._dL_dpsi0 += .5 *alpha #dC_dpsi0
-        self._dL_dpsi0 += 0.5*mdot(self.beta_star*pHip,self.V_star)**2 - self.V_star * mdot(self.V_star.T,pHip*self.beta_star).T #dD_dpsi0
-
-        self._dL_dpsi1 = b_psi1_Ki.copy() #dA_dpsi1: logdet(self.beta_star)
-        self._dL_dpsi1 += -np.dot(psi1beta.T,LBL_inv) #dC_dpsi1
-        self._dL_dpsi1 += gamma_1 - mdot(psi1beta.T,Hi,self.psi1,gamma_1) #dD_dpsi1
-
-        self._dL_dKmm = -0.5 * np.dot(Kmmipsi1,b_psi1_Ki) #dA_dKmm: logdet(self.beta_star)
-        self._dL_dKmm += .5*(LBL_inv - Kmmi) + mdot(LBL_inv,psi1beta,Kmmipsi1.T) #dC_dKmm
-        self._dL_dKmm += -.5 * mdot(Hi,self.psi1,gamma_1) #dD_dKmm
-
-        self._dpsi1_dtheta = 0
-        self._dpsi1_dX = 0
-        self._dKmm_dtheta = 0
-        self._dKmm_dX = 0
-
-        self._dpsi1_dX_jkj = 0
-        self._dpsi1_dtheta_jkj = 0
-
-        for i,V_n,alpha_n,gamma_n,gamma_k in zip(range(self.N),self.V_star,alpha,gamma_2,gamma_3):
-            K_pp_K = np.dot(Kmmipsi1[:,i:(i+1)],Kmmipsi1[:,i:(i+1)].T)
-
-            #Diag_dpsi1 = Diag_dA_dpsi1: yT*beta_star*y + Diag_dC_dpsi1 +Diag_dD_dpsi1
-            _dpsi1 = (-V_n**2 - alpha_n + 2.*gamma_k - gamma_n**2) * Kmmipsi1.T[i:(i+1),:]
-
-            #Diag_dKmm = Diag_dA_dKmm: yT*beta_star*y +Diag_dC_dKmm +Diag_dD_dKmm
-            _dKmm = .5*(V_n**2 + alpha_n + gamma_n**2 - 2.*gamma_k) * K_pp_K #Diag_dD_dKmm
-
-            self._dpsi1_dtheta += self.kern.dK_dtheta(_dpsi1,self.X[i:i+1,:],self.Z)
-            self._dKmm_dtheta += self.kern.dK_dtheta(_dKmm,self.Z)
-
-            self._dKmm_dX += 2.*self.kern.dK_dX(_dKmm ,self.Z)
-            self._dpsi1_dX += self.kern.dK_dX(_dpsi1.T,self.Z,self.X[i:i+1,:])
-
-        # the partial derivative vector for the likelihood
-        if self.likelihood.Nparams == 0:
-            # save computation here.
-            self.partial_for_likelihood = None
-        elif self.likelihood.is_heteroscedastic:
-            raise NotImplementedError, "heteroscedatic derivates not implemented"
-        else:
-            # likelihood is not heterscedatic
-            dbstar_dnoise = self.likelihood.precision * (self.beta_star**2 * self.Diag0[:,None] - self.beta_star)
-            Lmi_psi1 = mdot(self.Lmi,self.psi1)
-            LBiLmipsi1 = np.dot(self.LBi,Lmi_psi1)
-            aux_0 = np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1)
-            aux_1 = self.likelihood.Y.T * np.dot(self._LBi_Lmi_psi1V.T,LBiLmipsi1)
-            aux_2 = np.dot(LBiLmipsi1.T,self._LBi_Lmi_psi1V)
-
-            dA_dnoise = 0.5 * self.D * (dbstar_dnoise/self.beta_star).sum() - 0.5 * self.D * np.sum(self.likelihood.Y**2 * dbstar_dnoise)
-            dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T)
-            dC_dnoise = -0.5 * np.sum(mdot(self.LBi.T,self.LBi,Lmi_psi1) * Lmi_psi1 * dbstar_dnoise.T)
-
-            dD_dnoise_1 = mdot(self.V_star*LBiLmipsi1.T,LBiLmipsi1*dbstar_dnoise.T*self.likelihood.Y.T)
-            alpha = mdot(LBiLmipsi1,self.V_star)
-            alpha_ = mdot(LBiLmipsi1.T,alpha)
-            dD_dnoise_2 = -0.5 * self.D * np.sum(alpha_**2 * dbstar_dnoise )
-
-            dD_dnoise_1 = mdot(self.V_star.T,self.psi1.T,self.Lmi.T,self.LBi.T,self.LBi,self.Lmi,self.psi1,dbstar_dnoise*self.likelihood.Y)
-            dD_dnoise_2 = 0.5*mdot(self.V_star.T,self.psi1.T,Hi,self.psi1,dbstar_dnoise*self.psi1.T,Hi,self.psi1,self.V_star)
-            dD_dnoise = dD_dnoise_1 + dD_dnoise_2
-
-            self.partial_for_likelihood = dA_dnoise + dC_dnoise + dD_dnoise
-
-    def log_likelihood(self):
-        """ Compute the (lower bound on the) log marginal likelihood """
-        A = -0.5 * self.N * self.D * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.beta_star)) - 0.5 * np.sum(self.V_star * self.likelihood.Y)
-        C = -self.D * (np.sum(np.log(np.diag(self.LB))))
-        D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
-        return A + C + D
-
-    def _log_likelihood_gradients(self):
-        pass
-        return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))
-
-    def dL_dtheta(self):
-        if self.has_uncertain_inputs:
-            raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
-        else:
-            dL_dtheta = self.kern.dKdiag_dtheta(self._dL_dpsi0,self.X)
-            dL_dtheta += self.kern.dK_dtheta(self._dL_dpsi1,self.X,self.Z)
-            dL_dtheta += self.kern.dK_dtheta(self._dL_dKmm,X=self.Z)
-            dL_dtheta += self._dKmm_dtheta
-            dL_dtheta += self._dpsi1_dtheta
-        return dL_dtheta
-
-    def dL_dZ(self):
-        if self.has_uncertain_inputs:
-            raise NotImplementedError, "FITC approximation not implemented for uncertain inputs"
-        else:
-            dL_dZ = self.kern.dK_dX(self._dL_dpsi1.T,self.Z,self.X)
-            dL_dZ += 2. * self.kern.dK_dX(self._dL_dKmm,X=self.Z)
-            dL_dZ += self._dpsi1_dX
-            dL_dZ += self._dKmm_dX
-        return dL_dZ
-
-    def _raw_predict(self, Xnew, which_parts, full_cov=False):
-
-        if self.likelihood.is_heteroscedastic:
-            Iplus_Dprod_i = 1./(1.+ self.Diag0 * self.likelihood.precision.flatten())
-            self.Diag = self.Diag0 * Iplus_Dprod_i
-            self.P = Iplus_Dprod_i[:,None] * self.psi1.T
-            self.RPT0 = np.dot(self.Lmi,self.psi1)
-            self.L = np.linalg.cholesky(np.eye(self.M) + np.dot(self.RPT0,((1. - Iplus_Dprod_i)/self.Diag0)[:,None]*self.RPT0.T))
-            self.R,info = linalg.flapack.dtrtrs(self.L,self.Lmi,lower=1)
-            self.RPT = np.dot(self.R,self.P.T)
-            self.Sigma = np.diag(self.Diag) + np.dot(self.RPT.T,self.RPT)
-            self.w = self.Diag * self.likelihood.v_tilde
-            self.Gamma = np.dot(self.R.T, np.dot(self.RPT,self.likelihood.v_tilde))
-            self.mu = self.w + np.dot(self.P,self.Gamma)
-
-            """
-            Make a prediction for the generalized FITC model
-
-            Arguments
-            ---------
-            X : Input prediction data - Nx1 numpy array (floats)
-            """
-            # q(u|f) = N(u| R0i*mu_u*f, R0i*C*R0i.T)
-
-            # Ci = I + (RPT0)Di(RPT0).T
-            # C = I - [RPT0] * (D+[RPT0].T*[RPT0])^-1*[RPT0].T
-            #   = I - [RPT0] * (D + self.Qnn)^-1 * [RPT0].T
-            #   = I - [RPT0] * (U*U.T)^-1 * [RPT0].T
-            #   = I - V.T * V
-            U = np.linalg.cholesky(np.diag(self.Diag0) + self.Qnn)
-            V,info = linalg.flapack.dtrtrs(U,self.RPT0.T,lower=1)
-            C = np.eye(self.M) - np.dot(V.T,V)
-            mu_u = np.dot(C,self.RPT0)*(1./self.Diag0[None,:])
-            #self.C = C
-            #self.RPT0 = np.dot(self.R0,self.Knm.T) P0.T
-            #self.mu_u = mu_u
-            #self.U = U
-            # q(u|y) = N(u| R0i*mu_H,R0i*Sigma_H*R0i.T)
-            mu_H = np.dot(mu_u,self.mu)
-            self.mu_H = mu_H
-            Sigma_H = C + np.dot(mu_u,np.dot(self.Sigma,mu_u.T))
-            # q(f_star|y) = N(f_star|mu_star,sigma2_star)
-            Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts)
-            KR0T = np.dot(Kx.T,self.Lmi.T)
-            mu_star = np.dot(KR0T,mu_H)
-            if full_cov:
-                Kxx = self.kern.K(Xnew,which_parts=which_parts)
-                var = Kxx + np.dot(KR0T,np.dot(Sigma_H - np.eye(self.M),KR0T.T))
-            else:
-                Kxx = self.kern.Kdiag(Xnew,which_parts=which_parts)
-                var = (Kxx + np.sum(KR0T.T*np.dot(Sigma_H - np.eye(self.M),KR0T.T),0))[:,None]
-            return mu_star[:,None],var
-        else:
-            raise NotImplementedError, "homoscedastic FITC not implemented"
-            """
-            Kx = self.kern.K(self.Z, Xnew)
-            mu = mdot(Kx.T, self.C/self.scale_factor, self.psi1V)
-            if full_cov:
-                Kxx = self.kern.K(Xnew)
-                var = Kxx - mdot(Kx.T, (self.Kmmi - self.C/self.scale_factor**2), Kx) #NOTE this won't work for plotting
-            else:
-                Kxx = self.kern.Kdiag(Xnew)
-                var = Kxx - np.sum(Kx*np.dot(self.Kmmi - self.C/self.scale_factor**2, Kx),0)
-            return mu,var[:,None]
-            """
diff --git a/GPy/core/__init__.py b/GPy/core/__init__.py
index 10dc55d7..32b6c02d 100644
--- a/GPy/core/__init__.py
+++ b/GPy/core/__init__.py
@@ -6,4 +6,4 @@ from parameterised import *
 import priors
 from GPy.core.gp import GP
 from GPy.core.sparse_gp import SparseGP
-from FITC import FITC
+from fitc import FITC
diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py
index ecc090fe..edb1c179 100644
--- a/GPy/examples/classification.py
+++ b/GPy/examples/classification.py
@@ -24,7 +24,7 @@ def crescent_data(seed=default_seed): # FIXME
     Y = data['Y']
     Y[Y.flatten()==-1] = 0
 
-    m = GPy.models.GP_classification(data['X'], Y)
+    m = GPy.models.GPClassification(data['X'], Y)
     m.ensure_default_constraints()
     m.update_likelihood_approximation()
     m.optimize()
@@ -41,7 +41,7 @@ def oil():
     Y[Y.flatten()==-1] = 0
 
     # Create GP model
-    m = GPy.models.GP_classification(data['X'], Y)
+    m = GPy.models.GPClassification(data['X'], Y)
 
     # Contrain all parameters to be positive
     m.constrain_positive('')
@@ -66,7 +66,7 @@ def toy_linear_1d_classification(seed=default_seed):
     Y[Y.flatten() == -1] = 0
 
     # Model definition
-    m = GPy.models.GP_classification(data['X'], Y)
+    m = GPy.models.GPClassification(data['X'], Y)
     m.ensure_default_constraints()
 
     # Optimize
@@ -95,7 +95,7 @@ def sparse_toy_linear_1d_classification(seed=default_seed):
     Y[Y.flatten() == -1] = 0
 
     # Model definition
-    m = GPy.models.sparse_GP_classification(data['X'], Y)
+    m = GPy.models.SparseGPClassification(data['X'], Y)
     m['.*len']= 2.
     m.ensure_default_constraints()
 
@@ -127,7 +127,7 @@ def sparse_crescent_data(inducing=10, seed=default_seed):
     Y = data['Y']
     Y[Y.flatten()==-1]=0
 
-    m = GPy.models.sparse_GP_classification(data['X'], Y)
+    m = GPy.models.SparseGPClassification(data['X'], Y)
     m.ensure_default_constraints()
     m['.*len'] = 10.
     m.update_likelihood_approximation()
@@ -150,7 +150,7 @@ def FITC_crescent_data(inducing=10, seed=default_seed):
     Y = data['Y']
     Y[Y.flatten()==-1]=0
 
-    m = GPy.models.FITC_classification(data['X'], Y)
+    m = GPy.models.FITCClassification(data['X'], Y)
     m.ensure_default_constraints()
     m['.*len'] = 10.
     m.update_likelihood_approximation()
diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index 281d8a34..f18e89db 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -2,6 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 from gp_regression import GPRegression
+from gp_classification import GPClassification
 from sparse_gp_regression import SparseGPRegression
 from sparse_gp_classification import SparseGPClassification
 from fitc_classification import FITCClassification
diff --git a/GPy/models/gp_classification.py b/GPy/models/gp_classification.py
index 74455a2b..376f0005 100644
--- a/GPy/models/gp_classification.py
+++ b/GPy/models/gp_classification.py
@@ -7,11 +7,11 @@ from ..core import GP
 from .. import likelihoods
 from .. import kern
 
-class GP_classification(GP):
+class GPClassification(GP):
     """
     Gaussian Process classification
 
-    This is a thin wrapper around the models.GP class, with a set of sensible defalts
+    This is a thin wrapper around the models.GP class, with a set of sensible defaults
 
     :param X: input observations
    :param Y: observed values
diff --git a/GPy/models/sparse_gp_classification.py b/GPy/models/sparse_gp_classification.py
index ee244aa5..f82de00f 100644
--- a/GPy/models/sparse_gp_classification.py
+++ b/GPy/models/sparse_gp_classification.py
@@ -3,12 +3,12 @@
 
 import numpy as np
 
-from ..core import sparse_GP
+from ..core import SparseGP
 from .. import likelihoods
 from .. import kern
 from ..likelihoods import likelihood
 
-class sparse_GP_classification(sparse_GP):
+class SparseGPClassification(SparseGP):
     """
     sparse Gaussian Process model for classification
 
@@ -43,5 +43,5 @@ class sparse_GP_classification(sparse_GP):
         else:
             assert Z.shape[1]==X.shape[1]
 
-        sparse_GP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
+        SparseGP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
         self._set_params(self._get_params())
diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py
index 8f22a7d0..c477f283 100644
--- a/GPy/util/datasets.py
+++ b/GPy/util/datasets.py
@@ -13,7 +13,7 @@ default_seed = 10000
 # Some general utilities.
 def sample_class(f):
     p = 1. / (1. + np.exp(-f))
-    c = np.random.Binomial(1, p)
+    c = np.random.binomial(1, p)
     c = np.where(c, 1, -1)
     return c
 
@@ -22,7 +22,7 @@ def fetch_dataset(resource, save_name = None, save_file = True, messages = True)
     print "Downloading resource: " , resource, " ... ",
     response = url.urlopen(resource)
     # TODO: Some error checking...
-    # ... 
+    # ...
     html = response.read()
     response.close()
     if save_file:
@@ -33,8 +33,6 @@ def fetch_dataset(resource, save_name = None, save_file = True, messages = True)
     if messages: print "Done!"
     return html
-
-
 def della_gatta_TRP63_gene_expression(gene_number=None):
     mat_data = scipy.io.loadmat(os.path.join(data_path, 'DellaGattadata.mat'))
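
The renames above keep the old call signatures, so only the class names change for callers. A minimal usage sketch of the post-rename API, assuming hypothetical toy data and default kernel settings (it mirrors the updated calls in GPy/examples/classification.py and is not part of the diff itself):

    import numpy as np
    import GPy

    # Hypothetical toy binary data: labels in {0, 1}, as the examples above expect.
    X = np.random.rand(40, 1)
    Y = np.where(X[:, 0] > 0.5, 1, 0)[:, None]

    # Full GP classifier (was GPy.models.GP_classification).
    m = GPy.models.GPClassification(X, Y)
    m.ensure_default_constraints()
    m.update_likelihood_approximation()  # EP step for the non-Gaussian likelihood
    m.optimize()

    # Sparse variant (was GPy.models.sparse_GP_classification).
    ms = GPy.models.SparseGPClassification(X, Y)
    ms.ensure_default_constraints()
    ms.update_likelihood_approximation()
    ms.optimize()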