diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py
index c25ea124..592299d8 100644
--- a/GPy/examples/classification.py
+++ b/GPy/examples/classification.py
@@ -20,11 +20,19 @@ def crescent_data(model_type='Full', inducing=10, seed=default_seed): #FIXME
     :param inducing : number of inducing variables (only used for 'FITC' or 'DTC').
     :type inducing: int
     """
+
     data = GPy.util.datasets.crescent_data(seed=seed)
-    likelihood = GPy.inference.likelihoods.probit(data['Y'])
+
+    # Kernel object
+    kernel = GPy.kern.rbf(data['X'].shape[1])
+
+    # Likelihood object
+    distribution = GPy.likelihoods.likelihood_functions.probit()
+    likelihood = GPy.likelihoods.EP(data['Y'],distribution)
+
     if model_type=='Full':
-        m = GPy.models.GP_EP(data['X'],likelihood)
+        m = GPy.models.GP(data['X'],likelihood,kernel)
     else:
         # create sparse GP EP model
         m = GPy.models.sparse_GP_EP(data['X'],likelihood=likelihood,inducing=inducing,ep_proxy=model_type)
@@ -33,7 +41,7 @@ def crescent_data(model_type='Full', inducing=10, seed=default_seed): #FIXME
     print(m)

     # optimize
-    m.em()
+    m.optimize()
     print(m)

     # plot
@@ -53,7 +61,7 @@ def oil():
     likelihood = GPy.likelihoods.EP(data['Y'][:, 0:1],distribution)

     # Create GP model
-    m = GPy.models.GP(data['X'],kernel,likelihood=likelihood)
+    m = GPy.models.GP(data['X'],likelihood=likelihood,kernel=kernel)

-    # Contrain all parameters to be positive
+    # Constrain all parameters to be positive
     m.constrain_positive('')
@@ -71,17 +79,18 @@ def toy_linear_1d_classification(seed=default_seed):
     Simple 1D classification example
     :param seed : seed value for data generation (default is 4).
     :type seed: int
-    :type inducing: int
     """
     data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
+    Y = data['Y'][:, 0:1]
+    Y[Y == -1] = 0

     # Kernel object
     kernel = GPy.kern.rbf(1)

     # Likelihood object
     distribution = GPy.likelihoods.likelihood_functions.probit()
-    likelihood = GPy.likelihoods.EP(data['Y'][:, 0:1],distribution)
+    likelihood = GPy.likelihoods.EP(Y,distribution)

     # Model definition
     m = GPy.models.GP(data['X'],likelihood=likelihood,kernel=kernel)

@@ -98,7 +107,7 @@ def toy_linear_1d_classification(seed=default_seed):

     # Plot
     pb.subplot(211)
-    m.plot_internal()
+    m.plot_f()
     pb.subplot(212)
     m.plot()
     print(m)
diff --git a/GPy/likelihoods/likelihood_functions.py b/GPy/likelihoods/likelihood_functions.py
index de97824a..23881899 100644
--- a/GPy/likelihoods/likelihood_functions.py
+++ b/GPy/likelihoods/likelihood_functions.py
@@ -38,6 +38,7 @@ class probit(likelihood_function):
         :param v_i: mean/variance of the cavity distribution (float)
         """
         # TODO: some version of assert np.sum(np.abs(Y)-1) == 0, "Output values must be either -1 or 1"
+        if data_i == 0: data_i = -1 #NOTE Binary classification works better with classes {-1,1}; 1D plotting works better with classes {0,1}.
         z = data_i*v_i/np.sqrt(tau_i**2 + tau_i)
         Z_hat = stats.norm.cdf(z)
         phi = stats.norm.pdf(z)
@@ -52,9 +53,9 @@ class probit(likelihood_function):
         mu = mu.flatten()
         var = var.flatten()
         mean = stats.norm.cdf(mu/np.sqrt(1+var))
-        p_05 = np.zeros(mu.shape)#np.zeros([mu.size])
-        p_95 = np.zeros(mu.shape)#np.ones([mu.size])
-        return mean, p_05, p_95
+        p_025 = np.zeros(mu.shape)
+        p_975 = np.ones(mu.shape)
+        return mean, p_025, p_975

 class Poisson(likelihood_function):
     """
@@ -65,7 +66,7 @@ class Poisson(likelihood_function):
-    L(x) = \exp(\lambda) * \lambda**Y_i / Y_i!
+    L(x) = \exp(-\lambda) * \lambda**Y_i / Y_i!
     $$
     """
-    def moments_match(self,i,tau_i,v_i):
+    def moments_match(self,data_i,tau_i,v_i):
         """
         Moments match of the marginal approximation in EP algorithm

@@ -81,14 +82,14 @@ class Poisson(likelihood_function):
             """
             pdf_norm_f = stats.norm.pdf(f,loc=mu,scale=sigma)
             rate = np.exp( (f*self.scale)+self.location)
-            poisson = stats.poisson.pmf(float(self.Y[i]),rate)
+            poisson = stats.poisson.pmf(float(data_i),rate)
             return pdf_norm_f*poisson

         def log_pnm(f):
             """
             Log of poisson_norm
             """
-            return -(-.5*(f-mu)**2/sigma**2 - np.exp( (f*self.scale)+self.location) + ( (f*self.scale)+self.location)*self.Y[i])
+            return -(-.5*(f-mu)**2/sigma**2 - np.exp( (f*self.scale)+self.location) + ( (f*self.scale)+self.location)*data_i)

         """
         Golden Search and Simpson's Rule
@@ -99,17 +100,17 @@
         #TODO golden search & simpson's rule can be defined in the general likelihood class, rather than in each specific case.

         #Golden search
-        golden_A = -1 if self.Y[i] == 0 else np.array([np.log(self.Y[i]),mu]).min() #Lower limit
-        golden_B = np.array([np.log(self.Y[i]),mu]).max() #Upper limit
+        golden_A = -1 if data_i == 0 else np.array([np.log(data_i),mu]).min() #Lower limit
+        golden_B = np.array([np.log(data_i),mu]).max() #Upper limit
         golden_A = (golden_A - self.location)/self.scale
         golden_B = (golden_B - self.location)/self.scale
         opt = sp.optimize.golden(log_pnm,brack=(golden_A,golden_B)) #Better to work with log_pnm than with poisson_norm

         # Simpson's approximation
-        width = 3./np.log(max(self.Y[i],2))
+        width = 3./np.log(max(data_i,2))
         A = opt - width #Lower limit
         B = opt + width #Upper limit
-        K = 10*int(np.log(max(self.Y[i],150))) #Number of points in the grid, we DON'T want K to be the same number for every case
+        K = 10*int(np.log(max(data_i,150))) #Number of points in the grid, we DON'T want K to be the same number for every case
         h = (B-A)/K # length of the intervals
         grid_x = np.hstack([np.linspace(opt-width,opt,K/2+1)[1:-1], np.linspace(opt,opt+width,K/2+1)]) # grid of points (X axis)
         x = np.hstack([A,B,grid_x[range(1,K,2)],grid_x[range(2,K-1,2)]]) # grid_x rearranged, just to make Simpson's algorithm easier
@@ -127,7 +128,7 @@
-        Compute mean, and conficence interval (percentiles 5 and 95) of the prediction
+        Compute mean and confidence interval (percentiles 2.5 and 97.5) of the prediction
         """
         mean = np.exp(mu*self.scale + self.location)
-        tmp = stats.poisson.ppf(np.array([.05,.95]),mu)
-        p_05 = tmp[:,0]
-        p_95 = tmp[:,1]
-        return mean,p_05,p_95
+        tmp = stats.poisson.ppf(np.array([.025,.975]),mean)
+        p_025 = tmp[:,0]
+        p_975 = tmp[:,1]
+        return mean,p_025,p_975