mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-07 11:02:38 +02:00)
Classification examples corrected (2/3)
This commit is contained in:
commit 24b6dfa086 (parent 5593d53828)
3 changed files with 54 additions and 96 deletions
@@ -3,16 +3,15 @@
 """
-Simple Gaussian Processes classification
+Gaussian Processes classification
 """
 import pylab as pb
 import numpy as np
 import GPy
 
 default_seed=10000
 ######################################
 ## 2 dimensional example
-
-def crescent_data(model_type='Full', inducing=10, seed=default_seed):
+def crescent_data(model_type='Full', inducing=10, seed=default_seed): #FIXME
     """Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
 
     :param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
@@ -30,7 +29,7 @@ def crescent_data(model_type='Full', inducing=10, seed=default_seed):
     # create sparse GP EP model
     m = GPy.models.sparse_GP_EP(data['X'],likelihood=likelihood,inducing=inducing,ep_proxy=model_type)
 
-    m.approximate_likelihood()
+    m.update_likelihood_approximation()
     print(m)
 
     # optimize
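Note on the hunk above: the only functional change is the rename of m.approximate_likelihood() to m.update_likelihood_approximation(); crescent_data itself still uses the old sparse API (hence the #FIXME). A minimal sketch of the resulting call sequence, assuming the sparse_GP_EP signature and the pre-commit probit likelihood exactly as they appear in this file; the dataset helper name and seed are assumptions taken from the surrounding docstring, not shown in this diff:

import GPy

# crescent data as loaded by this demo (helper name assumed, not shown in the diff)
data = GPy.util.datasets.crescent_data(seed=10000)

# pre-commit likelihood construction, still used by crescent_data (#FIXME)
likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1])

# sparse GP with an EP approximation, signature as shown in the hunk above
m = GPy.models.sparse_GP_EP(data['X'], likelihood=likelihood,
                            inducing=10, ep_proxy='FITC')
m.update_likelihood_approximation()  # renamed from m.approximate_likelihood()
m.optimize()
print(m)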
@@ -42,53 +41,66 @@ def crescent_data(model_type='Full', inducing=10, seed=default_seed):
     return m
 
 def oil():
-    """Run a Gaussian process classification on the oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood."""
+    """
+    Run a Gaussian process classification on the oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
+    """
     data = GPy.util.datasets.oil()
-    likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1])
+    # Kernel object
+    kernel = GPy.kern.rbf(12)
 
-    # create simple GP model
-    m = GPy.models.GP_EP(data['X'],likelihood)
+    # Likelihood object
+    distribution = GPy.likelihoods.likelihood_functions.probit()
+    likelihood = GPy.likelihoods.EP(data['Y'][:, 0:1],distribution)
 
-    # contrain all parameters to be positive
+    # Create GP model
+    m = GPy.models.GP(data['X'],kernel,likelihood=likelihood)
+
+    # Constrain all parameters to be positive
     m.constrain_positive('')
-    m.tie_param('lengthscale')
-    m.approximate_likelihood()
+    m.update_likelihood_approximation()
 
-    # optimize
+    # Optimize
     m.optimize()
 
     # plot
     #m.plot()
     print(m)
     return m
 
-def toy_linear_1d_classification(model_type='Full', inducing=4, seed=default_seed):
-    """Simple 1D classification example.
-    :param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
+def toy_linear_1d_classification(seed=default_seed):
+    """
+    Simple 1D classification example
     :param seed : seed value for data generation (default is 4).
     :type seed: int
-    :param inducing : number of inducing variables (only used for 'FITC' or 'DTC').
-    :type inducing: int
     """
 
     data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
-    likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1])
-    assert model_type in ('Full','DTC','FITC')
-
-    # create simple GP model
-    if model_type=='Full':
-        m = GPy.models.GP_EP(data['X'],likelihood)
-    else:
-        # create sparse GP EP model
-        m = GPy.models.sparse_GP_EP(data['X'],likelihood=likelihood,inducing=inducing,ep_proxy=model_type)
+    # Kernel object
+    kernel = GPy.kern.rbf(1)
 
-    m.constrain_positive('var')
-    m.constrain_positive('len')
-    m.tie_param('lengthscale')
-    m.approximate_likelihood()
+    # Likelihood object
+    distribution = GPy.likelihoods.likelihood_functions.probit()
+    likelihood = GPy.likelihoods.EP(data['Y'][:, 0:1],distribution)
 
-    # Optimize and plot
-    m.em(plot_all=False) # EM algorithm
-    m.plot()
+    # Model definition
+    m = GPy.models.GP(data['X'],kernel,likelihood=likelihood)
+
+    # Optimize
+    """
+    EPEM runs a loop that consists of two steps:
+    1) EP likelihood approximation:
+    m.update_likelihood_approximation()
+    2) Parameters optimization:
+    m.optimize()
+    """
+    m.EPEM()
+
+    # Plot
+    pb.subplot(211)
+    m.plot_GP()
+    pb.subplot(212)
+    m.plot_output()
     print(m)
 
     return m
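The EPEM docstring added above names a two-step alternation. As a rough illustration, the loop could be written out by hand as below; the iteration cap, the log-likelihood call, and the stopping rule are assumptions for this sketch, not GPy's actual EPEM implementation:

import numpy as np

def epem_by_hand(m, max_iters=10, tol=1e-4):
    """Alternate the two steps named in the EPEM docstring above.

    Sketch only: max_iters and the stopping rule are illustrative,
    not taken from GPy's EPEM code.
    """
    previous = -np.inf
    for _ in range(max_iters):
        m.update_likelihood_approximation()  # 1) refit the EP approximation
        m.optimize()                         # 2) optimize the hyperparameters
        current = m.log_likelihood()
        if abs(current - previous) < tol:    # stop once the objective settles
            break
        previous = current
    return m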
@@ -1,53 +0,0 @@
-# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-"""
-Simple Gaussian Processes classification 1D
-probit likelihood
-"""
-import pylab as pb
-import numpy as np
-import GPy
-pb.ion()
-
-pb.close('all')
-
-# Inputs
-N = 30
-X1 = np.random.normal(5,2,N/2)
-X2 = np.random.normal(10,2,N/2)
-X = np.hstack([X1,X2])[:,None]
-
-# Output
-Y = np.hstack([np.ones(N/2),np.repeat(-1,N/2)])[:,None]
-
-# Kernel object
-kernel = GPy.kern.rbf(1)
-
-# Likelihood object
-distribution = GPy.likelihoods.likelihood_functions.probit()
-likelihood = GPy.likelihoods.EP(Y,distribution)
-
-# Model definition
-m = GPy.models.GP(X,kernel,likelihood=likelihood)
-
-# Model constraints
-m.ensure_default_constraints()
-
-# Optimize model
-"""
-EPEM runs a loop that consists of two steps:
-1) EP likelihood approximation:
-m.update_likelihood_approximation()
-2) Parameters optimization:
-m.optimize()
-"""
-m.EPEM()
-
-# Plot
-pb.subplot(211)
-m.plot_GP()
-pb.subplot(212)
-m.plot_output()
-
-print(m)
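One portability note on the deleted script (and the test below): expressions like np.random.normal(5,2,N/2) and np.ones(N/2) depend on Python 2 integer division; on Python 3, N/2 is a float and NumPy rejects it as an array size. A minimal sketch of the same data generation that runs on either version:

import numpy as np

N = 30
half = N // 2  # explicit integer division; N/2 is a float on Python 3

# two Gaussian clusters on the line, labelled +1 and -1
X = np.hstack([np.random.normal(5, 2, half),
               np.random.normal(10, 2, half)])[:, None]
Y = np.hstack([np.ones(half), -np.ones(half)])[:, None]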
@@ -154,17 +154,16 @@ class GradientTests(unittest.TestCase):
         m.constrain_positive('(linear|bias|white)')
         self.assertTrue(m.checkgrad())
 
-    def test_GP_EP(self):
-        return # Disabled TODO
+    def test_GP_EP_probit(self):
         N = 20
-        X = np.hstack([np.random.rand(N/2)+1,np.random.rand(N/2)-1])[:,None]
-        k = GPy.kern.rbf(1) + GPy.kern.white(1)
-        Y = np.hstack([np.ones(N/2),-np.ones(N/2)])[:,None]
-        likelihood = GPy.inference.likelihoods.probit(Y)
-        m = GPy.models.GP_EP(X,likelihood,k)
-        m.constrain_positive('(var|len)')
-        m.approximate_likelihood()
-        self.assertTrue(m.checkgrad())
+        X = np.hstack([np.random.normal(5,2,N/2),np.random.normal(10,2,N/2)])[:,None]
+        Y = np.hstack([np.ones(N/2),np.repeat(-1,N/2)])[:,None]
+        kernel = GPy.kern.rbf(1)
+        distribution = GPy.likelihoods.likelihood_functions.probit()
+        likelihood = GPy.likelihoods.EP(Y,distribution)
+        m = GPy.models.GP(X,kernel,likelihood=likelihood)
+        m.ensure_default_constraints()
+        self.assertTrue(m.EPEM)
 
     @unittest.skip("FITC will be broken for a while")
     def test_generalized_FITC(self):
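The old test ended with m.checkgrad(), which compares analytic gradients against finite differences; the replacement only asserts that the EPEM attribute exists, so the gradient check is effectively dropped here. For reference, a self-contained sketch of what such a check does; the helper, step size, and tolerance are illustrative, not GPy's checkgrad:

import numpy as np

def finite_difference_check(f, grad_f, x, eps=1e-6, tol=1e-4):
    """Compare an analytic gradient with central finite differences."""
    x = np.asarray(x, dtype=float)
    numeric = np.zeros_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = eps
        numeric[i] = (f(x + step) - f(x - step)) / (2.0 * eps)
    return np.allclose(grad_f(x), numeric, atol=tol)

# example with a quadratic whose gradient is known in closed form
f = lambda x: 0.5 * float(np.dot(x, x))
grad_f = lambda x: x
assert finite_difference_check(f, grad_f, np.array([1.0, -2.0, 3.0]))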