Classification examples corrected (2/3)

This commit is contained in:
Ricardo Andrade 2013-02-01 16:21:26 +00:00
parent 5593d53828
commit 24b6dfa086
3 changed files with 54 additions and 96 deletions

View file

@ -3,16 +3,15 @@
"""
Simple Gaussian Processes classification
Gaussian Processes classification
"""
import pylab as pb
import numpy as np
import GPy
default_seed=10000
######################################
## 2 dimensional example
def crescent_data(model_type='Full', inducing=10, seed=default_seed):
def crescent_data(model_type='Full', inducing=10, seed=default_seed): #FIXME
"""Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
:param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
@ -30,7 +29,7 @@ def crescent_data(model_type='Full', inducing=10, seed=default_seed):
# create sparse GP EP model
m = GPy.models.sparse_GP_EP(data['X'],likelihood=likelihood,inducing=inducing,ep_proxy=model_type)
m.approximate_likelihood()
m.update_likelihood_approximation()
print(m)
# optimize
@ -42,53 +41,66 @@ def crescent_data(model_type='Full', inducing=10, seed=default_seed):
return m
def oil():
"""Run a Gaussian process classification on the oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood."""
"""
Run a Gaussian process classification on the oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
"""
data = GPy.util.datasets.oil()
likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1])
# Kernel object
kernel = GPy.kern.rbf(12)
# create simple GP model
m = GPy.models.GP_EP(data['X'],likelihood)
# Likelihood object
distribution = GPy.likelihoods.likelihood_functions.probit()
likelihood = GPy.likelihoods.EP(data['Y'][:, 0:1],distribution)
# constrain all parameters to be positive
# Create GP model
m = GPy.models.GP(data['X'],kernel,likelihood=likelihood)
# Constrain all parameters to be positive
m.constrain_positive('')
m.tie_param('lengthscale')
m.approximate_likelihood()
m.update_likelihood_approximation()
# optimize
# Optimize
m.optimize()
# plot
#m.plot()
print(m)
return m
def toy_linear_1d_classification(model_type='Full', inducing=4, seed=default_seed):
"""Simple 1D classification example.
:param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
def toy_linear_1d_classification(seed=default_seed):
"""
Simple 1D classification example
:param seed: seed value for data generation (default is 10000).
:type seed: int
:param inducing : number of inducing variables (only used for 'FITC' or 'DTC').
:type inducing: int
"""
data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
likelihood = GPy.inference.likelihoods.probit(data['Y'][:, 0:1])
assert model_type in ('Full','DTC','FITC')
# create simple GP model
if model_type=='Full':
m = GPy.models.GP_EP(data['X'],likelihood)
else:
# create sparse GP EP model
m = GPy.models.sparse_GP_EP(data['X'],likelihood=likelihood,inducing=inducing,ep_proxy=model_type)
# Kernel object
kernel = GPy.kern.rbf(1)
m.constrain_positive('var')
m.constrain_positive('len')
m.tie_param('lengthscale')
m.approximate_likelihood()
# Likelihood object
distribution = GPy.likelihoods.likelihood_functions.probit()
likelihood = GPy.likelihoods.EP(data['Y'][:, 0:1],distribution)
# Optimize and plot
m.em(plot_all=False) # EM algorithm
m.plot()
# Model definition
m = GPy.models.GP(data['X'],kernel,likelihood=likelihood)
# Optimize
"""
EPEM runs a loop that consists of two steps:
1) EP likelihood approximation:
m.update_likelihood_approximation()
2) Parameters optimization:
m.optimize()
"""
m.EPEM()
# Plot
pb.subplot(211)
m.plot_GP()
pb.subplot(212)
m.plot_output()
print(m)
return m