Irrelevant changes

Author: Ricardo Andrade, 2013-03-11 11:41:46 +00:00
parent 1ddc059251
commit addb5da4e4
2 changed files with 82 additions and 53 deletions

Changed file 1 of 2:

@@ -11,7 +11,7 @@ import GPy
 default_seed=10000

-def crescent_data(model_type='Full', inducing=10, seed=default_seed): #FIXME
+def crescent_data(seed=default_seed): #FIXME
     """Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.

     :param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
@@ -31,11 +31,8 @@ def crescent_data(model_type='Full', inducing=10, seed=default_seed): #FIXME
     likelihood = GPy.likelihoods.EP(data['Y'],distribution)

-    if model_type=='Full':
-        m = GPy.models.GP(data['X'],likelihood,kernel)
-    else:
-        # create sparse GP EP model
-        m = GPy.models.sparse_GP_EP(data['X'],likelihood=likelihood,inducing=inducing,ep_proxy=model_type)
+    m = GPy.models.GP(data['X'],likelihood,kernel)
+    m.ensure_default_constraints()

     m.update_likelihood_approximation()
     print(m)
@@ -94,16 +91,13 @@ def toy_linear_1d_classification(seed=default_seed):
     # Model definition
     m = GPy.models.GP(data['X'],likelihood=likelihood,kernel=kernel)
+    m.ensure_default_constraints()

     # Optimize
-    """
-    EPEM runs a loop that consists of two steps:
-    1) EP likelihood approximation:
-    m.update_likelihood_approximation()
-    2) Parameters optimization:
-    m.optimize()
-    """
-    m.EPEM()
+    m.update_likelihood_approximation()
+    # Parameters optimization:
+    m.optimize()
+    #m.EPEM() #FIXME

     # Plot
     pb.subplot(211)
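The last hunk above replaces the bundled m.EPEM() call with the two steps its removed docstring said it loops over: an EP pass that builds a Gaussian approximation to the probit likelihood, followed by gradient-based optimization of the kernel parameters. A minimal sketch of that workflow on made-up data, assuming exactly the GPy API that appears in this diff (GPy.models.GP, GPy.likelihoods.EP, probit); the toy data itself is invented for illustration:

    import numpy as np
    import GPy

    # Hypothetical toy data: X is N x 1, Y holds +1/-1 class labels
    # (the same label convention the crescent demo uses).
    X = np.random.uniform(-3., 3., (50, 1))
    Y = np.where(np.sin(X) > 0., 1., -1.)

    # Kernel and EP likelihood, built as in the demo above.
    kernel = GPy.kern.rbf(1) + GPy.kern.white(1)
    distribution = GPy.likelihoods.likelihood_functions.probit()
    likelihood = GPy.likelihoods.EP(Y, distribution)

    m = GPy.models.GP(X, likelihood, kernel)
    m.ensure_default_constraints()

    # Step 1: EP approximation of the non-Gaussian likelihood.
    m.update_likelihood_approximation()
    # Step 2: optimize kernel parameters against the approximate marginal.
    m.optimize()
    print(m)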

Changed file 2 of 2:

@@ -10,51 +10,86 @@ import pylab as pb
 import numpy as np
 import GPy
 np.random.seed(2)
-pb.ion()
 N = 500
 M = 5
-pb.close('all')
-
-######################################
-## 1 dimensional example
-
-# sample inputs and outputs
-X = np.random.uniform(-3.,3.,(N,1))
-#Y = np.sin(X)+np.random.randn(N,1)*0.05
-F = np.sin(X)+np.random.randn(N,1)*0.05
-Y = np.ones([F.shape[0],1])
-Y[F<0] = -1
-likelihood = GPy.inference.likelihoods.probit(Y)
-
-# construct kernel
-rbf = GPy.kern.rbf(1)
-noise = GPy.kern.white(1)
-kernel = rbf + noise
-
-# create simple GP model
-#m = GPy.models.sparse_GP(X,Y=None, kernel=kernel, M=M,likelihood= likelihood)
-# contrain all parameters to be positive
-#m.constrain_fixed('prec',100.)
-m = GPy.models.sparse_GP(X, Y, kernel, M=M)
-m.ensure_default_constraints()
-#if not isinstance(m.likelihood,GPy.inference.likelihoods.gaussian):
-#    m.approximate_likelihood()
-print m.checkgrad()
-m.optimize('tnc', messages = 1)
-m.plot(samples=3)
-print m
-
-n = GPy.models.sparse_GP(X,Y=None, kernel=kernel, M=M,likelihood= likelihood)
-n.ensure_default_constraints()
-if not isinstance(n.likelihood,GPy.inference.likelihoods.gaussian):
-    n.approximate_likelihood()
-print n.checkgrad()
-pb.figure()
-n.plot()
+default_seed=10000
+
+def crescent_data(inducing=10, seed=default_seed):
+    """Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
+
+    :param model_type: type of model to fit ['Full', 'FITC', 'DTC'].
+    :param seed : seed value for data generation.
+    :type seed: int
+    :param inducing : number of inducing variables (only used for 'FITC' or 'DTC').
+    :type inducing: int
+    """
+    data = GPy.util.datasets.crescent_data(seed=seed)
+
+    # Kernel object
+    kernel = GPy.kern.rbf(data['X'].shape[1]) + GPy.kern.white(data['X'].shape[1])
+
+    # Likelihood object
+    distribution = GPy.likelihoods.likelihood_functions.probit()
+    likelihood = GPy.likelihoods.EP(data['Y'],distribution)
+
+    sample = np.random.randint(0,data['X'].shape[0],inducing)
+    Z = data['X'][sample,:]
+    #Z = (np.random.random_sample(2*inducing)*(data['X'].max()-data['X'].min())+data['X'].min()).reshape(inducing,-1)
+
+    # create sparse GP EP model
+    m = GPy.models.sparse_GP(data['X'],likelihood=likelihood,kernel=kernel,Z=Z)
+    m.ensure_default_constraints()
+    m.update_likelihood_approximation()
+    print(m)
+
+    # optimize
+    m.optimize()
+    print(m)
+
+    # plot
+    m.plot()
+    return m
+
+def toy_linear_1d_classification(seed=default_seed):
+    """
+    Simple 1D classification example
+
+    :param seed : seed value for data generation (default is 4).
+    :type seed: int
+    """
+    data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
+    Y = data['Y'][:, 0:1]
+    Y[Y == -1] = 0
+
+    # Kernel object
+    kernel = GPy.kern.rbf(1)
+
+    # Likelihood object
+    distribution = GPy.likelihoods.likelihood_functions.probit()
+    likelihood = GPy.likelihoods.EP(Y,distribution)
+
+    Z = np.random.uniform(data['X'].min(),data['X'].max(),(10,1))
+
+    # Model definition
+    m = GPy.models.sparse_GP(data['X'],likelihood=likelihood,kernel=kernel,Z=Z)
+    m.ensure_default_constraints()
+
+    # Optimize
+    m.update_likelihood_approximation()
+    # Parameters optimization:
+    m.optimize()
+    #m.EPEM() #FIXME
+
+    # Plot
+    pb.subplot(211)
+    m.plot_f()
+    pb.subplot(212)
+    m.plot()
+    print(m)
+    return m
+
+"""
+m = GPy.models.sparse_GP_regression(X, Y, kernel, M=M)
+m.ensure_default_constraints()
+print m.checkgrad()
+"""