mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-10 04:22:38 +02:00
Fixed lots of examples
This commit is contained in:
parent f65e92228d
commit 1cf4ad1ff4
11 changed files with 22 additions and 699 deletions
@@ -4,6 +4,4 @@
 import classification
 import regression
 import dimensionality_reduction
-import tutorials
-import stochastic
 import non_gaussian
@@ -28,13 +28,13 @@ def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
     m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, num_inducing=num_inducing)
 
     # Contrain all parameters to be positive
-    m.tie_params('.*len')
+    #m.tie_params('.*len')
     m['.*len'] = 10.
-    m.update_likelihood_approximation()
 
     # Optimize
     if optimize:
-        m.optimize(max_iters=max_iters)
+        for _ in range(5):
+            m.optimize(max_iters=int(max_iters/5))
     print(m)
 
     #Test
 
@@ -150,7 +150,7 @@ def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, opti
     print m
     return m
 
-def toy_heaviside(seed=default_seed, optimize=True, plot=True):
+def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
     """
     Simple 1D classification example using a heavy side gp transformation
 
@@ -166,16 +166,18 @@ def toy_heaviside(seed=default_seed, optimize=True, plot=True):
     Y[Y.flatten() == -1] = 0
 
     # Model definition
-    noise_model = GPy.likelihoods.bernoulli(GPy.likelihoods.noise_models.gp_transformations.Heaviside())
-    likelihood = GPy.likelihoods.EP(Y, noise_model)
-    m = GPy.models.GPClassification(data['X'], likelihood=likelihood)
+    kernel = GPy.kern.RBF(1)
+    likelihood = GPy.likelihoods.Bernoulli(gp_link=GPy.likelihoods.link_functions.Heaviside())
+    ep = GPy.inference.latent_function_inference.expectation_propagation.EP()
+    m = GPy.core.GP(X=data['X'], Y=Y, kernel=kernel, likelihood=likelihood, inference_method=ep, name='gp_classification_heaviside')
+    #m = GPy.models.GPClassification(data['X'], likelihood=likelihood)
 
     # Optimize
     if optimize:
-        m.update_likelihood_approximation()
-        # Parameters optimization:
-        m.optimize()
+        #m.pseudo_EM()
+        for _ in range(5):
+            m.optimize(max_iters=int(max_iters/5))
     print m
 
     # Plot
     if plot:
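
For context, this hunk swaps the old wrapper classes (GPy.likelihoods.bernoulli plus an EP likelihood object) for an explicit kernel/likelihood/inference triple handed to GPy.core.GP. Below is a minimal runnable sketch of the new-style setup, reusing only calls that appear in the hunk; the toy data and the max_iters value are assumptions:

import numpy as np
import GPy

# Toy binary data (an assumption; the original example loads a dataset)
np.random.seed(0)
X = np.random.uniform(-3., 3., (50, 1))
Y = np.where(np.sin(X) > 0., 1., 0.)  # labels in {0, 1}, matching the hunk's Y recoding

# New-style construction: kernel, likelihood and inference method are all explicit
kernel = GPy.kern.RBF(1)
likelihood = GPy.likelihoods.Bernoulli(gp_link=GPy.likelihoods.link_functions.Heaviside())
ep = GPy.inference.latent_function_inference.expectation_propagation.EP()
m = GPy.core.GP(X=X, Y=Y, kernel=kernel, likelihood=likelihood,
                inference_method=ep, name='gp_classification_heaviside')

# Several short optimize() runs instead of one long one, so the EP
# approximation is refreshed between hyperparameter updates
for _ in range(5):
    m.optimize(max_iters=20)
print(m)
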
@@ -59,7 +59,7 @@ def student_t_approx(optimize=True, plot=True):
     t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
     laplace_inf = GPy.inference.latent_function_inference.Laplace()
     m3 = GPy.core.GP(X, Y.copy(), kernel3, likelihood=t_distribution, inference_method=laplace_inf)
-    m3['.*t_noise'].constrain_bounded(1e-6, 10.)
+    m3['.*t_scale2'].constrain_bounded(1e-6, 10.)
     m3['.*white'].constrain_fixed(1e-5)
     m3.randomize()
 
@@ -67,7 +67,7 @@ def student_t_approx(optimize=True, plot=True):
     t_distribution = GPy.likelihoods.StudentT(deg_free=deg_free, sigma2=edited_real_sd)
     laplace_inf = GPy.inference.latent_function_inference.Laplace()
     m4 = GPy.core.GP(X, Yc.copy(), kernel4, likelihood=t_distribution, inference_method=laplace_inf)
-    m4['.*t_noise'].constrain_bounded(1e-6, 10.)
+    m4['.*t_scale2'].constrain_bounded(1e-6, 10.)
    m4['.*white'].constrain_fixed(1e-5)
     m4.randomize()
     print m4
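
The two hunks above reflect a parameter rename in the Student-t likelihood: bounds previously placed on `.*t_noise` now target `.*t_scale2`. A small self-contained sketch of the same construction; the 1D data, kernel, and constants are illustrative assumptions:

import numpy as np
import GPy

np.random.seed(1)
X = np.linspace(0., 10., 40)[:, None]
Y = np.sin(X) + 0.1 * np.random.standard_t(3, size=X.shape)  # heavy-tailed noise

kernel = GPy.kern.RBF(1) + GPy.kern.White(1)
t_distribution = GPy.likelihoods.StudentT(deg_free=5., sigma2=0.1)
laplace_inf = GPy.inference.latent_function_inference.Laplace()
m = GPy.core.GP(X, Y, kernel, likelihood=t_distribution, inference_method=laplace_inf)

# The renamed parameter: bound t_scale2, not the old t_noise
m['.*t_scale2'].constrain_bounded(1e-6, 10.)
m['.*white'].constrain_fixed(1e-5)
m.optimize()
print(m)
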
@@ -124,6 +124,7 @@ def student_t_approx(optimize=True, plot=True):
     return m1, m2, m3, m4
 
 def boston_example(optimize=True, plot=True):
+    raise NotImplementedError("Needs updating")
     import sklearn
     from sklearn.cross_validation import KFold
     optimizer='bfgs'
@@ -152,8 +153,8 @@ def boston_example(optimize=True, plot=True):
     noise = 1e-1 #np.exp(-2)
     rbf_len = 0.5
     data_axis_plot = 4
-    kernelstu = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
-    kernelgp = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
+    kernelstu = GPy.kern.RBF(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
+    kernelgp = GPy.kern.RBF(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
 
     #Baseline
     score_folds[0, n] = rmse(Y_test, np.mean(Y_train))
@@ -162,8 +163,8 @@ def boston_example(optimize=True, plot=True):
     print "Gauss GP"
     mgp = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelgp.copy())
     mgp.constrain_fixed('.*white', 1e-5)
-    mgp['rbf_len'] = rbf_len
-    mgp['noise'] = noise
+    mgp['.*len'] = rbf_len
+    mgp['.*noise'] = noise
     print mgp
     if optimize:
         mgp.optimize(optimizer=optimizer, messages=messages)
@@ -198,9 +199,9 @@ def boston_example(optimize=True, plot=True):
     stu_t_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), t_distribution)
     mstu_t = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=stu_t_likelihood)
     mstu_t.constrain_fixed('.*white', 1e-5)
-    mstu_t.constrain_bounded('.*t_noise', 0.0001, 1000)
+    mstu_t.constrain_bounded('.*t_scale2', 0.0001, 1000)
     mstu_t['rbf_len'] = rbf_len
-    mstu_t['.*t_noise'] = noise
+    mstu_t['.*t_scale2'] = noise
     print mstu_t
     if optimize:
         mstu_t.optimize(optimizer=optimizer, messages=messages)
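
The boston_example hunks also illustrate the move from flat parameter names ('rbf_len', 'noise') to regex addressing ('.*len', '.*noise'). A tiny sketch of that pattern on an assumed RBF regression model:

import numpy as np
import GPy

X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))

# '.*len' matches every parameter whose name contains 'len'
# (here the RBF lengthscale), replacing the old flat 'rbf_len' key
m['.*len'] = 0.5
m['.*noise'] = 0.1  # the Gaussian noise variance, formerly 'noise'
print(m)
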
@@ -1,40 +0,0 @@
-# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-try:
-    import pylab as pb
-except:
-    pass
-import numpy as np
-import GPy
-
-def toy_1d(optimize=True, plot=True):
-    N = 2000
-    M = 20
-
-    #create data
-    X = np.linspace(0,32,N)[:,None]
-    Z = np.linspace(0,32,M)[:,None]
-    Y = np.sin(X) + np.cos(0.3*X) + np.random.randn(*X.shape)/np.sqrt(50.)
-
-    m = GPy.models.SVIGPRegression(X,Y, batchsize=10, Z=Z)
-    m.constrain_bounded('noise_variance',1e-3,1e-1)
-    m.constrain_bounded('white_variance',1e-3,1e-1)
-
-    m.param_steplength = 1e-4
-
-    if plot:
-        fig = pb.figure()
-        ax = fig.add_subplot(111)
-        def cb(foo):
-            ax.cla()
-            m.plot(ax=ax,Z_height=-3)
-            ax.set_ylim(-3,3)
-            fig.canvas.draw()
-
-    if optimize:
-        m.optimize(500, callback=cb, callback_interval=1)
-
-    if plot:
-        m.plot_traces()
-    return m
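
This deleted file was the only user of GPy.models.SVIGPRegression, which is removed by this commit. For readers wanting a comparable toy, here is a rough batch analogue using sparse GP regression; the SparseGPRegression call is an assumption on my part, not part of this diff:

import numpy as np
import GPy

N, M = 2000, 20
X = np.linspace(0, 32, N)[:, None]
Z = np.linspace(0, 32, M)[:, None]
Y = np.sin(X) + np.cos(0.3 * X) + np.random.randn(*X.shape) / np.sqrt(50.)

# Batch sparse GP with M inducing inputs at Z (no stochastic updates)
m = GPy.models.SparseGPRegression(X, Y, Z=Z)
m.optimize(max_iters=500)
print(m)
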
@@ -1,156 +0,0 @@
-# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
-# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
-
-"""
-Code of Tutorials
-"""
-
-try:
-    import pylab as pb
-    pb.ion()
-except:
-    pass
-import numpy as np
-import GPy
-
-def tuto_GP_regression(optimize=True, plot=True):
-    """The detailed explanations of the commands used in this file can be found in the tutorial section"""
-
-    X = np.random.uniform(-3.,3.,(20,1))
-    Y = np.sin(X) + np.random.randn(20,1)*0.05
-
-    kernel = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)
-
-    m = GPy.models.GPRegression(X, Y, kernel)
-
-    print m
-    if plot:
-        m.plot()
-
-    m.constrain_positive('')
-
-    m.unconstrain('') # may be used to remove the previous constrains
-    m.constrain_positive('.*rbf_variance')
-    m.constrain_bounded('.*lengthscale',1.,10. )
-    m.constrain_fixed('.*noise',0.0025)
-
-    if optimize:
-        m.optimize()
-        m.optimize_restarts(num_restarts = 10)
-
-    #######################################################
-    #######################################################
-    # sample inputs and outputs
-    X = np.random.uniform(-3.,3.,(50,2))
-    Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(50,1)*0.05
-
-    # define kernel
-    ker = GPy.kern.Matern52(2,ARD=True) + GPy.kern.white(2)
-
-    # create simple GP model
-    m = GPy.models.GPRegression(X, Y, ker)
-
-    # contrain all parameters to be positive
-    m.constrain_positive('')
-
-    # optimize and plot
-    if optimize:
-        m.optimize('tnc', max_f_eval = 1000)
-    if plot:
-        m.plot()
-
-    print m
-    return(m)
-
-def tuto_kernel_overview(optimize=True, plot=True):
-    """The detailed explanations of the commands used in this file can be found in the tutorial section"""
-    ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)
-    ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.)
-    ker3 = GPy.kern.rbf(1, .5, .5)
-
-    print ker2
-
-    if plot:
-        ker1.plot()
-        ker2.plot()
-        ker3.plot()
-
-    k1 = GPy.kern.rbf(1,1.,2.)
-    k2 = GPy.kern.Matern32(1, 0.5, 0.2)
-
-    # Product of kernels
-    k_prod = k1.prod(k2) # By default, tensor=False
-    k_prodtens = k1.prod(k2,tensor=True)
-
-    # Sum of kernels
-    k_add = k1.add(k2) # By default, tensor=False
-    k_addtens = k1.add(k2,tensor=True)
-
-    k1 = GPy.kern.rbf(1,1.,2)
-    k2 = GPy.kern.periodic_Matern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5)
-
-    k = k1 * k2 # equivalent to k = k1.prod(k2)
-    print k
-
-    # Simulate sample paths
-    X = np.linspace(-5,5,501)[:,None]
-    Y = np.random.multivariate_normal(np.zeros(501),k.K(X),1)
-
-    k1 = GPy.kern.rbf(1)
-    k2 = GPy.kern.Matern32(1)
-    k3 = GPy.kern.white(1)
-
-    k = k1 + k2 + k3
-    print k
-
-    k.constrain_positive('.*var')
-    k.constrain_fixed(np.array([1]),1.75)
-    k.tie_params('.*len')
-    k.unconstrain('white')
-    k.constrain_bounded('white',lower=1e-5,upper=.5)
-    print k
-
-    k_cst = GPy.kern.bias(1,variance=1.)
-    k_mat = GPy.kern.Matern52(1,variance=1., lengthscale=3)
-    Kanova = (k_cst + k_mat).prod(k_cst + k_mat,tensor=True)
-    print Kanova
-
-    # sample inputs and outputs
-    X = np.random.uniform(-3.,3.,(40,2))
-    Y = 0.5*X[:,:1] + 0.5*X[:,1:] + 2*np.sin(X[:,:1]) * np.sin(X[:,1:])
-
-    # Create GP regression model
-    m = GPy.models.GPRegression(X, Y, Kanova)
-
-    if plot:
-        fig = pb.figure(figsize=(5,5))
-        ax = fig.add_subplot(111)
-        m.plot(ax=ax)
-
-        pb.figure(figsize=(20,3))
-        pb.subplots_adjust(wspace=0.5)
-        axs = pb.subplot(1,5,1)
-        m.plot(ax=axs)
-        pb.subplot(1,5,2)
-        pb.ylabel("= ",rotation='horizontal',fontsize='30')
-        axs = pb.subplot(1,5,3)
-        m.plot(ax=axs, which_parts=[False,True,False,False])
-        pb.ylabel("cst +",rotation='horizontal',fontsize='30')
-        axs = pb.subplot(1,5,4)
-        m.plot(ax=axs, which_parts=[False,False,True,False])
-        pb.ylabel("+ ",rotation='horizontal',fontsize='30')
-        axs = pb.subplot(1,5,5)
-        pb.ylabel("+ ",rotation='horizontal',fontsize='30')
-        m.plot(ax=axs, which_parts=[False,False,False,True])
-
-    return(m)
-
-
-def model_interaction(optimize=True, plot=True):
-    X = np.random.randn(20,1)
-    Y = np.sin(X) + np.random.randn(*X.shape)*0.01 + 5.
-    k = GPy.kern.RBF(1) + GPy.kern.Bias(1)
-    m = GPy.models.GPRegression(X, Y, kernel=k)
-    return m
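
The deleted tutorial leaned on the old lowercase kernel constructors (GPy.kern.rbf, GPy.kern.white) and the .add/.prod methods. A short sketch of the equivalent kernel arithmetic in the CamelCase API used elsewhere in this commit; the parameter values are carried over from the tutorial:

import numpy as np
import GPy

k1 = GPy.kern.RBF(1, variance=1., lengthscale=2.)
k2 = GPy.kern.Matern32(1, variance=0.5, lengthscale=0.2)

k_add = k1 + k2   # sum kernel, replacing k1.add(k2)
k_prod = k1 * k2  # product kernel, replacing k1.prod(k2)
print(k_add)

# Draw one sample path from the summed kernel, as the tutorial did
X = np.linspace(-5., 5., 501)[:, None]
Y = np.random.multivariate_normal(np.zeros(501), k_add.K(X), 1)
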