hard-merging in the examples and testing dirs from master.

This is probably a dumb way to do it, but I don't know better.
James Hensman 2014-01-24 09:41:07 +00:00
parent 8022de2a86
commit 375e2f6225
16 changed files with 1747 additions and 758 deletions
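
Every file in this merge applies the same refactor: example functions grow optimize=True, plot=True keyword arguments and return their model, so the testing dir can smoke-run a demo without the expensive optimization step or any plot windows. A minimal sketch of that calling convention, assuming the examples subpackage is importable as GPy.examples (matching the repository layout) and Python 2, which this codebase targets:

from GPy.examples import classification

# Smoke-run a demo the way the tests can now drive it: build the model,
# but skip optimization and plotting entirely.
m = classification.toy_linear_1d_classification(optimize=False, plot=False)
print m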


@@ -6,12 +6,11 @@
Gaussian Processes classification
"""
import pylab as pb
import numpy as np
import GPy
default_seed = 10000
def oil(num_inducing=50, max_iters=100, kernel=None):
def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
"""
Run a Gaussian process classification on the three phase oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
@@ -25,7 +24,7 @@ def oil(num_inducing=50, max_iters=100, kernel=None):
Ytest[Ytest.flatten()==-1] = 0
# Create GP model
m = GPy.models.SparseGPClassification(X, Y,kernel=kernel,num_inducing=num_inducing)
m = GPy.models.SparseGPClassification(X, Y, kernel=kernel, num_inducing=num_inducing)
# Constrain all parameters to be positive
m.tie_params('.*len')
@@ -33,17 +32,18 @@ def oil(num_inducing=50, max_iters=100, kernel=None):
m.update_likelihood_approximation()
# Optimize
m.optimize(max_iters=max_iters)
if optimize:
m.optimize(max_iters=max_iters)
print(m)
#Test
probs = m.predict(Xtest)[0]
GPy.util.classification.conf_matrix(probs,Ytest)
GPy.util.classification.conf_matrix(probs, Ytest)
return m
def toy_linear_1d_classification(seed=default_seed):
def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
"""
Simple 1D classification example
Simple 1D classification example using EP approximation
:param seed: seed value for data generation (default is 4).
:type seed: int
@@ -58,20 +58,59 @@ def toy_linear_1d_classification(seed=default_seed):
m = GPy.models.GPClassification(data['X'], Y)
# Optimize
#m.update_likelihood_approximation()
# Parameters optimization:
#m.optimize()
m.pseudo_EM()
if optimize:
#m.update_likelihood_approximation()
# Parameters optimization:
#m.optimize()
#m.update_likelihood_approximation()
m.pseudo_EM()
# Plot
fig, axes = pb.subplots(2,1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print(m)
if plot:
fig, axes = pb.subplots(2, 1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print m
return m
def sparse_toy_linear_1d_classification(num_inducing=10,seed=default_seed):
def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True):
"""
Simple 1D classification example using Laplace approximation
:param seed: seed value for data generation (default is 4).
:type seed: int
"""
data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
bern_noise_model = GPy.likelihoods.bernoulli()
laplace_likelihood = GPy.likelihoods.Laplace(Y.copy(), bern_noise_model)
# Model definition
m = GPy.models.GPClassification(data['X'], Y, likelihood=laplace_likelihood)
print m
# Optimize
if optimize:
#m.update_likelihood_approximation()
# Parameters optimization:
m.optimize('bfgs', messages=1)
#m.pseudo_EM()
# Plot
if plot:
fig, axes = pb.subplots(2, 1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print m
return m
def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, optimize=True, plot=True):
"""
Sparse 1D classification example
@@ -85,24 +124,26 @@ def sparse_toy_linear_1d_classification(num_inducing=10,seed=default_seed):
Y[Y.flatten() == -1] = 0
# Model definition
m = GPy.models.SparseGPClassification(data['X'], Y,num_inducing=num_inducing)
m['.*len']= 4.
m = GPy.models.SparseGPClassification(data['X'], Y, num_inducing=num_inducing)
m['.*len'] = 4.
# Optimize
#m.update_likelihood_approximation()
# Parameters optimization:
#m.optimize()
m.pseudo_EM()
if optimize:
#m.update_likelihood_approximation()
# Parameters optimization:
#m.optimize()
m.pseudo_EM()
# Plot
fig, axes = pb.subplots(2,1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print(m)
if plot:
fig, axes = pb.subplots(2, 1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print m
return m
def toy_heaviside(seed=default_seed):
def toy_heaviside(seed=default_seed, optimize=True, plot=True):
"""
Simple 1D classification example using a Heaviside GP transformation
@@ -116,25 +157,27 @@ def toy_heaviside(seed=default_seed):
Y[Y.flatten() == -1] = 0
# Model definition
noise_model = GPy.likelihoods.binomial(GPy.likelihoods.noise_models.gp_transformations.Heaviside())
likelihood = GPy.likelihoods.EP(Y,noise_model)
noise_model = GPy.likelihoods.bernoulli(GPy.likelihoods.noise_models.gp_transformations.Heaviside())
likelihood = GPy.likelihoods.EP(Y, noise_model)
m = GPy.models.GPClassification(data['X'], likelihood=likelihood)
# Optimize
m.update_likelihood_approximation()
# Parameters optimization:
m.optimize()
#m.pseudo_EM()
if optimize:
m.update_likelihood_approximation()
# Parameters optimization:
m.optimize()
#m.pseudo_EM()
# Plot
fig, axes = pb.subplots(2,1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print(m)
if plot:
fig, axes = pb.subplots(2, 1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print m
return m
def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None):
def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True):
"""
Run a Gaussian process classification on the crescent data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
@@ -151,7 +194,7 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=
Y[Y.flatten()==-1] = 0
if model_type == 'Full':
m = GPy.models.GPClassification(data['X'], Y,kernel=kernel)
m = GPy.models.GPClassification(data['X'], Y, kernel=kernel)
elif model_type == 'DTC':
m = GPy.models.SparseGPClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
@@ -161,8 +204,11 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=
m = GPy.models.FITCClassification(data['X'], Y, kernel=kernel, num_inducing=num_inducing)
m['.*len'] = 3.
m.pseudo_EM()
print(m)
m.plot()
if optimize:
m.pseudo_EM()
if plot:
m.plot()
print m
return m

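The classification file also gains a Laplace-approximation variant of the 1D demo alongside the EP-based one. A standalone sketch of the new construction, condensed from the calls in the diff above:

import GPy

data = GPy.util.datasets.toy_linear_1d_classification(seed=10000)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0  # recode labels from {-1, +1} to {0, 1}

# Bernoulli noise model wrapped in the Laplace likelihood approximation,
# in place of the EP approximation the older demos use
bern_noise_model = GPy.likelihoods.bernoulli()
laplace_likelihood = GPy.likelihoods.Laplace(Y.copy(), bern_noise_model)
m = GPy.models.GPClassification(data['X'], Y, likelihood=laplace_likelihood)
m.optimize('bfgs', messages=1)
print m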

@@ -1,96 +1,105 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as _np
default_seed = _np.random.seed(123344)
import numpy as np
from matplotlib import pyplot as plt, cm
def bgplvm_test_model(seed=default_seed, optimize=False, verbose=1, plot=False):
"""
Model for testing purposes. Samples from a GP with an rbf kernel and learns
the samples with a new kernel. Normally not for optimization, just model checking.
"""
from GPy.likelihoods.gaussian import Gaussian
import GPy
from ..models.bayesian_gplvm import BayesianGPLVM
from ..likelihoods.gaussian import Gaussian
import GPy
num_inputs = 13
num_inducing = 5
if plot:
output_dim = 1
input_dim = 2
else:
input_dim = 2
output_dim = 25
default_seed = np.random.seed(123344)
def BGPLVM(seed=default_seed):
N = 5
num_inducing = 4
input_dim = 3
D = 2
# generate GPLVM-like data
X = np.random.rand(N, input_dim)
lengthscales = np.random.rand(input_dim)
X = _np.random.rand(num_inputs, input_dim)
lengthscales = _np.random.rand(input_dim)
k = (GPy.kern.rbf(input_dim, .5, lengthscales, ARD=True)
+ GPy.kern.white(input_dim, 0.01))
K = k.K(X)
Y = np.random.multivariate_normal(np.zeros(N), K, D).T
Y = _np.random.multivariate_normal(_np.zeros(num_inputs), K, output_dim).T
lik = Gaussian(Y, normalize=True)
# k = GPy.kern.rbf_inv(input_dim, .5, np.ones(input_dim) * 2., ARD=True) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim)
k = GPy.kern.rbf(input_dim, ARD=1, name="rbf1") + GPy.kern.rbf(input_dim, ARD=1, name='rbf2') + GPy.kern.linear(input_dim, ARD=1, name='linear_part')
# k = GPy.kern.rbf(input_dim, ARD = False)
k = GPy.kern.rbf_inv(input_dim, .5, _np.ones(input_dim) * 2., ARD=True) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim)
# k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
# k = GPy.kern.rbf(input_dim, ARD = False) + GPy.kern.white(input_dim, 0.00001)
# k = GPy.kern.rbf(input_dim, .5, _np.ones(input_dim) * 2., ARD=True) + GPy.kern.rbf(input_dim, .3, _np.ones(input_dim) * .2, ARD=True)
# k = GPy.kern.rbf(input_dim, .5, 2., ARD=0) + GPy.kern.rbf(input_dim, .3, .2, ARD=0)
# k = GPy.kern.rbf(input_dim, .5, _np.ones(input_dim) * 2., ARD=True) + GPy.kern.linear(input_dim, _np.ones(input_dim) * .2, ARD=True)
m = BayesianGPLVM(lik, input_dim, kernel=k, num_inducing=num_inducing)
m = GPy.models.BayesianGPLVM(lik, input_dim, kernel=k, num_inducing=num_inducing)
#===========================================================================
# randomly obstruct data with percentage p
p = .8
Y_obstruct = Y.copy()
Y_obstruct[_np.random.uniform(size=(Y.shape)) < p] = _np.nan
#===========================================================================
m2 = GPy.models.BayesianGPLVMWithMissingData(Y_obstruct, input_dim, kernel=k, num_inducing=num_inducing)
m.lengthscales = lengthscales
# m.constrain_positive('(rbf|bias|noise|white|S)')
# m.constrain_fixed('S', 1)
# pb.figure()
# m.plot()
# pb.title('PCA initialisation')
# pb.figure()
# m.optimize(messages = 1)
# m.plot()
# pb.title('After optimisation')
# m.randomize()
# m.checkgrad(verbose=1)
if plot:
import matplotlib.pyplot as pb
m.plot()
pb.title('PCA initialisation')
m2.plot()
pb.title('PCA initialisation')
return m
if optimize:
m.optimize('scg', messages=verbose)
m2.optimize('scg', messages=verbose)
if plot:
m.plot()
pb.title('After optimisation')
m2.plot()
pb.title('After optimisation')
def GPLVM_oil_100(optimize=True, plot=True):
return m, m2
def gplvm_oil_100(optimize=True, verbose=1, plot=True):
import GPy
data = GPy.util.datasets.oil_100()
Y = data['X']
# create simple GP model
kernel = GPy.kern.rbf(6, ARD=True) + GPy.kern.bias(6)
m = GPy.models.GPLVM(Y, 6, kernel=kernel)
m.data_labels = data['Y'].argmax(axis=1)
# optimize
if optimize:
m.optimize('scg', messages=1)
# plot
print(m)
if plot:
m.plot_latent(labels=m.data_labels)
if optimize: m.optimize('scg', messages=verbose)
if plot: m.plot_latent(labels=m.data_labels)
return m
def sparseGPLVM_oil(optimize=True, N=100, input_dim=6, num_inducing=15, max_iters=50):
np.random.seed(0)
def sparse_gplvm_oil(optimize=True, verbose=0, plot=True, N=100, Q=6, num_inducing=15, max_iters=50):
import GPy
_np.random.seed(0)
data = GPy.util.datasets.oil()
Y = data['X'][:N]
Y = Y - Y.mean(0)
Y /= Y.std(0)
# Create the model
kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q)
m = GPy.models.SparseGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing)
m.data_labels = data['Y'][:N].argmax(axis=1)
# create simple GP model
kernel = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.bias(input_dim)
m = GPy.models.SparseGPLVM(Y, input_dim, kernel=kernel, num_inducing=num_inducing)
m.data_labels = data['Y'].argmax(axis=1)
# optimize
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
# plot
print(m)
# m.plot_latent(labels=m.data_labels)
if optimize: m.optimize('scg', messages=verbose, max_iters=max_iters)
if plot:
m.plot_latent(labels=m.data_labels)
m.kern.plot_ARD()
return m
def swiss_roll(optimize=True, N=1000, num_inducing=15, input_dim=4, sigma=.2, plot=False):
def swiss_roll(optimize=True, verbose=1, plot=True, N=1000, num_inducing=15, Q=4, sigma=.2):
import GPy
from GPy.util.datasets import swiss_roll_generated
from GPy.core.transformations import LogexpClipped
from GPy.models import BayesianGPLVM
data = swiss_roll_generated(N=N, sigma=sigma)
data = swiss_roll_generated(num_samples=N, sigma=sigma)
Y = data['Y']
Y -= Y.mean()
Y /= Y.std()
@@ -102,120 +111,99 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, input_dim=4, sigma=.2, pl
from sklearn.manifold.isomap import Isomap
iso = Isomap().fit(Y)
X = iso.embedding_
if input_dim > 2:
X = np.hstack((X, np.random.randn(N, input_dim - 2)))
if Q > 2:
X = _np.hstack((X, _np.random.randn(N, Q - 2)))
except ImportError:
X = np.random.randn(N, input_dim)
X = _np.random.randn(N, Q)
if plot:
from mpl_toolkits import mplot3d
import pylab
fig = pylab.figure("Swiss Roll Data")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # @UnusedImport
fig = plt.figure("Swiss Roll Data")
ax = fig.add_subplot(121, projection='3d')
ax.scatter(*Y.T, c=c)
ax.set_title("Swiss Roll")
ax = fig.add_subplot(122)
ax.scatter(*X.T[:2], c=c)
ax.set_title("Initialization")
ax.set_title("BGPLVM init")
var = .5
S = (var * np.ones_like(X) + np.clip(np.random.randn(N, input_dim) * var ** 2,
S = (var * _np.ones_like(X) + _np.clip(_np.random.randn(N, Q) * var ** 2,
- (1 - var),
(1 - var))) + .001
Z = np.random.permutation(X)[:num_inducing]
Z = _np.random.permutation(X)[:num_inducing]
kernel = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.bias(input_dim, np.exp(-2)) + GPy.kern.white(input_dim, np.exp(-2))
kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, _np.exp(-2)) + GPy.kern.white(Q, _np.exp(-2))
m = BayesianGPLVM(Y, input_dim, X=X, X_variance=S, num_inducing=num_inducing, Z=Z, kernel=kernel)
m = BayesianGPLVM(Y, Q, X=X, X_variance=S, num_inducing=num_inducing, Z=Z, kernel=kernel)
m.data_colors = c
m.data_t = t
m['rbf_lengthscale'] = 1. # X.var(0).max() / X.var(0)
m['noise_variance'] = Y.var() / 100.
m['bias_variance'] = 0.05
if optimize:
m.optimize('scg', messages=1)
m.optimize('scg', messages=verbose, max_iters=2e3)
if plot:
fig = plt.figure('fitted')
ax = fig.add_subplot(111)
s = m.input_sensitivity().argsort()[::-1][:2]
ax.scatter(*m.X.T[s], c=c)
return m
def BGPLVM_oil(optimize=True, N=200, input_dim=7, num_inducing=40, max_iters=1000, plot=False, **k):
np.random.seed(0)
def bgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40, max_iters=1000, **k):
import GPy
from GPy.likelihoods import Gaussian
from matplotlib import pyplot as plt
_np.random.seed(0)
data = GPy.util.datasets.oil()
# create simple GP model
kernel = GPy.kern.rbf_inv(input_dim, 1., [.1] * input_dim, ARD=True) + GPy.kern.bias(input_dim, np.exp(-2))
kernel = GPy.kern.rbf_inv(Q, 1., [.1] * Q, ARD=True) + GPy.kern.bias(Q, _np.exp(-2))
Y = data['X'][:N]
Yn = Gaussian(Y, normalize=True)
# Yn = Y - Y.mean(0)
# Yn /= Yn.std(0)
m = GPy.models.BayesianGPLVM(Yn, input_dim, kernel=kernel, num_inducing=num_inducing, **k)
m = GPy.models.BayesianGPLVM(Yn, Q, kernel=kernel, num_inducing=num_inducing, **k)
m.data_labels = data['Y'][:N].argmax(axis=1)
m['noise'] = Yn.Y.var() / 100.
# m.constrain('variance|leng', LogexpClipped())
# m['.*lengt'] = m.X.var(0).max() / m.X.var(0)
m['gaussian'] = Yn.Y.var() / 100.
# optimize
if optimize:
m.gaussian.variance.fix() # m.constrain_fixed('noise')
m.optimize('scg', messages=1, max_iters=200, gtol=.05)
m.gaussian.variance.constrain_positive() # m.constrain_positive('noise')
#m.constrain_bounded('white', 1e-7, 1)
m.optimize('scg', messages=1, max_iters=max_iters, gtol=.05)
m.optimize('scg', messages=verbose, max_iters=max_iters, gtol=.05)
if plot:
y = m.likelihood.Y[0, :]
fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
plt.sca(latent_axes)
m.plot_latent()
m.plot_latent(ax=latent_axes)
data_show = GPy.util.visualize.vector_show(y)
lvm_visualizer = GPy.util.visualize.lvm_dimselect(m.X[0, :], m, data_show, latent_axes=latent_axes) # , sense_axes=sense_axes)
lvm_visualizer = GPy.util.visualize.lvm_dimselect(m.X[0, :], # @UnusedVariable
m, data_show, latent_axes=latent_axes, sense_axes=sense_axes)
raw_input('Press enter to finish')
plt.close(fig)
return m
def oil_100():
data = GPy.util.datasets.oil_100()
m = GPy.models.GPLVM(data['X'], 2)
# optimize
m.optimize(messages=1, max_iters=2)
# plot
print(m)
# m.plot_latent(labels=data['Y'].argmax(axis=1))
return m
def _simulate_sincos(D1, D2, D3, N, num_inducing, input_dim, plot_sim=False):
x = np.linspace(0, 4 * np.pi, N)[:, None]
s1 = np.vectorize(lambda x: np.sin(x))
s2 = np.vectorize(lambda x: np.cos(x))
s3 = np.vectorize(lambda x:-np.exp(-np.cos(2 * x)))
sS = np.vectorize(lambda x: np.sin(2 * x))
def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False):
x = _np.linspace(0, 4 * _np.pi, N)[:, None]
s1 = _np.vectorize(lambda x: _np.sin(x))
s2 = _np.vectorize(lambda x: _np.cos(x))
s3 = _np.vectorize(lambda x:-_np.exp(-_np.cos(2 * x)))
sS = _np.vectorize(lambda x: _np.sin(2 * x))
s1 = s1(x)
s2 = s2(x)
s3 = s3(x)
sS = sS(x)
S1 = np.hstack([s1, sS])
S2 = np.hstack([s2, s3, sS])
S3 = np.hstack([s3, sS])
S1 = _np.hstack([s1, sS])
S2 = _np.hstack([s2, s3, sS])
S3 = _np.hstack([s3, sS])
Y1 = S1.dot(np.random.randn(S1.shape[1], D1))
Y2 = S2.dot(np.random.randn(S2.shape[1], D2))
Y3 = S3.dot(np.random.randn(S3.shape[1], D3))
Y1 = S1.dot(_np.random.randn(S1.shape[1], D1))
Y2 = S2.dot(_np.random.randn(S2.shape[1], D2))
Y3 = S3.dot(_np.random.randn(S3.shape[1], D3))
Y1 += .3 * np.random.randn(*Y1.shape)
Y2 += .2 * np.random.randn(*Y2.shape)
Y3 += .25 * np.random.randn(*Y3.shape)
Y1 += .3 * _np.random.randn(*Y1.shape)
Y2 += .2 * _np.random.randn(*Y2.shape)
Y3 += .25 * _np.random.randn(*Y3.shape)
Y1 -= Y1.mean(0)
Y2 -= Y2.mean(0)
@@ -230,6 +218,7 @@ def _simulate_sincos(D1, D2, D3, N, num_inducing, input_dim, plot_sim=False):
if plot_sim:
import pylab
import matplotlib.cm as cm
import itertools
fig = pylab.figure("MRD Simulation Data", figsize=(8, 6))
fig.clf()
@@ -247,114 +236,99 @@ def _simulate_sincos(D1, D2, D3, N, num_inducing, input_dim, plot_sim=False):
return slist, [S1, S2, S3], Ylist
def bgplvm_simulation_matlab_compare():
from GPy.util.datasets import simulation_BGPLVM
sim_data = simulation_BGPLVM()
Y = sim_data['Y']
S = sim_data['S']
mu = sim_data['mu']
num_inducing, [_, input_dim] = 3, mu.shape
# def bgplvm_simulation_matlab_compare():
# from GPy.util.datasets import simulation_BGPLVM
# from GPy import kern
# from GPy.models import BayesianGPLVM
#
# sim_data = simulation_BGPLVM()
# Y = sim_data['Y']
# mu = sim_data['mu']
# num_inducing, [_, Q] = 3, mu.shape
#
# k = kern.linear(Q, ARD=True) + kern.bias(Q, _np.exp(-2)) + kern.white(Q, _np.exp(-2))
# m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k,
# _debug=False)
# m.auto_scale_factor = True
# m['noise'] = Y.var() / 100.
# m['linear_variance'] = .01
# return m
from GPy.models import mrd
from GPy import kern
reload(mrd); reload(kern)
k = kern.linear(input_dim, ARD=True) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2))
m = BayesianGPLVM(Y, input_dim, init="PCA", num_inducing=num_inducing, kernel=k,
# X=mu,
# X_variance=S,
_debug=False)
m.auto_scale_factor = True
m['gaussian'] = Y.var() / 100.
m['linear_variance'] = .01
return m
def bgplvm_simulation(optimize='scg',
plot=True,
def bgplvm_simulation(optimize=True, verbose=1,
plot=True, plot_sim=False,
max_iters=2e4,
plot_sim=False):
# from GPy.core.transformations import LogexpClipped
D1, D2, D3, N, num_inducing, input_dim = 15, 5, 8, 30, 3, 10
slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, input_dim, plot_sim)
from GPy.models import mrd
):
from GPy import kern
reload(mrd); reload(kern)
from GPy.models import BayesianGPLVM
D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 3, 10
_, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
Y = Ylist[0]
k = kern.linear(input_dim, ARD=True) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2)) # + kern.bias(input_dim)
m = BayesianGPLVM(Y, input_dim, init="PCA", num_inducing=num_inducing, kernel=k)
import ipdb; ipdb.set_trace()
# m.constrain('variance|noise', LogexpClipped())
m['gaussian'] = Y.var() / 100.
k = kern.linear(Q, ARD=True) + kern.bias(Q, _np.exp(-2)) + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k)
m['noise'] = Y.var() / 100.
if optimize:
print "Optimizing model:"
m.optimize(optimize, max_iters=max_iters,
messages=True, gtol=.05)
m.optimize('scg', messages=verbose, max_iters=max_iters,
gtol=.05)
if plot:
m.plot_X_1d("BGPLVM Latent Space 1D")
m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
return m
def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
D1, D2, D3, N, num_inducing, input_dim = 60, 20, 36, 60, 6, 5
slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, input_dim, plot_sim)
def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
from GPy import kern
from GPy.models import MRD
from GPy.likelihoods import Gaussian
D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
_, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
likelihood_list = [Gaussian(x, normalize=True) for x in Ylist]
from GPy.models import mrd
from GPy import kern
reload(mrd); reload(kern)
k = kern.linear(input_dim, ARD=True) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2))
m = mrd.MRD(likelihood_list, input_dim=input_dim, num_inducing=num_inducing, kernels=k, initx="", initz='permute', **kw)
k = kern.linear(Q, ARD=True) + kern.bias(Q, _np.exp(-2)) + kern.white(Q, _np.exp(-2))
m = MRD(likelihood_list, input_dim=Q, num_inducing=num_inducing, kernels=k, initx="", initz='permute', **kw)
m.ensure_default_constraints()
for i, bgplvm in enumerate(m.bgplvms):
m['{}_noise'.format(i)] = bgplvm.likelihood.Y.var() / 500.
# DEBUG
# np.seterr("raise")
if optimize:
print "Optimizing Model:"
m.optimize(messages=1, max_iters=8e3, gtol=.1)
m.optimize(messages=verbose, max_iters=8e3, gtol=.1)
if plot:
m.plot_X_1d("MRD Latent Space 1D")
m.plot_scales("MRD Scales")
return m
def brendan_faces():
from GPy import kern
def brendan_faces(optimize=True, verbose=True, plot=True):
import GPy
data = GPy.util.datasets.brendan_faces()
input_dim = 2
Y = data['Y'][0:-1:10, :]
# Y = data['Y']
Q = 2
Y = data['Y']
Yn = Y - Y.mean()
Yn /= Yn.std()
m = GPy.models.GPLVM(Yn, input_dim)
# m = GPy.models.BayesianGPLVM(Yn, input_dim, num_inducing=100)
m = GPy.models.GPLVM(Yn, Q)
# optimize
m.constrain('rbf|noise|white', GPy.core.transformations.LogexpClipped())
m.constrain('rbf|noise|white', GPy.core.transformations.logexp_clipped())
m.optimize('scg', messages=1, max_f_eval=10000)
if optimize: m.optimize('scg', messages=verbose, max_iters=1000)
ax = m.plot_latent(which_indices=(0, 1))
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.image_show(y[None, :], dimensions=(20, 28), transpose=True, invert=False, scale=False)
lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
if plot:
ax = m.plot_latent(which_indices=(0, 1))
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.image_show(y[None, :], dimensions=(20, 28), transpose=True, order='F', invert=False, scale=False)
GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
return m
def olivetti_faces():
from GPy import kern
def olivetti_faces(optimize=True, verbose=True, plot=True):
import GPy
data = GPy.util.datasets.olivetti_faces()
Q = 2
Y = data['Y']
@@ -362,152 +336,142 @@ def olivetti_faces():
Yn /= Yn.std()
m = GPy.models.GPLVM(Yn, Q)
m.optimize('scg', messages=1, max_iters=1000)
ax = m.plot_latent(which_indices=(0, 1))
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.image_show(y[None, :], dimensions=(112, 92), transpose=False, invert=False, scale=False)
lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
if optimize: m.optimize('scg', messages=verbose, max_iters=1000)
if plot:
ax = m.plot_latent(which_indices=(0, 1))
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.image_show(y[None, :], dimensions=(112, 92), transpose=False, invert=False, scale=False)
GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
return m
def stick_play(range=None, frame_rate=15):
def stick_play(range=None, frame_rate=15, optimize=False, verbose=True, plot=True):
import GPy
data = GPy.util.datasets.osu_run1()
# optimize
if range == None:
Y = data['Y'].copy()
else:
Y = data['Y'][range[0]:range[1], :].copy()
y = Y[0, :]
data_show = GPy.util.visualize.stick_show(y[None, :], connect=data['connect'])
GPy.util.visualize.data_play(Y, data_show, frame_rate)
if plot:
y = Y[0, :]
data_show = GPy.util.visualize.stick_show(y[None, :], connect=data['connect'])
GPy.util.visualize.data_play(Y, data_show, frame_rate)
return Y
def stick(kernel=None):
def stick(kernel=None, optimize=True, verbose=True, plot=True):
from matplotlib import pyplot as plt
import GPy
data = GPy.util.datasets.osu_run1()
# optimize
m = GPy.models.GPLVM(data['Y'], 2, kernel=kernel)
m.optimize(messages=1, max_f_eval=10000)
if GPy.util.visualize.visual_available:
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
if plot and GPy.util.visualize.visual_available:
plt.clf
ax = m.plot_latent()
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.stick_show(y[None, :], connect=data['connect'])
lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
return m
def bcgplvm_linear_stick(kernel=None):
def bcgplvm_linear_stick(kernel=None, optimize=True, verbose=True, plot=True):
from matplotlib import pyplot as plt
import GPy
data = GPy.util.datasets.osu_run1()
# optimize
mapping = GPy.mappings.Linear(data['Y'].shape[1], 2)
m = GPy.models.BCGPLVM(data['Y'], 2, kernel=kernel, mapping=mapping)
m.optimize(messages=1, max_f_eval=10000)
if GPy.util.visualize.visual_available:
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
if plot and GPy.util.visualize.visual_available:
plt.clf
ax = m.plot_latent()
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.stick_show(y[None, :], connect=data['connect'])
lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
return m
def bcgplvm_stick(kernel=None):
def bcgplvm_stick(kernel=None, optimize=True, verbose=True, plot=True):
from matplotlib import pyplot as plt
import GPy
data = GPy.util.datasets.osu_run1()
# optimize
back_kernel=GPy.kern.rbf(data['Y'].shape[1], lengthscale=5.)
mapping = GPy.mappings.Kernel(X=data['Y'], output_dim=2, kernel=back_kernel)
m = GPy.models.BCGPLVM(data['Y'], 2, kernel=kernel, mapping=mapping)
m.optimize(messages=1, max_f_eval=10000)
if GPy.util.visualize.visual_available:
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
if plot and GPy.util.visualize.visual_available:
plt.clf
ax = m.plot_latent()
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.stick_show(y[None, :], connect=data['connect'])
lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
return m
def robot_wireless():
def robot_wireless(optimize=True, verbose=True, plot=True):
from matplotlib import pyplot as plt
import GPy
data = GPy.util.datasets.robot_wireless()
# optimize
m = GPy.models.GPLVM(data['Y'], 2)
m.optimize(messages=1, max_f_eval=10000)
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
m._set_params(m._get_params())
plt.clf
ax = m.plot_latent()
if plot:
m.plot_latent()
return m
def stick_bgplvm(model=None):
def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True):
from GPy.models import BayesianGPLVM
from matplotlib import pyplot as plt
import GPy
data = GPy.util.datasets.osu_run1()
input_dim = 6
kernel = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.bias(input_dim, np.exp(-2)) + GPy.kern.white(input_dim, np.exp(-2))
m = BayesianGPLVM(data['Y'], input_dim, init="PCA", num_inducing=20, kernel=kernel)
Q = 6
kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, _np.exp(-2)) + GPy.kern.white(Q, _np.exp(-2))
m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)
# optimize
m.ensure_default_constraints()
m.optimize('scg', messages=1, max_iters=200, xtol=1e-300, ftol=1e-300)
if optimize: m.optimize('scg', messages=verbose, max_iters=200, xtol=1e-300, ftol=1e-300)
m._set_params(m._get_params())
plt.clf, (latent_axes, sense_axes) = plt.subplots(1, 2)
plt.sca(latent_axes)
m.plot_latent()
y = m.likelihood.Y[0, :].copy()
data_show = GPy.util.visualize.stick_show(y[None, :], connect=data['connect'])
lvm_visualizer = GPy.util.visualize.lvm_dimselect(m.X[0, :].copy(), m, data_show, latent_axes=latent_axes, sense_axes=sense_axes)
raw_input('Press enter to finish')
if plot:
plt.clf, (latent_axes, sense_axes) = plt.subplots(1, 2)
plt.sca(latent_axes)
m.plot_latent()
y = m.likelihood.Y[0, :].copy()
data_show = GPy.util.visualize.stick_show(y[None, :], connect=data['connect'])
GPy.util.visualize.lvm_dimselect(m.X[0, :].copy(), m, data_show, latent_axes=latent_axes, sense_axes=sense_axes)
raw_input('Press enter to finish')
return m
def cmu_mocap(subject='35', motion=['01'], in_place=True):
def cmu_mocap(subject='35', motion=['01'], in_place=True, optimize=True, verbose=True, plot=True):
import GPy
data = GPy.util.datasets.cmu_mocap(subject, motion)
Y = data['Y']
if in_place:
# Make figure move in place.
data['Y'][:, 0:3] = 0.0
m = GPy.models.GPLVM(data['Y'], 2, normalize_Y=True)
# optimize
m.optimize(messages=1, max_f_eval=10000)
ax = m.plot_latent()
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.skeleton_show(y[None, :], data['skel'])
lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
lvm_visualizer.close()
if optimize: m.optimize(messages=verbose, max_f_eval=10000)
if plot:
ax = m.plot_latent()
y = m.likelihood.Y[0, :]
data_show = GPy.util.visualize.skeleton_show(y[None, :], data['skel'])
lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish')
lvm_visualizer.close()
return m
# def BGPLVM_oil():
# data = GPy.util.datasets.oil()
# Y, X = data['Y'], data['X']
# X -= X.mean(axis=0)
# X /= X.std(axis=0)
#
# input_dim = 10
# num_inducing = 30
#
# kernel = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim)
# m = GPy.models.BayesianGPLVM(X, input_dim, kernel=kernel, num_inducing=num_inducing)
# # m.scale_factor = 100.0
# m.constrain_positive('(white|noise|bias|X_variance|rbf_variance|rbf_length)')
# from sklearn import cluster
# km = cluster.KMeans(num_inducing, verbose=10)
# Z = km.fit(m.X).cluster_centers_
# # Z = GPy.util.misc.kmm_init(m.X, num_inducing)
# m.set('iip', Z)
# m.set('bias', 1e-4)
# # optimize
#
# import pdb; pdb.set_trace()
# m.optimize('tnc', messages=1)
# print m
# m.plot_latent(labels=data['Y'].argmax(axis=1))
# return m

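The heart of bgplvm_test_model above is a round trip: sample Y from a GP with a known ARD RBF kernel, then fit a BayesianGPLVM with a fresh kernel and check its gradients; it is a model check, not an optimization benchmark. A condensed sketch using only calls that appear in the diff (the missing-data variant and the plotting are omitted):

import numpy as _np
import GPy
from GPy.likelihoods.gaussian import Gaussian

num_inputs, input_dim, output_dim, num_inducing = 13, 2, 25, 5

# sample GPLVM-like data from a kernel with known lengthscales
X = _np.random.rand(num_inputs, input_dim)
lengthscales = _np.random.rand(input_dim)
k = (GPy.kern.rbf(input_dim, .5, lengthscales, ARD=True)
     + GPy.kern.white(input_dim, 0.01))
Y = _np.random.multivariate_normal(_np.zeros(num_inputs), k.K(X), output_dim).T

# learn the samples with a fresh kernel; checkgrad is the point here
lik = Gaussian(Y, normalize=True)
k_fit = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.white(input_dim)
m = GPy.models.BayesianGPLVM(lik, input_dim, kernel=k_fit, num_inducing=num_inducing)
m.checkgrad(verbose=1)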

@@ -0,0 +1,286 @@
import GPy
import numpy as np
import matplotlib.pyplot as plt
from GPy.util import datasets
def student_t_approx(optimize=True, plot=True):
"""
Example of regression with a Student-t likelihood using the Laplace approximation
"""
real_std = 0.1
#Start a function, any function
X = np.linspace(0.0, np.pi*2, 100)[:, None]
Y = np.sin(X) + np.random.randn(*X.shape)*real_std
Y = Y/Y.max()
Yc = Y.copy()
X_full = np.linspace(0.0, np.pi*2, 500)[:, None]
Y_full = np.sin(X_full)
Y_full = Y_full/Y_full.max()
#Slightly noisy data
Yc[75:80] += 1
#Very noisy data
#Yc[10] += 100
#Yc[25] += 10
#Yc[23] += 10
#Yc[26] += 1000
#Yc[24] += 10
#Yc = Yc/Yc.max()
#Add student t random noise to datapoints
deg_free = 5
print "Real noise: ", real_std
initial_var_guess = 0.5
edited_real_sd = initial_var_guess
# Kernel object
kernel1 = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1])
kernel2 = kernel1.copy()
kernel3 = kernel1.copy()
kernel4 = kernel1.copy()
#Gaussian GP model on clean data
m1 = GPy.models.GPRegression(X, Y.copy(), kernel=kernel1)
# optimize
m1.ensure_default_constraints()
m1.constrain_fixed('white', 1e-5)
m1.randomize()
#Gaussian GP model on corrupt data
m2 = GPy.models.GPRegression(X, Yc.copy(), kernel=kernel2)
m2.ensure_default_constraints()
m2.constrain_fixed('white', 1e-5)
m2.randomize()
#Student t GP model on clean data
t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=deg_free, sigma2=edited_real_sd)
stu_t_likelihood = GPy.likelihoods.Laplace(Y.copy(), t_distribution)
m3 = GPy.models.GPRegression(X, Y.copy(), kernel3, likelihood=stu_t_likelihood)
m3.ensure_default_constraints()
m3.constrain_bounded('t_noise', 1e-6, 10.)
m3.constrain_fixed('white', 1e-5)
m3.randomize()
#Student t GP model on corrupt data
t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=deg_free, sigma2=edited_real_sd)
corrupt_stu_t_likelihood = GPy.likelihoods.Laplace(Yc.copy(), t_distribution)
m4 = GPy.models.GPRegression(X, Yc.copy(), kernel4, likelihood=corrupt_stu_t_likelihood)
m4.ensure_default_constraints()
m4.constrain_bounded('t_noise', 1e-6, 10.)
m4.constrain_fixed('white', 1e-5)
m4.randomize()
if optimize:
optimizer='scg'
print "Clean Gaussian"
m1.optimize(optimizer, messages=1)
print "Corrupt Gaussian"
m2.optimize(optimizer, messages=1)
print "Clean student t"
m3.optimize(optimizer, messages=1)
print "Corrupt student t"
m4.optimize(optimizer, messages=1)
if plot:
plt.figure(1)
plt.suptitle('Gaussian likelihood')
ax = plt.subplot(211)
m1.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Gaussian clean')
ax = plt.subplot(212)
m2.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Gaussian corrupt')
plt.figure(2)
plt.suptitle('Student-t likelihood')
ax = plt.subplot(211)
m3.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Student-t rasm clean')
ax = plt.subplot(212)
m4.plot(ax=ax)
plt.plot(X_full, Y_full)
plt.ylim(-1.5, 1.5)
plt.title('Student-t rasm corrupt')
return m1, m2, m3, m4
def boston_example(optimize=True, plot=True):
import sklearn
from sklearn.cross_validation import KFold
optimizer='bfgs'
messages=0
data = datasets.boston_housing()
degrees_freedoms = [3, 5, 8, 10]
X = data['X'].copy()
Y = data['Y'].copy()
X = X-X.mean(axis=0)
X = X/X.std(axis=0)
Y = Y-Y.mean()
Y = Y/Y.std()
num_folds = 10
kf = KFold(len(Y), n_folds=num_folds, indices=True)
num_models = len(degrees_freedoms) + 3 #3 for baseline, gaussian, gaussian laplace approx
score_folds = np.zeros((num_models, num_folds))
pred_density = score_folds.copy()
def rmse(Y, Ystar):
return np.sqrt(np.mean((Y-Ystar)**2))
for n, (train, test) in enumerate(kf):
X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
print "Fold {}".format(n)
noise = 1e-1 #np.exp(-2)
rbf_len = 0.5
data_axis_plot = 4
kernelstu = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
kernelgp = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
#Baseline
score_folds[0, n] = rmse(Y_test, np.mean(Y_train))
#Gaussian GP
print "Gauss GP"
mgp = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelgp.copy())
mgp.ensure_default_constraints()
mgp.constrain_fixed('white', 1e-5)
mgp['rbf_len'] = rbf_len
mgp['noise'] = noise
print mgp
if optimize:
mgp.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mgp.predict(X_test)
score_folds[1, n] = rmse(Y_test, Y_test_pred[0])
pred_density[1, n] = np.mean(mgp.log_predictive_density(X_test, Y_test))
print mgp
print pred_density
print "Gaussian Laplace GP"
N, D = Y_train.shape
g_distribution = GPy.likelihoods.noise_model_constructors.gaussian(variance=noise, N=N, D=D)
g_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), g_distribution)
mg = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=g_likelihood)
mg.ensure_default_constraints()
mg.constrain_positive('noise_variance')
mg.constrain_fixed('white', 1e-5)
mg['rbf_len'] = rbf_len
mg['noise'] = noise
print mg
if optimize:
mg.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mg.predict(X_test)
score_folds[2, n] = rmse(Y_test, Y_test_pred[0])
pred_density[2, n] = np.mean(mg.log_predictive_density(X_test, Y_test))
print pred_density
print mg
for stu_num, df in enumerate(degrees_freedoms):
#Student T
print "Student-T GP {}df".format(df)
t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=df, sigma2=noise)
stu_t_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), t_distribution)
mstu_t = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=stu_t_likelihood)
mstu_t.ensure_default_constraints()
mstu_t.constrain_fixed('white', 1e-5)
mstu_t.constrain_bounded('t_noise', 0.0001, 1000)
mstu_t['rbf_len'] = rbf_len
mstu_t['t_noise'] = noise
print mstu_t
if optimize:
mstu_t.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mstu_t.predict(X_test)
score_folds[3+stu_num, n] = rmse(Y_test, Y_test_pred[0])
pred_density[3+stu_num, n] = np.mean(mstu_t.log_predictive_density(X_test, Y_test))
print pred_density
print mstu_t
if plot:
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
plt.title('GP gauss')
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
plt.title('Lap gauss')
plt.figure()
plt.scatter(X_test[:, data_axis_plot], Y_test_pred[0])
plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
plt.title('Stu t {}df'.format(df))
print "Average scores: {}".format(np.mean(score_folds, 1))
print "Average pred density: {}".format(np.mean(pred_density, 1))
if plot:
#Plotting
stu_t_legends = ['Student T, df={}'.format(df) for df in degrees_freedoms]
legends = ['Baseline', 'Gaussian', 'Laplace Approx Gaussian'] + stu_t_legends
#Plot boxplots for RMSE density
fig = plt.figure()
ax=fig.add_subplot(111)
plt.title('RMSE')
bp = ax.boxplot(score_folds.T, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
xtickNames = plt.setp(ax, xticklabels=legends)
plt.setp(xtickNames, rotation=45, fontsize=8)
ax.set_ylabel('RMSE')
ax.set_xlabel('Distribution')
#Make grid and put it below boxes
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
#Plot boxplots for predictive density
fig = plt.figure()
ax=fig.add_subplot(111)
plt.title('Predictive density')
bp = ax.boxplot(pred_density[1:,:].T, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
xtickNames = plt.setp(ax, xticklabels=legends[1:])
plt.setp(xtickNames, rotation=45, fontsize=8)
ax.set_ylabel('Mean Log probability P(Y*|Y)')
ax.set_xlabel('Distribution')
#Make grid and put it below boxes
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax.set_axisbelow(True)
return mstu_t
#def precipitation_example():
#import sklearn
#from sklearn.cross_validation import KFold
#data = datasets.boston_housing()
#X = data['X'].copy()
#Y = data['Y'].copy()
#X = X-X.mean(axis=0)
#X = X/X.std(axis=0)
#Y = Y-Y.mean()
#Y = Y/Y.std()
#import ipdb; ipdb.set_trace() # XXX BREAKPOINT
#num_folds = 10
#kf = KFold(len(Y), n_folds=num_folds, indices=True)
#score_folds = np.zeros((4, num_folds))
#def rmse(Y, Ystar):
#return np.sqrt(np.mean((Y-Ystar)**2))
##for train, test in kf:
#for n, (train, test) in enumerate(kf):
#X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
#print "Fold {}".format(n)

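student_t_approx above fits four models to contrast Gaussian and Student-t likelihoods on clean versus corrupted data; the heavy-tailed likelihood is what lets the fit shrug off the outliers injected at indices 75:80. The Student-t branch in isolation, condensed from the calls in the diff:

import numpy as np
import GPy

real_std = 0.1
X = np.linspace(0.0, np.pi * 2, 100)[:, None]
Yc = np.sin(X) + np.random.randn(*X.shape) * real_std
Yc = Yc / Yc.max()
Yc[75:80] += 1  # corrupt a handful of points

# Student-t noise model handled through the Laplace approximation
t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=5, sigma2=0.5)
stu_t_likelihood = GPy.likelihoods.Laplace(Yc.copy(), t_distribution)
kernel = GPy.kern.rbf(1) + GPy.kern.white(1)
m = GPy.models.GPRegression(X, Yc.copy(), kernel, likelihood=stu_t_likelihood)
m.ensure_default_constraints()
m.constrain_bounded('t_noise', 1e-6, 10.)
m.constrain_fixed('white', 1e-5)
m.optimize('scg', messages=1)
print m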

@@ -1,7 +1,6 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes regression examples
"""
@@ -9,88 +8,105 @@ import pylab as pb
import numpy as np
import GPy
def coregionalization_toy2(max_iters=100):
def olympic_marathon_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Olympic marathon data."""
data = GPy.util.datasets.olympic_marathon_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# set the lengthscale to be something sensible (defaults to 1)
m['rbf_lengthscale'] = 10
if optimize:
m.optimize('bfgs', max_iters=200)
if plot:
m.plot(plot_limits=(1850, 2050))
return m
def coregionalization_toy2(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions.
"""
#build a design matrix with a column of integers indicating the output
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
index = np.vstack((np.zeros_like(X1), np.ones_like(X2)))
X = np.hstack((np.vstack((X1, X2)), index))
#build a suitable set of observed variables
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
Y = np.vstack((Y1, Y2))
#build the kernel
k1 = GPy.kern.rbf(1) + GPy.kern.bias(1)
k2 = GPy.kern.coregionalize(2,1)
k = k1**k2 #k = k1.prod(k2,tensor=True)
k = k1**k2
m = GPy.models.GPRegression(X, Y, kernel=k)
m.constrain_fixed('.*rbf_var', 1.)
# m.constrain_positive('.*kappa')
m.optimize('sim', messages=1, max_iters=max_iters)
pb.figure()
Xtest1 = np.hstack((np.linspace(0, 9, 100)[:, None], np.zeros((100, 1))))
Xtest2 = np.hstack((np.linspace(0, 9, 100)[:, None], np.ones((100, 1))))
mean, var, low, up = m.predict(Xtest1)
GPy.util.plot.gpplot(Xtest1[:, 0], mean, low, up)
mean, var, low, up = m.predict(Xtest2)
GPy.util.plot.gpplot(Xtest2[:, 0], mean, low, up)
pb.plot(X1[:, 0], Y1[:, 0], 'rx', mew=2)
pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
if optimize:
m.optimize('bfgs', max_iters=100)
if plot:
m.plot(fixed_inputs=[(1,0)])
m.plot(fixed_inputs=[(1,1)], ax=pb.gca())
return m
def coregionalization_toy(max_iters=100):
"""
A simple demonstration of coregionalization on two sinusoidal functions.
"""
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
X = np.vstack((X1, X2))
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
#FIXME: Needs recovering once likelihoods are consolidated
#def coregionalization_toy(optimize=True, plot=True):
# """
# A simple demonstration of coregionalization on two sinusoidal functions.
# """
# X1 = np.random.rand(50, 1) * 8
# X2 = np.random.rand(30, 1) * 5
# X = np.vstack((X1, X2))
# Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
# Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
# Y = np.vstack((Y1, Y2))
#
# k1 = GPy.kern.rbf(1)
# m = GPy.models.GPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1])
# m.constrain_fixed('.*rbf_var', 1.)
# m.optimize(max_iters=100)
#
# fig, axes = pb.subplots(2,1)
# m.plot(fixed_inputs=[(1,0)],ax=axes[0])
# m.plot(fixed_inputs=[(1,1)],ax=axes[1])
# axes[0].set_title('Output 0')
# axes[1].set_title('Output 1')
# return m
k1 = GPy.kern.rbf(1)
m = GPy.models.GPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1])
m.constrain_fixed('.*rbf_var', 1.)
m.optimize(max_iters=max_iters)
fig, axes = pb.subplots(2,1)
m.plot_single_output(output=0,ax=axes[0])
m.plot_single_output(output=1,ax=axes[1])
axes[0].set_title('Output 0')
axes[1].set_title('Output 1')
return m
def coregionalization_sparse(max_iters=100):
def coregionalization_sparse(optimize=True, plot=True):
"""
A simple demonstration of coregionalization on two sinusoidal functions using sparse approximations.
"""
X1 = np.random.rand(500, 1) * 8
X2 = np.random.rand(300, 1) * 5
index = np.vstack((np.zeros_like(X1), np.ones_like(X2)))
X = np.hstack((np.vstack((X1, X2)), index))
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
#fetch the data from the non-sparse example
m = coregionalization_toy2(optimize=False, plot=False)
X, Y = m.X, m.likelihood.Y
k1 = GPy.kern.rbf(1)
#construct a model
m = GPy.models.SparseGPRegression(X,Y)
m.constrain_fixed('iip_\d+_1') # don't optimize the inducing input indexes
m = GPy.models.SparseGPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1],num_inducing=5)
m.constrain_fixed('.*rbf_var',1.)
#m.optimize(messages=1)
m.optimize_restarts(5, robust=True, messages=1, max_iters=max_iters, optimizer='bfgs')
if optimize:
m.optimize('bfgs', max_iters=100, messages=1)
if plot:
m.plot(fixed_inputs=[(1,0)])
m.plot(fixed_inputs=[(1,1)], ax=pb.gca())
fig, axes = pb.subplots(2,1)
m.plot_single_output(output=0,ax=axes[0],plot_limits=(-1,9))
m.plot_single_output(output=1,ax=axes[1],plot_limits=(-1,9))
axes[0].set_title('Output 0')
axes[1].set_title('Output 1')
return m
def epomeo_gpx(max_iters=100):
"""Perform Gaussian process regression on the latitude and longitude data from the Mount Epomeo runs. Requires gpxpy to be installed on your system to load in the data."""
def epomeo_gpx(max_iters=200, optimize=True, plot=True):
"""
Perform Gaussian process regression on the latitude and longitude data
from the Mount Epomeo runs. Requires gpxpy to be installed on your system
to load in the data.
"""
data = GPy.util.datasets.epomeo_gpx()
num_data_list = []
for Xpart in data['X']:
@@ -119,14 +135,16 @@ def epomeo_gpx(max_iters=100):
m.constrain_fixed('.*rbf_var', 1.)
m.constrain_fixed('iip')
m.constrain_bounded('noise_variance', 1e-3, 1e-1)
# m.optimize_restarts(5, robust=True, messages=1, max_iters=max_iters, optimizer='bfgs')
m.optimize(max_iters=max_iters,messages=True)
return m
def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300):
"""Show an example of a multimodal error surface for Gaussian process regression. Gene 939 has bimodal behaviour where the noisy mode is higher."""
def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300, optimize=True, plot=True):
"""
Show an example of a multimodal error surface for Gaussian process
regression. Gene 939 has bimodal behaviour where the noisy mode is
higher.
"""
# Contour over a range of length scales and signal/noise ratios.
length_scales = np.linspace(0.1, 60., resolution)
@@ -139,13 +157,14 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000
data['Y'] = data['Y'] - np.mean(data['Y'])
lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)
pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet) # @UndefinedVariable
ax = pb.gca()
pb.xlabel('length scale')
pb.ylabel('log_10 SNR')
if plot:
pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
ax = pb.gca()
pb.xlabel('length scale')
pb.ylabel('log_10 SNR')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Now run a few optimizations
models = []
@@ -162,25 +181,31 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000
optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
# optimize
m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
if optimize:
m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
optim_point_x[1] = m['rbf_lengthscale']
optim_point_y[1] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
if plot:
pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
models.append(m)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if plot:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return m # (models, lls)
def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
"""Evaluate the GP objective function for a given data set for a range of signal to noise ratios and a range of lengthscales.
"""
Evaluate the GP objective function for a given data set for a range of
signal to noise ratios and a range of lengthscales.
:data_set: A data set from the utils.datasets directory.
:length_scales: a list of length scales to explore for the contour plot.
:log_SNRs: a list of base 10 logarithm signal to noise ratios to explore for the contour plot.
:kernel: a kernel to use for the 'signal' portion of the data."""
:kernel: a kernel to use for the 'signal' portion of the data.
"""
lls = []
total_var = np.var(data['Y'])
@@ -203,75 +228,75 @@ def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
return np.array(lls)
def olympic_100m_men(max_iters=100, kernel=None):
def olympic_100m_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
data = GPy.util.datasets.olympic_100m_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'], kernel)
m = GPy.models.GPRegression(data['X'], data['Y'])
# set the lengthscale to be something sensible (defaults to 1)
if kernel==None:
m['rbf_lengthscale'] = 10
m['rbf_lengthscale'] = 10
# optimize
m.optimize(max_iters=max_iters)
if optimize:
m.optimize('bfgs', max_iters=200)
# plot
m.plot(plot_limits=(1850, 2050))
print(m)
if plot:
m.plot(plot_limits=(1850, 2050))
return m
def olympic_marathon_men(max_iters=100, kernel=None):
"""Run a standard Gaussian process regression on the Olympic marathon data."""
data = GPy.util.datasets.olympic_marathon_men()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'], kernel)
# set the lengthscale to be something sensible (defaults to 1)
if kernel==None:
m['rbf_lengthscale'] = 10
# optimize
m.optimize(max_iters=max_iters)
# plot
m.plot(plot_limits=(1850, 2050))
print(m)
return m
def toy_rbf_1d(optimizer='tnc', max_nb_eval_optim=100):
def toy_rbf_1d(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
data = GPy.util.datasets.toy_rbf_1d()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# optimize
m.optimize(optimizer, max_f_eval=max_nb_eval_optim)
# plot
m.plot()
print(m)
if optimize:
m.optimize('bfgs')
if plot:
m.plot()
return m
def toy_rbf_1d_50(max_iters=100, optimize=True):
def toy_rbf_1d_50(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
data = GPy.util.datasets.toy_rbf_1d_50()
# create simple GP Model
m = GPy.models.GPRegression(data['X'], data['Y'])
# optimize
if optimize:
m.optimize(max_iters=max_iters)
m.optimize('bfgs')
if plot:
m.plot()
# plot
m.plot()
print(m)
return m
def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True):
def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
optimizer='scg'
x_len = 30
X = np.linspace(0, 10, x_len)[:, None]
f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.rbf(1).K(X))
Y = np.array([np.random.poisson(np.exp(f)) for f in f_true])[:,None]
noise_model = GPy.likelihoods.poisson()
likelihood = GPy.likelihoods.Laplace(Y,noise_model)
# create simple GP Model
m = GPy.models.GPRegression(X, Y, likelihood=likelihood)
if optimize:
m.optimize(optimizer)
if plot:
m.plot()
# plot the real underlying rate function
pb.plot(X, np.exp(f_true), '--k', linewidth=2)
return m
def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
# Create an artificial dataset where the values in the targets (Y)
# only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
@@ -301,13 +326,16 @@ def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
if optimize: m.optimize(optimizer='scg', max_iters=max_iters, messages=1)
if optimize:
m.optimize(optimizer='scg', max_iters=max_iters, messages=1)
m.kern.plot_ARD()
print(m)
if plot:
m.kern.plot_ARD()
print m
return m
def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4):
def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
# Create an artificial dataset where the values in the targets (Y)
# only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
# see if this dependency can be recovered
@@ -338,13 +366,16 @@ def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4):
# len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
# m.set_prior('.*lengthscale',len_prior)
m.optimize(optimizer='scg', max_iters=max_iters, messages=1)
if optimize:
m.optimize(optimizer='scg', max_iters=max_iters, messages=1)
m.kern.plot_ARD()
print(m)
if plot:
m.kern.plot_ARD()
print m
return m
def robot_wireless(max_iters=100, kernel=None):
def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
"""Predict the location of a robot given wirelss signal strength readings."""
data = GPy.util.datasets.robot_wireless()
@@ -352,20 +383,24 @@ def robot_wireless(max_iters=100, kernel=None):
m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)
# optimize
m.optimize(messages=True, max_iters=max_iters)
if optimize:
m.optimize(messages=True, max_iters=max_iters)
Xpredict = m.predict(data['Ytest'])[0]
pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
pb.axis('equal')
pb.title('WiFi Localization with Gaussian Processes')
pb.legend(('True Location', 'Predicted Location'))
if plot:
pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
pb.axis('equal')
pb.title('WiFi Localization with Gaussian Processes')
pb.legend(('True Location', 'Predicted Location'))
sse = ((data['Xtest'] - Xpredict)**2).sum()
print(m)
print m
print('Sum of squares error on test data: ' + str(sse))
return m
def silhouette(max_iters=100):
def silhouette(max_iters=100, optimize=True, plot=True):
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
data = GPy.util.datasets.silhouette()
@@ -373,12 +408,13 @@ def silhouette(max_iters=100):
m = GPy.models.GPRegression(data['X'], data['Y'])
# optimize
m.optimize(messages=True, max_iters=max_iters)
if optimize:
m.optimize(messages=True, max_iters=max_iters)
print(m)
print m
return m
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, checkgrad=True):
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True):
"""Run a 1D example of a sparse GP regression."""
# sample inputs and outputs
X = np.random.uniform(-3., 3., (num_samples, 1))
@@ -387,15 +423,17 @@ def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, opti
rbf = GPy.kern.rbf(1)
# create simple GP Model
m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
m.checkgrad(verbose=1)
if checkgrad:
m.checkgrad(verbose=1)
if optimize:
m.optimize('tnc', messages=1, max_iters=max_iters)
m.plot()
if plot:
m.plot()
return m
def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100):
def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, optimize=True, plot=True):
"""Run a 2D example of a sparse GP regression."""
X = np.random.uniform(-3., 3., (num_samples, 2))
Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(num_samples, 1) * 0.05
@@ -411,13 +449,18 @@ def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100):
m.checkgrad()
# optimize and plot
m.optimize('tnc', messages=1, max_iters=max_iters)
m.plot()
print(m)
# optimize
if optimize:
m.optimize('tnc', messages=1, max_iters=max_iters)
# plot
if plot:
m.plot()
print m
return m
def uncertain_inputs_sparse_regression(max_iters=100):
def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
"""Run a 1D example of a sparse GP regression with uncertain inputs."""
fig, axes = pb.subplots(1, 2, figsize=(12, 5))
@@ -432,18 +475,23 @@ def uncertain_inputs_sparse_regression(max_iters=100):
# create simple GP Model - no input uncertainty on this one
m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
m.optimize('scg', messages=1, max_iters=max_iters)
m.plot(ax=axes[0])
axes[0].set_title('no input uncertainty')
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
if plot:
m.plot(ax=axes[0])
axes[0].set_title('no input uncertainty')
print m
# the same Model with uncertainty
m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S)
m.optimize('scg', messages=1, max_iters=max_iters)
m.plot(ax=axes[1])
axes[1].set_title('with input uncertainty')
print(m)
fig.canvas.draw()
if optimize:
m.optimize('scg', messages=1, max_iters=max_iters)
if plot:
m.plot(ax=axes[1])
axes[1].set_title('with input uncertainty')
fig.canvas.draw()
print m
return m

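coregionalization_toy2 above encodes the two outputs as an extra integer column in X and builds the covariance as the tensor product (the ** operator) of a data kernel with a coregionalize kernel; coregionalization_sparse then reuses exactly that data. The construction in isolation, condensed from the diff:

import numpy as np
import GPy

# design matrix with an index column saying which output each row belongs to
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
index = np.vstack((np.zeros_like(X1), np.ones_like(X2)))
X = np.hstack((np.vstack((X1, X2)), index))
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
Y = np.vstack((Y1, Y2))

# tensor product of a data kernel with a two-output coregionalization kernel
k = (GPy.kern.rbf(1) + GPy.kern.bias(1)) ** GPy.kern.coregionalize(2, 1)
m = GPy.models.GPRegression(X, Y, kernel=k)
m.optimize('bfgs', max_iters=100)
m.plot(fixed_inputs=[(1, 0)])  # predictions for output 0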

@@ -5,7 +5,7 @@ import pylab as pb
import numpy as np
import GPy
def toy_1d():
def toy_1d(optimize=True, plot=True):
N = 2000
M = 20
@@ -20,22 +20,18 @@ def toy_1d():
m.param_steplength = 1e-4
fig = pb.figure()
ax = fig.add_subplot(111)
def cb():
ax.cla()
m.plot(ax=ax,Z_height=-3)
ax.set_ylim(-3,3)
fig.canvas.draw()
if plot:
fig = pb.figure()
ax = fig.add_subplot(111)
def cb(foo):
ax.cla()
m.plot(ax=ax,Z_height=-3)
ax.set_ylim(-3,3)
fig.canvas.draw()
m.optimize(500, callback=cb, callback_interval=1)
if optimize:
m.optimize(500, callback=cb, callback_interval=1)
m.plot_traces()
if plot:
m.plot_traces()
return m


@@ -11,7 +11,7 @@ pb.ion()
import numpy as np
import GPy
def tuto_GP_regression():
def tuto_GP_regression(optimize=True, plot=True):
"""The detailed explanations of the commands used in this file can be found in the tutorial section"""
X = np.random.uniform(-3.,3.,(20,1))
@@ -22,7 +22,8 @@ def tuto_GP_regression():
m = GPy.models.GPRegression(X, Y, kernel)
print m
m.plot()
if plot:
m.plot()
m.constrain_positive('')
@@ -31,9 +32,9 @@ def tuto_GP_regression():
m.constrain_bounded('.*lengthscale',1.,10. )
m.constrain_fixed('.*noise',0.0025)
m.optimize()
m.optimize_restarts(num_restarts = 10)
if optimize:
m.optimize()
m.optimize_restarts(num_restarts = 10)
#######################################################
#######################################################
@@ -51,22 +52,26 @@ def tuto_GP_regression():
m.constrain_positive('')
# optimize and plot
m.optimize('tnc', max_f_eval = 1000)
m.plot()
print(m)
if optimize:
m.optimize('tnc', max_f_eval = 1000)
if plot:
m.plot()
print m
return(m)
def tuto_kernel_overview():
def tuto_kernel_overview(optimize=True, plot=True):
"""The detailed explanations of the commands used in this file can be found in the tutorial section"""
ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)
ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.)
ker3 = GPy.kern.rbf(1, .5, .5)
print ker2
ker1.plot()
ker2.plot()
ker3.plot()
if plot:
ker1.plot()
ker2.plot()
ker3.plot()
k1 = GPy.kern.rbf(1,1.,2.)
k2 = GPy.kern.Matern32(1, 0.5, 0.2)
@@ -77,8 +82,8 @@ def tuto_kernel_overview():
# Sum of kernels
k_add = k1.add(k2) # By default, tensor=False
k_addtens = k1.add(k2,tensor=True)
k_addtens = k1.add(k2,tensor=True)
k1 = GPy.kern.rbf(1,1.,2)
k2 = GPy.kern.periodic_Matern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5)
@@ -102,7 +107,7 @@ def tuto_kernel_overview():
k.unconstrain('white')
k.constrain_bounded('white',lower=1e-5,upper=.5)
print k
k_cst = GPy.kern.bias(1,variance=1.)
k_mat = GPy.kern.Matern52(1,variance=1., lengthscale=3)
Kanova = (k_cst + k_mat).prod(k_cst + k_mat,tensor=True)
@@ -114,30 +119,32 @@ def tuto_kernel_overview():
# Create GP regression model
m = GPy.models.GPRegression(X, Y, Kanova)
fig = pb.figure(figsize=(5,5))
ax = fig.add_subplot(111)
m.plot(ax=ax)
pb.figure(figsize=(20,3))
pb.subplots_adjust(wspace=0.5)
axs = pb.subplot(1,5,1)
m.plot(ax=axs)
pb.subplot(1,5,2)
pb.ylabel("= ",rotation='horizontal',fontsize='30')
axs = pb.subplot(1,5,3)
m.plot(ax=axs, which_parts=[False,True,False,False])
pb.ylabel("cst +",rotation='horizontal',fontsize='30')
axs = pb.subplot(1,5,4)
m.plot(ax=axs, which_parts=[False,False,True,False])
pb.ylabel("+ ",rotation='horizontal',fontsize='30')
axs = pb.subplot(1,5,5)
pb.ylabel("+ ",rotation='horizontal',fontsize='30')
m.plot(ax=axs, which_parts=[False,False,False,True])
if plot:
fig = pb.figure(figsize=(5,5))
ax = fig.add_subplot(111)
m.plot(ax=ax)
pb.figure(figsize=(20,3))
pb.subplots_adjust(wspace=0.5)
axs = pb.subplot(1,5,1)
m.plot(ax=axs)
pb.subplot(1,5,2)
pb.ylabel("= ",rotation='horizontal',fontsize='30')
axs = pb.subplot(1,5,3)
m.plot(ax=axs, which_parts=[False,True,False,False])
pb.ylabel("cst +",rotation='horizontal',fontsize='30')
axs = pb.subplot(1,5,4)
m.plot(ax=axs, which_parts=[False,False,True,False])
pb.ylabel("+ ",rotation='horizontal',fontsize='30')
axs = pb.subplot(1,5,5)
pb.ylabel("+ ",rotation='horizontal',fontsize='30')
m.plot(ax=axs, which_parts=[False,False,False,True])
return(m)
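
tuto_kernel_overview above walks through GPy's kernel arithmetic step by step; the essentials, condensed from the calls in the diff into one sketch:

import GPy

k1 = GPy.kern.rbf(1, 1., 2.)          # input_dim, variance, lengthscale
k2 = GPy.kern.Matern32(1, 0.5, 0.2)

k_add = k1.add(k2)                     # sum on a shared input space (tensor=False)
k_addtens = k1.add(k2, tensor=True)    # sum acting on separate input dimensions

# the ANOVA-style product used at the end of the tutorial
k_cst = GPy.kern.bias(1, variance=1.)
k_mat = GPy.kern.Matern52(1, variance=1., lengthscale=3)
Kanova = (k_cst + k_mat).prod(k_cst + k_mat, tensor=True)
print Kanova
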
def model_interaction():
def model_interaction(optimize=True, plot=True):
X = np.random.randn(20,1)
Y = np.sin(X) + np.random.randn(*X.shape)*0.01 + 5.
k = GPy.kern.rbf(1) + GPy.kern.bias(1)