Additions to week2 MLAI.

Neil Lawrence 2014-10-16 16:38:24 +01:00
parent fb3946aa0a
commit 765e808c2b
3 changed files with 62 additions and 47 deletions
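
Note: the bulk of this commit moves dataset loading out of GPy.util.datasets and into the separate pods (Python Open Data Science) package, and updates a handful of calls to GPy's newer attribute-style parameter interface. A rough sketch of the loading change, assuming pods is installed alongside GPy (e.g. via pip install pods); this is illustrative, not part of the commit itself:

    import GPy
    import pods

    # was: data = GPy.util.datasets.olympic_marathon_men()
    data = pods.datasets.olympic_marathon_men()
    m = GPy.models.GPRegression(data['X'], data['Y'])
    m.optimize('bfgs', max_iters=200)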

View file

@@ -6,6 +6,7 @@
 Gaussian Processes classification
 """
 import GPy
+import pods
 try:
     import pylab as pb
@@ -19,7 +20,7 @@ def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
     Run a Gaussian process classification on the three phase oil data. The demonstration calls the basic GP classification model and uses EP to approximate the likelihood.
     """
-    data = GPy.util.datasets.oil()
+    data = pods.datasets.oil()
     X = data['X']
     Xtest = data['Xtest']
     Y = data['Y'][:, 0:1]
@@ -54,7 +55,7 @@ def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
     """
-    data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
+    data = pods.datasets.toy_linear_1d_classification(seed=seed)
     Y = data['Y'][:, 0:1]
     Y[Y.flatten() == -1] = 0
@@ -87,7 +88,7 @@ def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=
     """
-    data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
+    data = pods.datasets.toy_linear_1d_classification(seed=seed)
     Y = data['Y'][:, 0:1]
     Y[Y.flatten() == -1] = 0
@@ -123,7 +124,7 @@ def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, opti
     """
-    data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
+    data = pods.datasets.toy_linear_1d_classification(seed=seed)
     Y = data['Y'][:, 0:1]
     Y[Y.flatten() == -1] = 0
@@ -153,7 +154,7 @@ def toy_heaviside(seed=default_seed, optimize=True, plot=True):
     """
-    data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
+    data = pods.datasets.toy_linear_1d_classification(seed=seed)
     Y = data['Y'][:, 0:1]
     Y[Y.flatten() == -1] = 0
@@ -190,7 +191,7 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=
     :param kernel: kernel to use in the model
     :type kernel: a GPy kernel
     """
-    data = GPy.util.datasets.crescent_data(seed=seed)
+    data = pods.datasets.crescent_data(seed=seed)
     Y = data['Y']
     Y[Y.flatten()==-1] = 0
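
Every hunk in this first file makes the same one-line substitution: the dataset loader moves to pods while the label remapping and modelling code stay untouched. A sketch of the shared pattern, with a literal seed standing in for the module's default_seed:

    import pods

    data = pods.datasets.toy_linear_1d_classification(seed=0)  # seed value is illustrative
    Y = data['Y'][:, 0:1]
    Y[Y.flatten() == -1] = 0  # remap labels from {-1, 1} to {0, 1} for the classifier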

View file

@@ -1,6 +1,7 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 import numpy as _np
 #default_seed = _np.random.seed(123344)
@@ -68,7 +69,8 @@ def bgplvm_test_model(optimize=False, verbose=1, plot=False, output_dim=200, nan
 def gplvm_oil_100(optimize=True, verbose=1, plot=True):
     import GPy
-    data = GPy.util.datasets.oil_100()
+    import pods
+    data = pods.datasets.oil_100()
     Y = data['X']
     # create simple GP model
     kernel = GPy.kern.RBF(6, ARD=True) + GPy.kern.Bias(6)
@@ -80,8 +82,10 @@
 def sparse_gplvm_oil(optimize=True, verbose=0, plot=True, N=100, Q=6, num_inducing=15, max_iters=50):
     import GPy
+    import pods
     _np.random.seed(0)
-    data = GPy.util.datasets.oil()
+    data = pods.datasets.oil()
     Y = data['X'][:N]
     Y = Y - Y.mean(0)
     Y /= Y.std(0)
@@ -98,7 +102,7 @@ def sparse_gplvm_oil(optimize=True, verbose=0, plot=True, N=100, Q=6, num_induci
 def swiss_roll(optimize=True, verbose=1, plot=True, N=1000, num_inducing=25, Q=4, sigma=.2):
     import GPy
-    from GPy.util.datasets import swiss_roll_generated
+    from pods.datasets import swiss_roll_generated
     from GPy.models import BayesianGPLVM
     data = swiss_roll_generated(num_samples=N, sigma=sigma)
@@ -157,9 +161,10 @@ def bgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40,
     import GPy
     from matplotlib import pyplot as plt
     import numpy as np
+    import pods
     _np.random.seed(0)
-    data = GPy.util.datasets.oil()
+    data = pods.datasets.oil()
     kernel = GPy.kern.RBF(Q, 1., 1./_np.random.uniform(0,1,(Q,)), ARD=True)# + GPy.kern.Bias(Q, _np.exp(-2))
     Y = data['X'][:N]
@@ -182,9 +187,10 @@ def bgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40,
 def ssgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40, max_iters=1000, **k):
     import GPy
     from matplotlib import pyplot as plt
+    import pods
     _np.random.seed(0)
-    data = GPy.util.datasets.oil()
+    data = pods.datasets.oil()
     kernel = GPy.kern.RBF(Q, 1., 1./_np.random.uniform(0,1,(Q,)), ARD=True)# + GPy.kern.Bias(Q, _np.exp(-2))
     Y = data['X'][:N]
@@ -441,8 +447,9 @@ def mrd_simulation_missing_data(optimize=True, verbose=True, plot=True, plot_sim
 def brendan_faces(optimize=True, verbose=True, plot=True):
     import GPy
-    data = GPy.util.datasets.brendan_faces()
+    import pods
+    data = pods.datasets.brendan_faces()
     Q = 2
     Y = data['Y']
     Yn = Y - Y.mean()
@@ -465,8 +472,9 @@ def brendan_faces(optimize=True, verbose=True, plot=True):
 def olivetti_faces(optimize=True, verbose=True, plot=True):
     import GPy
-    data = GPy.util.datasets.olivetti_faces()
+    import pods
+    data = pods.datasets.olivetti_faces()
     Q = 2
     Y = data['Y']
     Yn = Y - Y.mean()
@@ -486,7 +494,9 @@ def olivetti_faces(optimize=True, verbose=True, plot=True):
 def stick_play(range=None, frame_rate=15, optimize=False, verbose=True, plot=True):
     import GPy
-    data = GPy.util.datasets.osu_run1()
+    import pods
+    data = pods.datasets.osu_run1()
     # optimize
     if range == None:
         Y = data['Y'].copy()
@@ -501,8 +511,9 @@ def stick_play(range=None, frame_rate=15, optimize=False, verbose=True, plot=Tru
 def stick(kernel=None, optimize=True, verbose=True, plot=True):
     from matplotlib import pyplot as plt
     import GPy
-    data = GPy.util.datasets.osu_run1()
+    import pods
+    data = pods.datasets.osu_run1()
     # optimize
     m = GPy.models.GPLVM(data['Y'], 2, kernel=kernel)
     if optimize: m.optimize('bfgs', messages=verbose, max_f_eval=10000)
@@ -520,8 +531,9 @@ def stick(kernel=None, optimize=True, verbose=True, plot=True):
 def bcgplvm_linear_stick(kernel=None, optimize=True, verbose=True, plot=True):
     from matplotlib import pyplot as plt
     import GPy
-    data = GPy.util.datasets.osu_run1()
+    import pods
+    data = pods.datasets.osu_run1()
     # optimize
     mapping = GPy.mappings.Linear(data['Y'].shape[1], 2)
     m = GPy.models.BCGPLVM(data['Y'], 2, kernel=kernel, mapping=mapping)
@@ -539,8 +551,9 @@ def bcgplvm_linear_stick(kernel=None, optimize=True, verbose=True, plot=True):
 def bcgplvm_stick(kernel=None, optimize=True, verbose=True, plot=True):
     from matplotlib import pyplot as plt
     import GPy
-    data = GPy.util.datasets.osu_run1()
+    import pods
+    data = pods.datasets.osu_run1()
     # optimize
     back_kernel=GPy.kern.RBF(data['Y'].shape[1], lengthscale=5.)
     mapping = GPy.mappings.Kernel(X=data['Y'], output_dim=2, kernel=back_kernel)
@@ -559,8 +572,9 @@ def bcgplvm_stick(kernel=None, optimize=True, verbose=True, plot=True):
 def robot_wireless(optimize=True, verbose=True, plot=True):
     from matplotlib import pyplot as plt
     import GPy
-    data = GPy.util.datasets.robot_wireless()
+    import pods
+    data = pods.datasets.robot_wireless()
     # optimize
     m = GPy.models.BayesianGPLVM(data['Y'], 4, num_inducing=25)
     if optimize: m.optimize(messages=verbose, max_f_eval=10000)
@@ -574,8 +588,9 @@ def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True):
     from matplotlib import pyplot as plt
     import numpy as np
     import GPy
-    data = GPy.util.datasets.osu_run1()
+    import pods
+    data = pods.datasets.osu_run1()
     Q = 6
     kernel = GPy.kern.RBF(Q, lengthscale=np.repeat(.5, Q), ARD=True)
     m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)
@@ -605,8 +620,9 @@ def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True):
 def cmu_mocap(subject='35', motion=['01'], in_place=True, optimize=True, verbose=True, plot=True):
     import GPy
-    data = GPy.util.datasets.cmu_mocap(subject, motion)
+    import pods
+    data = pods.datasets.cmu_mocap(subject, motion)
     if in_place:
         # Make figure move in place.
         data['Y'][:, 0:3] = 0.0
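
Unlike the classification file, this file imports pods locally inside each demo function, matching the existing per-function import GPy style. For example, the stick demo after this commit reads roughly as follows (reassembled from the hunks above, so treat it as a sketch; the plotting code that follows is omitted):

    def stick(kernel=None, optimize=True, verbose=True, plot=True):
        from matplotlib import pyplot as plt
        import GPy
        import pods
        data = pods.datasets.osu_run1()
        m = GPy.models.GPLVM(data['Y'], 2, kernel=kernel)
        if optimize: m.optimize('bfgs', messages=verbose, max_f_eval=10000)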

View file

@@ -10,10 +10,11 @@ except:
     pass
 import numpy as np
 import GPy
+import pods
 def olympic_marathon_men(optimize=True, plot=True):
     """Run a standard Gaussian process regression on the Olympic marathon data."""
-    data = GPy.util.datasets.olympic_marathon_men()
+    data = pods.datasets.olympic_marathon_men()
     # create simple GP Model
     m = GPy.models.GPRegression(data['X'], data['Y'])
@@ -82,7 +83,7 @@ def epomeo_gpx(max_iters=200, optimize=True, plot=True):
     from the Mount Epomeo runs. Requires gpxpy to be installed on your system
     to load in the data.
     """
-    data = GPy.util.datasets.epomeo_gpx()
+    data = pods.datasets.epomeo_gpx()
     num_data_list = []
     for Xpart in data['X']:
         num_data_list.append(Xpart.shape[0])
@@ -107,9 +108,9 @@ def epomeo_gpx(max_iters=200, optimize=True, plot=True):
     k = k1**k2
     m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
-    m.constrain_fixed('.*rbf_var', 1.)
-    m.constrain_fixed('iip')
-    m.constrain_bounded('noise_variance', 1e-3, 1e-1)
+    m.constrain_fixed('.*variance', 1.)
+    m.inducing_inputs.constrain_fixed()
+    m.Gaussian_noise.variance.constrain_bounded(1e-3, 1e-1)
     m.optimize(max_iters=max_iters,messages=True)
     return m
@@ -125,7 +126,7 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000
     length_scales = np.linspace(0.1, 60., resolution)
     log_SNRs = np.linspace(-3., 4., resolution)
-    data = GPy.util.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
+    data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
     # data['Y'] = data['Y'][0::2, :]
     # data['X'] = data['X'][0::2, :]
@@ -151,16 +152,16 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000
         kern = GPy.kern.RBF(1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50))
         m = GPy.models.GPRegression(data['X'], data['Y'], kernel=kern)
-        m['noise_variance'] = np.random.uniform(1e-3, 1)
-        optim_point_x[0] = m['rbf_lengthscale']
-        optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
+        m.Gaussian_noise.variance = np.random.uniform(1e-3, 1)
+        optim_point_x[0] = m.rbf.lengthscale
+        optim_point_y[0] = np.log10(m.rbf.variance) - np.log10(m.Gaussian_noise.variance);
         # optimize
         if optimize:
            m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
-        optim_point_x[1] = m['rbf_lengthscale']
-        optim_point_y[1] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
+        optim_point_x[1] = m.rbf.lengthscale
+        optim_point_y[1] = np.log10(m.rbf.variance) - np.log10(m.Gaussian_noise.variance);
         if plot:
             pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
@@ -191,7 +192,7 @@ def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.RBF):
         noise_var = total_var / (1. + SNR)
         signal_var = total_var - noise_var
         model.kern['.*variance'] = signal_var
-        model['noise_variance'] = noise_var
+        model.Gaussian_noise.variance = noise_var
         length_scale_lls = []
         for length_scale in length_scales:
@@ -205,13 +206,13 @@ def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.RBF):
 def olympic_100m_men(optimize=True, plot=True):
     """Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
-    data = GPy.util.datasets.olympic_100m_men()
+    data = pods.datasets.olympic_100m_men()
     # create simple GP Model
     m = GPy.models.GPRegression(data['X'], data['Y'])
     # set the lengthscale to be something sensible (defaults to 1)
-    m['rbf_lengthscale'] = 10
+    m.rbf.lengthscale = 10
     if optimize:
         m.optimize('bfgs', max_iters=200)
@@ -222,7 +223,7 @@ def olympic_100m_men(optimize=True, plot=True):
 def toy_rbf_1d(optimize=True, plot=True):
     """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
-    data = GPy.util.datasets.toy_rbf_1d()
+    data = pods.datasets.toy_rbf_1d()
     # create simple GP Model
     m = GPy.models.GPRegression(data['X'], data['Y'])
@@ -236,7 +237,7 @@ def toy_rbf_1d(optimize=True, plot=True):
 def toy_rbf_1d_50(optimize=True, plot=True):
     """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
-    data = GPy.util.datasets.toy_rbf_1d_50()
+    data = pods.datasets.toy_rbf_1d_50()
     # create simple GP Model
     m = GPy.models.GPRegression(data['X'], data['Y'])
@@ -303,12 +304,11 @@ def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize
     # m.set_prior('.*lengthscale',len_prior)
     if optimize:
-        m.optimize(optimizer='scg', max_iters=max_iters, messages=1)
+        m.optimize(optimizer='scg', max_iters=max_iters)
     if plot:
         m.kern.plot_ARD()
-    print m
     return m
 def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
@@ -343,24 +343,23 @@ def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, o
     # m.set_prior('.*lengthscale',len_prior)
     if optimize:
-        m.optimize(optimizer='scg', max_iters=max_iters, messages=1)
+        m.optimize(optimizer='scg', max_iters=max_iters)
     if plot:
         m.kern.plot_ARD()
-    print m
     return m
 def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
     """Predict the location of a robot given wireless signal strength readings."""
-    data = GPy.util.datasets.robot_wireless()
+    data = pods.datasets.robot_wireless()
     # create simple GP Model
     m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)
     # optimize
     if optimize:
-        m.optimize(messages=True, max_iters=max_iters)
+        m.optimize(max_iters=max_iters)
     Xpredict = m.predict(data['Ytest'])[0]
     if plot:
@@ -372,13 +371,12 @@ def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
     sse = ((data['Xtest'] - Xpredict)**2).sum()
-    print m
     print('Sum of squares error on test data: ' + str(sse))
     return m
 def silhouette(max_iters=100, optimize=True, plot=True):
     """Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
-    data = GPy.util.datasets.silhouette()
+    data = pods.datasets.silhouette()
     # create simple GP Model
     m = GPy.models.GPRegression(data['X'], data['Y'])
@@ -390,7 +388,7 @@ def silhouette(max_iters=100, optimize=True, plot=True):
     print m
     return m
-def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=True):
+def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=False):
     """Run a 1D example of a sparse GP regression."""
     # sample inputs and outputs
     X = np.random.uniform(-3., 3., (num_samples, 1))
@@ -401,10 +399,10 @@ def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, opti
     m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
     if checkgrad:
-        m.checkgrad(verbose=1)
+        m.checkgrad()
     if optimize:
-        m.optimize('tnc', messages=1, max_iters=max_iters)
+        m.optimize('tnc', max_iters=max_iters)
     if plot:
         m.plot()
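
The other change running through this file is the move from string-indexed parameter access (m['rbf_lengthscale']) to GPy's attribute-style parameter hierarchy, including the new constraint calls. A self-contained sketch of the new interface on toy data (the data generation here is illustrative, not from the commit):

    import numpy as np
    import GPy

    X = np.random.uniform(-3., 3., (50, 1))
    Y = np.sin(X) + np.random.randn(50, 1) * 0.05  # illustrative toy targets
    m = GPy.models.GPRegression(X, Y)  # uses an RBF kernel by default
    m.rbf.lengthscale = 10                                    # was m['rbf_lengthscale'] = 10
    m.Gaussian_noise.variance = 0.05                          # was m['noise_variance'] = 0.05
    m.Gaussian_noise.variance.constrain_bounded(1e-3, 1e-1)   # was m.constrain_bounded('noise_variance', 1e-3, 1e-1)
    m.optimize(max_iters=100)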