Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-04-30 23:36:23 +02:00
Changed more examples to accept optimize and plot
This commit is contained in: parent 0a36d98a71, commit 98074e1e6c
3 changed files with 144 additions and 113 deletions
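The change applied throughout: each example function gains optimize and plot keyword arguments, and calls with side effects move under guards, so an example can run headless (say, from a test suite) or skip the fit entirely. A minimal sketch of the pattern, using only calls that appear in the hunks below; the name example_template is illustrative, not from the commit:

    import numpy as np
    import GPy

    def example_template(max_iters=100, optimize=True, plot=True):
        # toy 1D data: a noisy sine
        X = np.random.uniform(-3., 3., (40, 1))
        Y = np.sin(X) + np.random.randn(40, 1) * 0.05
        m = GPy.models.GPRegression(X, Y, GPy.kern.rbf(1))
        if optimize:
            # the fit is skippable, so tests can exercise construction alone
            m.optimize('tnc', messages=1, max_iters=max_iters)
        if plot:
            # plotting is skippable, so no figure backend is required
            m.plot()
        print m
        return m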
[file 1 of 3]

@@ -263,24 +263,24 @@ def boston_example(optimize=True, plot=True):
         ax.set_axisbelow(True)
     return mstu_t

-def precipitation_example():
-    import sklearn
-    from sklearn.cross_validation import KFold
-    data = datasets.boston_housing()
-    X = data['X'].copy()
-    Y = data['Y'].copy()
-    X = X-X.mean(axis=0)
-    X = X/X.std(axis=0)
-    Y = Y-Y.mean()
-    Y = Y/Y.std()
-    import ipdb; ipdb.set_trace() # XXX BREAKPOINT
-    num_folds = 10
-    kf = KFold(len(Y), n_folds=num_folds, indices=True)
-    score_folds = np.zeros((4, num_folds))
-    def rmse(Y, Ystar):
-        return np.sqrt(np.mean((Y-Ystar)**2))
-    #for train, test in kf:
-    for n, (train, test) in enumerate(kf):
-        X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
-        print "Fold {}".format(n)
+#def precipitation_example():
+    #import sklearn
+    #from sklearn.cross_validation import KFold
+    #data = datasets.boston_housing()
+    #X = data['X'].copy()
+    #Y = data['Y'].copy()
+    #X = X-X.mean(axis=0)
+    #X = X/X.std(axis=0)
+    #Y = Y-Y.mean()
+    #Y = Y/Y.std()
+    #import ipdb; ipdb.set_trace() # XXX BREAKPOINT
+    #num_folds = 10
+    #kf = KFold(len(Y), n_folds=num_folds, indices=True)
+    #score_folds = np.zeros((4, num_folds))
+    #def rmse(Y, Ystar):
+        #return np.sqrt(np.mean((Y-Ystar)**2))
+    ##for train, test in kf:
+    #for n, (train, test) in enumerate(kf):
+        #X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
+        #print "Fold {}".format(n)
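The hunk above comments out a half-finished example (note the ipdb breakpoint and the Python 2 print) rather than deleting it. For orientation, here is a hypothetical completion of the loop it was building toward: per-fold RMSE under the old sklearn.cross_validation KFold API that the snippet imports. The GPRegression fit is an assumption; only the scaffolding comes from the commented code:

    for n, (train, test) in enumerate(kf):
        X_train, X_test = X[train], X[test]
        Y_train, Y_test = Y[train], Y[test]
        # assumed model choice; the original stops before fitting anything
        m = GPy.models.GPRegression(X_train, Y_train)
        m.optimize(max_iters=100)
        Ystar = m.predict(X_test)[0]  # predictive mean
        score_folds[0, n] = rmse(Y_test, Ystar)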
[file 2 of 3]

@@ -101,9 +101,7 @@ def coregionalization_sparse(optimize=True, plot=True):
     return m

-
-
-def epomeo_gpx(optimize=True, plot=True):
+def epomeo_gpx(max_iters=200, optimize=True, plot=True):
     """
     Perform Gaussian process regression on the latitude and longitude data
     from the Mount Epomeo runs. Requires gpxpy to be installed on your system
@@ -141,8 +139,7 @@ def epomeo_gpx(optimize=True, plot=True):
     return m

-
-def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300):
+def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300, optimize=True, plot=True):
    """
    Show an example of a multimodal error surface for Gaussian process
    regression. Gene 939 has bimodal behaviour where the noisy mode is
@@ -160,13 +157,14 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000
     data['Y'] = data['Y'] - np.mean(data['Y'])

     lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)
-    pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
-    ax = pb.gca()
-    pb.xlabel('length scale')
-    pb.ylabel('log_10 SNR')
+    if plot:
+        pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
+        ax = pb.gca()
+        pb.xlabel('length scale')
+        pb.ylabel('log_10 SNR')

-    xlim = ax.get_xlim()
-    ylim = ax.get_ylim()
+        xlim = ax.get_xlim()
+        ylim = ax.get_ylim()

     # Now run a few optimizations
     models = []
@@ -183,16 +181,19 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000
         optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);

         # optimize
-        m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)
+        if optimize:
+            m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)

         optim_point_x[1] = m['rbf_lengthscale']
         optim_point_y[1] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);

-        pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
+        if plot:
+            pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
         models.append(m)

-    ax.set_xlim(xlim)
-    ax.set_ylim(ylim)
+    if plot:
+        ax.set_xlim(xlim)
+        ax.set_ylim(ylim)
     return m # (models, lls)

 def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
@@ -295,6 +296,7 @@ def toy_poisson_rbf_1d(optimize=True, plot=True):

 def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
     """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
+    optimizer='scg'
     x_len = 30
     X = np.linspace(0, 10, x_len)[:, None]
     f_true = np.random.multivariate_normal(np.zeros(x_len), GPy.kern.rbf(1).K(X))
@@ -307,7 +309,7 @@ def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
     m = GPy.models.GPRegression(X, Y, likelihood=likelihood)

     if optimize:
-        m.optimize(optimizer, max_f_eval=max_nb_eval_optim)
+        m.optimize(optimizer)
     if plot:
         m.plot()
         # plot the real underlying rate function
@@ -315,9 +317,7 @@ def toy_poisson_rbf_1d_laplace(optimize=True, plot=True):
     return m

-
-
-def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4):
+def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
     # Create an artificial dataset where the values in the targets (Y)
     # only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
     # see if this dependency can be recovered
@@ -347,13 +347,16 @@ def toy_ARD(max_iters=1000, kernel_type='linear', num_samples=300, D=4):
     # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
     # m.set_prior('.*lengthscale',len_prior)

-    m.optimize(optimizer='scg', max_iters=max_iters, messages=1)
+    if optimize:
+        m.optimize(optimizer='scg', max_iters=max_iters, messages=1)

-    m.kern.plot_ARD()
-    print(m)
+    if plot:
+        m.kern.plot_ARD()
+
+    print m
     return m

-def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4):
+def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4, optimize=True, plot=True):
     # Create an artificial dataset where the values in the targets (Y)
     # only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
     # see if this dependency can be recovered
@@ -384,13 +387,16 @@ def toy_ARD_sparse(max_iters=1000, kernel_type='linear', num_samples=300, D=4):
     # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
     # m.set_prior('.*lengthscale',len_prior)

-    m.optimize(optimizer='scg', max_iters=max_iters, messages=1)
+    if optimize:
+        m.optimize(optimizer='scg', max_iters=max_iters, messages=1)

-    m.kern.plot_ARD()
-    print(m)
+    if plot:
+        m.kern.plot_ARD()
+
+    print m
     return m

-def robot_wireless(max_iters=100, kernel=None):
+def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
     """Predict the location of a robot given wirelss signal strength readings."""
     data = GPy.util.datasets.robot_wireless()
@@ -398,20 +404,24 @@ def robot_wireless(max_iters=100, kernel=None):
     m = GPy.models.GPRegression(data['Y'], data['X'], kernel=kernel)

-    # optimize
-    m.optimize(messages=True, max_iters=max_iters)
+    if optimize:
+        m.optimize(messages=True, max_iters=max_iters)

     Xpredict = m.predict(data['Ytest'])[0]
-    pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
-    pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
-    pb.axis('equal')
-    pb.title('WiFi Localization with Gaussian Processes')
-    pb.legend(('True Location', 'Predicted Location'))
+    if plot:
+        pb.plot(data['Xtest'][:, 0], data['Xtest'][:, 1], 'r-')
+        pb.plot(Xpredict[:, 0], Xpredict[:, 1], 'b-')
+        pb.axis('equal')
+        pb.title('WiFi Localization with Gaussian Processes')
+        pb.legend(('True Location', 'Predicted Location'))

     sse = ((data['Xtest'] - Xpredict)**2).sum()
-    print(m)
+
+    print m
     print('Sum of squares error on test data: ' + str(sse))
     return m

-def silhouette(max_iters=100):
+def silhouette(max_iters=100, optimize=True, plot=True):
     """Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
     data = GPy.util.datasets.silhouette()
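Because Xpredict and the sum-of-squares error sit outside the plot guard, the wireless example still reports test error when run headless, e.g. (a usage sketch, not code from the commit):

    m = robot_wireless(max_iters=100, optimize=True, plot=False)  # prints the model and SSE, opens no figures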
@@ -419,12 +429,13 @@ def silhouette(max_iters=100):
     m = GPy.models.GPRegression(data['X'], data['Y'])

-    # optimize
-    m.optimize(messages=True, max_iters=max_iters)
+    if optimize:
+        m.optimize(messages=True, max_iters=max_iters)

-    print(m)
+    print m
     return m

-def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100):
+def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True):
     """Run a 1D example of a sparse GP regression."""
     # sample inputs and outputs
     X = np.random.uniform(-3., 3., (num_samples, 1))
@@ -433,14 +444,17 @@ def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100):
     rbf = GPy.kern.rbf(1)
     # create simple GP Model
     m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)

     m.checkgrad(verbose=1)
-    m.optimize('tnc', messages=1, max_iters=max_iters)
-    m.plot()

+    if optimize:
+        m.optimize('tnc', messages=1, max_iters=max_iters)
+
+    if plot:
+        m.plot()

     return m

-def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100):
+def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, optimize=True, plot=True):
     """Run a 2D example of a sparse GP regression."""
     X = np.random.uniform(-3., 3., (num_samples, 2))
     Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(num_samples, 1) * 0.05
@@ -456,13 +470,18 @@ def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100):

     m.checkgrad()

-    # optimize and plot
-    m.optimize('tnc', messages=1, max_iters=max_iters)
-    m.plot()
-    print(m)
+    # optimize
+    if optimize:
+        m.optimize('tnc', messages=1, max_iters=max_iters)
+
+    # plot
+    if plot:
+        m.plot()
+
+    print m
     return m

-def uncertain_inputs_sparse_regression(optimize=True, plot=True):
+def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
     """Run a 1D example of a sparse GP regression with uncertain inputs."""
     fig, axes = pb.subplots(1, 2, figsize=(12, 5))
@@ -477,18 +496,23 @@ def uncertain_inputs_sparse_regression(optimize=True, plot=True):

     # create simple GP Model - no input uncertainty on this one
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
-    m.optimize('scg', messages=1, max_iters=max_iters)
-    m.plot(ax=axes[0])
-    axes[0].set_title('no input uncertainty')
+    if optimize:
+        m.optimize('scg', messages=1, max_iters=max_iters)
+
+    if plot:
+        m.plot(ax=axes[0])
+        axes[0].set_title('no input uncertainty')
+    print m

     # the same Model with uncertainty
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S)
-    m.optimize('scg', messages=1, max_iters=max_iters)
-    m.plot(ax=axes[1])
-    axes[1].set_title('with input uncertainty')
-    print(m)
-
-    fig.canvas.draw()
+    if optimize:
+        m.optimize('scg', messages=1, max_iters=max_iters)
+    if plot:
+        m.plot(ax=axes[1])
+        axes[1].set_title('with input uncertainty')
+        fig.canvas.draw()
+
+    print m
     return m
[file 3 of 3]

@@ -11,7 +11,7 @@ pb.ion()
 import numpy as np
 import GPy

-def tuto_GP_regression():
+def tuto_GP_regression(optimize=True, plot=True):
     """The detailed explanations of the commands used in this file can be found in the tutorial section"""

     X = np.random.uniform(-3.,3.,(20,1))
@@ -22,7 +22,8 @@ def tuto_GP_regression():
     m = GPy.models.GPRegression(X, Y, kernel)

     print m
-    m.plot()
+    if plot:
+        m.plot()

     m.constrain_positive('')
@@ -31,9 +32,9 @@ def tuto_GP_regression():
     m.constrain_bounded('.*lengthscale',1.,10. )
     m.constrain_fixed('.*noise',0.0025)

-    m.optimize()
-
-    m.optimize_restarts(num_restarts = 10)
+    if optimize:
+        m.optimize()
+        m.optimize_restarts(num_restarts = 10)

     #######################################################
     #######################################################
@@ -51,22 +52,26 @@ def tuto_GP_regression():
     m.constrain_positive('')

-    # optimize and plot
-    m.optimize('tnc', max_f_eval = 1000)
-    m.plot()
-    print(m)
+    if optimize:
+        m.optimize('tnc', max_f_eval = 1000)
+    if plot:
+        m.plot()
+
+    print m
     return(m)

-def tuto_kernel_overview():
+def tuto_kernel_overview(optimize=True, plot=True):
     """The detailed explanations of the commands used in this file can be found in the tutorial section"""
     ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)
     ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.)
     ker3 = GPy.kern.rbf(1, .5, .5)

     print ker2

-    ker1.plot()
-    ker2.plot()
-    ker3.plot()
+    if plot:
+        ker1.plot()
+        ker2.plot()
+        ker3.plot()

     k1 = GPy.kern.rbf(1,1.,2.)
     k2 = GPy.kern.Matern32(1, 0.5, 0.2)
@@ -77,8 +82,8 @@ def tuto_kernel_overview():

     # Sum of kernels
     k_add = k1.add(k2) # By default, tensor=False
-    k_addtens = k1.add(k2,tensor=True)

+    k_addtens = k1.add(k2,tensor=True)

     k1 = GPy.kern.rbf(1,1.,2)
     k2 = GPy.kern.periodic_Matern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5)
@@ -102,7 +107,7 @@ def tuto_kernel_overview():
     k.unconstrain('white')
     k.constrain_bounded('white',lower=1e-5,upper=.5)
-    print k
+

     k_cst = GPy.kern.bias(1,variance=1.)
     k_mat = GPy.kern.Matern52(1,variance=1., lengthscale=3)
     Kanova = (k_cst + k_mat).prod(k_cst + k_mat,tensor=True)
@@ -114,30 +119,32 @@ def tuto_kernel_overview():

     # Create GP regression model
     m = GPy.models.GPRegression(X, Y, Kanova)
-    fig = pb.figure(figsize=(5,5))
-    ax = fig.add_subplot(111)
-    m.plot(ax=ax)
-
-    pb.figure(figsize=(20,3))
-    pb.subplots_adjust(wspace=0.5)
-    axs = pb.subplot(1,5,1)
-    m.plot(ax=axs)
-    pb.subplot(1,5,2)
-    pb.ylabel("= ",rotation='horizontal',fontsize='30')
-    axs = pb.subplot(1,5,3)
-    m.plot(ax=axs, which_parts=[False,True,False,False])
-    pb.ylabel("cst +",rotation='horizontal',fontsize='30')
-    axs = pb.subplot(1,5,4)
-    m.plot(ax=axs, which_parts=[False,False,True,False])
-    pb.ylabel("+ ",rotation='horizontal',fontsize='30')
-    axs = pb.subplot(1,5,5)
-    pb.ylabel("+ ",rotation='horizontal',fontsize='30')
-    m.plot(ax=axs, which_parts=[False,False,False,True])
+    if plot:
+        fig = pb.figure(figsize=(5,5))
+        ax = fig.add_subplot(111)
+        m.plot(ax=ax)
+
+        pb.figure(figsize=(20,3))
+        pb.subplots_adjust(wspace=0.5)
+        axs = pb.subplot(1,5,1)
+        m.plot(ax=axs)
+        pb.subplot(1,5,2)
+        pb.ylabel("= ",rotation='horizontal',fontsize='30')
+        axs = pb.subplot(1,5,3)
+        m.plot(ax=axs, which_parts=[False,True,False,False])
+        pb.ylabel("cst +",rotation='horizontal',fontsize='30')
+        axs = pb.subplot(1,5,4)
+        m.plot(ax=axs, which_parts=[False,False,True,False])
+        pb.ylabel("+ ",rotation='horizontal',fontsize='30')
+        axs = pb.subplot(1,5,5)
+        pb.ylabel("+ ",rotation='horizontal',fontsize='30')
+        m.plot(ax=axs, which_parts=[False,False,False,True])

     return(m)


-def model_interaction():
+def model_interaction(optimize=True, plot=True):
     X = np.random.randn(20,1)
     Y = np.sin(X) + np.random.randn(*X.shape)*0.01 + 5.
     k = GPy.kern.rbf(1) + GPy.kern.bias(1)
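With every entry point now taking the two flags, any of these demos can be smoke-tested without a display or a long fit. A usage sketch; the GPy.examples.regression path is taken from the diff itself, and the keyword defaults from the new signatures:

    import GPy
    # fit but do not plot (no matplotlib backend needed)
    m = GPy.examples.regression.sparse_GP_regression_1D(optimize=True, plot=False)
    # build only: skip both the optimization and the figures
    m2 = GPy.examples.regression.toy_ARD(max_iters=50, optimize=False, plot=False)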