Merge branch 'devel' into params

Conflicts:
	GPy/core/transformations.py
	GPy/kern/parts/kernpart.py
This commit is contained in:
Max Zwiessele 2013-09-20 17:20:46 +01:00
commit c2d217e72c
77 changed files with 3608 additions and 807 deletions

View file

@@ -166,3 +166,35 @@ def FITC_crescent_data(num_inducing=10, seed=default_seed):
print(m)
m.plot()
return m
def toy_heaviside(seed=default_seed):
"""
Simple 1D classification example using a heavy side gp transformation
:param seed : seed value for data generation (default is 4).
:type seed: int
"""
data = GPy.util.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
# Model definition
noise_model = GPy.likelihoods.binomial(GPy.likelihoods.noise_models.gp_transformations.Heaviside())
likelihood = GPy.likelihoods.EP(Y,noise_model)
m = GPy.models.GPClassification(data['X'], likelihood=likelihood)
# Optimize
m.update_likelihood_approximation()
# Parameters optimization:
m.optimize()
#m.pseudo_EM()
# Plot
fig, axes = pb.subplots(2,1)
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
print(m)
return m

View file

@@ -9,9 +9,9 @@ import pylab as pb
import numpy as np
import GPy
def coregionalisation_toy2(max_iters=100):
def coregionalization_toy2(max_iters=100):
"""
A simple demonstration of coregionalisation on two sinusoidal functions.
A simple demonstration of coregionalization on two sinusoidal functions.
"""
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
@@ -22,8 +22,8 @@ def coregionalisation_toy2(max_iters=100):
Y = np.vstack((Y1, Y2))
k1 = GPy.kern.rbf(1) + GPy.kern.bias(1)
k2 = GPy.kern.coregionalise(2, 1)
k = k1**k2
k2 = GPy.kern.coregionalize(2,1)
k = k1**k2 #k = k1.prod(k2,tensor=True)
m = GPy.models.GPRegression(X, Y, kernel=k)
m.constrain_fixed('.*rbf_var', 1.)
# m.constrain_positive('.*kappa')
@@ -40,41 +40,32 @@ def coregionalisation_toy2(max_iters=100):
pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
return m
def coregionalisation_toy(max_iters=100):
def coregionalization_toy(max_iters=100):
"""
A simple demonstration of coregionalisation on two sinusoidal functions.
A simple demonstration of coregionalization on two sinusoidal functions.
"""
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
index = np.vstack((np.zeros_like(X1), np.ones_like(X2)))
X = np.hstack((np.vstack((X1, X2)), index))
X = np.vstack((X1, X2))
Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
k1 = GPy.kern.rbf(1)
k2 = GPy.kern.coregionalise(2, 2)
k = k1**k2 #k1.prod(k2, tensor=True)
m = GPy.models.GPRegression(X, Y, kernel=k)
m = GPy.models.GPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1])
m.constrain_fixed('.*rbf_var', 1.)
# m.constrain_positive('kappa')
m.optimize(max_iters=max_iters)
pb.figure()
Xtest1 = np.hstack((np.linspace(0, 9, 100)[:, None], np.zeros((100, 1))))
Xtest2 = np.hstack((np.linspace(0, 9, 100)[:, None], np.ones((100, 1))))
mean, var, low, up = m.predict(Xtest1)
GPy.util.plot.gpplot(Xtest1[:, 0], mean, low, up)
mean, var, low, up = m.predict(Xtest2)
GPy.util.plot.gpplot(Xtest2[:, 0], mean, low, up)
pb.plot(X1[:, 0], Y1[:, 0], 'rx', mew=2)
pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
fig, axes = pb.subplots(2,1)
m.plot(output=0,ax=axes[0])
m.plot(output=1,ax=axes[1])
axes[0].set_title('Output 0')
axes[1].set_title('Output 1')
return m
def coregionalisation_sparse(max_iters=100):
def coregionalization_sparse(max_iters=100):
"""
A simple demonstration of coregionalisation on two sinusoidal functions using sparse approximations.
A simple demonstration of coregionalization on two sinusoidal functions using sparse approximations.
"""
X1 = np.random.rand(500, 1) * 8
X2 = np.random.rand(300, 1) * 5
@@ -84,33 +75,18 @@ def coregionalisation_sparse(max_iters=100):
Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
Y = np.vstack((Y1, Y2))
num_inducing = 40
Z = np.hstack((np.random.rand(num_inducing, 1) * 8, np.random.randint(0, 2, num_inducing)[:, None]))
k1 = GPy.kern.rbf(1)
k2 = GPy.kern.coregionalise(2, 2)
k = k1**k2 #.prod(k2, tensor=True) # + GPy.kern.white(2,0.001)
m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
m.constrain_fixed('.*rbf_var', 1.)
m.constrain_fixed('iip')
m.constrain_bounded('noise_variance', 1e-3, 1e-1)
# m.optimize_restarts(5, robust=True, messages=1, max_iters=max_iters, optimizer='bfgs')
m.optimize(max_iters=max_iters)
m = GPy.models.SparseGPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1],num_inducing=20)
m.constrain_fixed('.*rbf_var',1.)
m.optimize(messages=1)
#m.optimize_restarts(5, robust=True, messages=1, max_iters=max_iters, optimizer='bfgs')
# plotting:
pb.figure()
Xtest1 = np.hstack((np.linspace(0, 9, 100)[:, None], np.zeros((100, 1))))
Xtest2 = np.hstack((np.linspace(0, 9, 100)[:, None], np.ones((100, 1))))
mean, var, low, up = m.predict(Xtest1)
GPy.util.plot.gpplot(Xtest1[:, 0], mean, low, up)
mean, var, low, up = m.predict(Xtest2)
GPy.util.plot.gpplot(Xtest2[:, 0], mean, low, up)
pb.plot(X1[:, 0], Y1[:, 0], 'rx', mew=2)
pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
y = pb.ylim()[0]
pb.plot(Z[:, 0][Z[:, 1] == 0], np.zeros(np.sum(Z[:, 1] == 0)) + y, 'r|', mew=2)
pb.plot(Z[:, 0][Z[:, 1] == 1], np.zeros(np.sum(Z[:, 1] == 1)) + y, 'g|', mew=2)
fig, axes = pb.subplots(2,1)
m.plot(output=0,ax=axes[0])
m.plot(output=1,ax=axes[1])
axes[0].set_title('Output 0')
axes[1].set_title('Output 1')
return m
def epomeo_gpx(max_iters=100):
@@ -136,8 +112,8 @@ def epomeo_gpx(max_iters=100):
np.random.randint(0, 4, num_inducing)[:, None]))
k1 = GPy.kern.rbf(1)
k2 = GPy.kern.coregionalise(output_dim=5, rank=5)
k = k1**k2
k2 = GPy.kern.coregionalize(output_dim=5, rank=5)
k = k1**k2
m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
m.constrain_fixed('.*rbf_var', 1.)
@@ -401,8 +377,6 @@ def silhouette(max_iters=100):
print(m)
return m
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100):
"""Run a 1D example of a sparse GP regression."""
# sample inputs and outputs