Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-10 12:32:40 +02:00
Add constant jitter to Kmm; delete some white kernels in models and examples
Parent: 1cc8f95717
Commit: 5570e82943
5 changed files with 165 additions and 161 deletions
@@ -50,6 +50,8 @@ class SparseGP(GPBase):
         if self.has_uncertain_inputs:
             self.X_variance /= np.square(self._Xscale)
 
+        self._const_jitter = None
+
     def getstate(self):
         """
         Get the current state of the class,
@@ -81,7 +83,10 @@ class SparseGP(GPBase):
 
     def _computations(self):
         # factor Kmm
-        self.Lm = jitchol(self.Kmm)
+        if self._const_jitter is None or not(self._const_jitter.shape[0] == self.num_inducing):
+            self._const_jitter = np.eye(self.num_inducing) * 1e-7
+        self.Lm = jitchol(self.Kmm + self._const_jitter)
+        # TODO: no white kernel needed anymore, all noise in likelihood --------
 
         # The rather complex computations of self.A
         if self.has_uncertain_inputs:
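The hunk above caches a constant diagonal jitter and adds it to Kmm before the Cholesky factorisation, rather than relying on a white kernel for numerical stability. A minimal sketch of the idea, using scipy.linalg.cholesky in place of GPy's jitchol (the duplicated-inducing-point matrix below is illustrative, not data from the commit):

    import numpy as np
    from scipy.linalg import cholesky

    def chol_with_const_jitter(Kmm, jitter=1e-7):
        # Kmm + jitter*I stays positive definite even when Kmm is
        # numerically rank deficient, so the factorisation succeeds.
        return cholesky(Kmm + jitter * np.eye(Kmm.shape[0]), lower=True)

    # Nearly singular Kmm from two coincident inducing inputs:
    Z = np.array([[0.0], [0.0], [1.0]])
    Kmm = np.exp(-0.5 * (Z - Z.T) ** 2)   # RBF kernel with unit parameters
    L = chol_with_const_jitter(Kmm)
    assert np.allclose(L @ L.T, Kmm + 1e-7 * np.eye(3))

Caching the jitter matrix (rebuilt only when num_inducing changes) avoids allocating a fresh identity on every call to _computations.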
@@ -92,7 +97,7 @@ class SparseGP(GPBase):
             evals, evecs = linalg.eigh(psi2_beta)
             clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable
             if not np.array_equal(evals, clipped_evals):
-                pass#print evals
+                pass # print evals
             tmp = evecs * np.sqrt(clipped_evals)
             tmp = tmp.T
         else:
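The clipping around this hunk guards the eigendecomposition of psi2_beta against small negative eigenvalues introduced by round-off. A hedged sketch of the same pattern in isolation (the function name and test matrix are illustrative):

    import numpy as np
    from scipy import linalg

    def psd_sqrt_factor(A, upper=1e6):
        # eigh returns real eigenvalues for symmetric A; round-off can push
        # some slightly below zero, so clip before taking the square root.
        evals, evecs = linalg.eigh(A)
        tmp = evecs * np.sqrt(np.clip(evals, 0., upper))
        return tmp  # tmp @ tmp.T is the clipped, PSD version of A

    A = np.array([[1.0, 1.0], [1.0, 1.0 - 1e-12]])  # indefinite by ~5e-13
    tmp = psd_sqrt_factor(A)
    assert np.allclose(tmp @ tmp.T, A, atol=1e-8)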
@@ -114,7 +119,7 @@ class SparseGP(GPBase):
         # back substitute C into psi1Vf
         tmp, info1 = dtrtrs(self.Lm, np.asfortranarray(self.psi1Vf), lower=1, trans=0)
         self._LBi_Lmi_psi1Vf, _ = dtrtrs(self.LB, np.asfortranarray(tmp), lower=1, trans=0)
-        #tmp, info2 = dpotrs(self.LB, tmp, lower=1)
+        # tmp, info2 = dpotrs(self.LB, tmp, lower=1)
         tmp, info2 = dtrtrs(self.LB, self._LBi_Lmi_psi1Vf, lower=1, trans=1)
         self.Cpsi1Vf, info3 = dtrtrs(self.Lm, tmp, lower=1, trans=1)
 
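The four dtrtrs calls apply (Lm LB LB^T Lm^T)^-1 to psi1Vf as two forward and two backward triangular solves, in place of the commented-out dpotrs call. A sketch of the same sequence with scipy.linalg.solve_triangular standing in for GPy's LAPACK wrapper (the random factors are illustrative):

    import numpy as np
    from scipy.linalg import solve_triangular

    rng = np.random.default_rng(0)
    A = rng.standard_normal((5, 5))
    B = rng.standard_normal((5, 5))
    Lm = np.linalg.cholesky(A @ A.T + 5 * np.eye(5))   # stand-in for chol(Kmm)
    LB = np.linalg.cholesky(B @ B.T + 5 * np.eye(5))   # stand-in for chol(B)
    b = rng.standard_normal((5, 1))                    # stand-in for psi1Vf

    tmp = solve_triangular(Lm, b, lower=True)             # forward, trans=0
    tmp = solve_triangular(LB, tmp, lower=True)           # forward, trans=0
    tmp = solve_triangular(LB, tmp, lower=True, trans=1)  # back substitution
    x = solve_triangular(Lm, tmp, lower=True, trans=1)    # back substitution

    # The four solves together invert Lm @ LB @ LB.T @ Lm.T against b:
    assert np.allclose(Lm @ LB @ LB.T @ Lm.T @ x, b)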
@@ -140,30 +140,32 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
     m.optimize('scg', messages=1)
     return m
 
-def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=150, plot=False, **k):
+def BGPLVM_oil(optimize=True, N=200, Q=7, num_inducing=40, max_iters=1000, plot=False, **k):
     np.random.seed(0)
     data = GPy.util.datasets.oil()
 
     # create simple GP model
-    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
+    kernel = GPy.kern.rbf_inv(Q, 1., [.1] * Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2))
 
     Y = data['X'][:N]
-    Yn = Y - Y.mean(0)
-    Yn /= Yn.std(0)
+    Yn = Gaussian(Y, normalize=True)
+    # Yn = Y - Y.mean(0)
+    # Yn /= Yn.std(0)
 
     m = GPy.models.BayesianGPLVM(Yn, Q, kernel=kernel, num_inducing=num_inducing, **k)
     m.data_labels = data['Y'][:N].argmax(axis=1)
 
     # m.constrain('variance|leng', logexp_clipped())
     # m['.*lengt'] = m.X.var(0).max() / m.X.var(0)
-    m['noise'] = Yn.var() / 100.
+    m['noise'] = Yn.Y.var() / 100.
 
     # optimize
     if optimize:
-        # m.constrain_fixed('noise')
-        # m.optimize('scg', messages=1, max_iters=200, gtol=.05)
-        # m.constrain_positive('noise')
-        m.constrain_bounded('white', 1e-7, 1)
+        m.constrain_fixed('noise')
+        m.optimize('scg', messages=1, max_iters=200, gtol=.05)
+        m.constrain_positive('noise')
         m.optimize('scg', messages=1, max_iters=max_iters, gtol=.05)
 
     if plot:
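The rewritten optimize branch activates a two-stage schedule the old code kept commented out: fit with the noise variance pinned, then free it. As a reusable sketch (the helper name is hypothetical; the calls are the ones from the hunk above):

    def optimize_with_noise_warmup(m, noise_start, max_iters):
        # Stage 1: hold the noise fixed at a small value so the latent
        # space and kernel parameters explain as much signal as possible.
        m['noise'] = noise_start
        m.constrain_fixed('noise')
        m.optimize('scg', messages=1, max_iters=200, gtol=.05)
        # Stage 2: release the noise and refine everything jointly.
        m.constrain_positive('noise')
        m.optimize('scg', messages=1, max_iters=max_iters, gtol=.05)
        return m

With the white kernel gone from the BGPLVM_oil kernel, the old m.constrain_bounded('white', 1e-7, 1) call had to go as well.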
@@ -271,7 +273,7 @@ def bgplvm_simulation(optimize='scg',
                       max_iters=2e4,
                       plot_sim=False):
     # from GPy.core.transformations import logexp_clipped
-    D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 300, 30, 6
+    D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 3, 10
     slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
 
     from GPy.models import mrd
@@ -296,7 +298,7 @@ def bgplvm_simulation(optimize='scg',
     return m
 
 def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
-    D1, D2, D3, N, num_inducing, Q = 150, 200, 400, 500, 3, 7
+    D1, D2, D3, N, num_inducing, Q = 30, 10, 15, 60, 3, 10
     slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
 
     likelihood_list = [Gaussian(x, normalize=True) for x in Ylist]
@@ -383,7 +385,7 @@ def stick_bgplvm(model=None):
         m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)
         # optimize
         m.ensure_default_constraints()
-        m.optimize(messages=1, max_iters=3000, xtol=1e-300, ftol=1e-300)
+        m.optimize('scg', messages=1, max_iters=200, xtol=1e-300, ftol=1e-300)
     m._set_params(m._get_params())
     plt.clf, (latent_axes, sense_axes) = plt.subplots(1, 2)
     plt.sca(latent_axes)
@@ -15,7 +15,7 @@ def toy_rbf_1d(optimizer='tnc', max_nb_eval_optim=100):
     data = GPy.util.datasets.toy_rbf_1d()
 
     # create simple GP Model
-    m = GPy.models.GPRegression(data['X'],data['Y'])
+    m = GPy.models.GPRegression(data['X'], data['Y'])
 
     # optimize
     m.optimize(optimizer, max_f_eval=max_nb_eval_optim)
@@ -29,16 +29,16 @@ def rogers_girolami_olympics(optim_iters=100):
     data = GPy.util.datasets.rogers_girolami_olympics()
 
     # create simple GP Model
-    m = GPy.models.GPRegression(data['X'],data['Y'])
+    m = GPy.models.GPRegression(data['X'], data['Y'])
 
-    #set the lengthscale to be something sensible (defaults to 1)
+    # set the lengthscale to be something sensible (defaults to 1)
     m['rbf_lengthscale'] = 10
 
     # optimize
     m.optimize(max_f_eval=optim_iters)
 
     # plot
-    m.plot(plot_limits = (1850, 2050))
+    m.plot(plot_limits=(1850, 2050))
     print(m)
     return m
 
@@ -47,7 +47,7 @@ def toy_rbf_1d_50(optim_iters=100):
     data = GPy.util.datasets.toy_rbf_1d_50()
 
     # create simple GP Model
-    m = GPy.models.GPRegression(data['X'],data['Y'])
+    m = GPy.models.GPRegression(data['X'], data['Y'])
 
     # optimize
     m.optimize(max_f_eval=optim_iters)
@@ -61,33 +61,33 @@ def toy_ARD(optim_iters=1000, kernel_type='linear', N=300, D=4):
     # Create an artificial dataset where the values in the targets (Y)
     # only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
     # see if this dependency can be recovered
-    X1 = np.sin(np.sort(np.random.rand(N,1)*10,0))
-    X2 = np.cos(np.sort(np.random.rand(N,1)*10,0))
-    X3 = np.exp(np.sort(np.random.rand(N,1),0))
-    X4 = np.log(np.sort(np.random.rand(N,1),0))
+    X1 = np.sin(np.sort(np.random.rand(N, 1) * 10, 0))
+    X2 = np.cos(np.sort(np.random.rand(N, 1) * 10, 0))
+    X3 = np.exp(np.sort(np.random.rand(N, 1), 0))
+    X4 = np.log(np.sort(np.random.rand(N, 1), 0))
     X = np.hstack((X1, X2, X3, X4))
 
-    Y1 = np.asarray(2*X[:,0]+3).reshape(-1,1)
-    Y2 = np.asarray(4*(X[:,2]-1.5*X[:,0])).reshape(-1,1)
+    Y1 = np.asarray(2 * X[:, 0] + 3).reshape(-1, 1)
+    Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0])).reshape(-1, 1)
     Y = np.hstack((Y1, Y2))
 
-    Y = np.dot(Y, np.random.rand(2,D));
-    Y = Y + 0.2*np.random.randn(Y.shape[0], Y.shape[1])
+    Y = np.dot(Y, np.random.rand(2, D));
+    Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
     Y -= Y.mean()
     Y /= Y.std()
 
     if kernel_type == 'linear':
-        kernel = GPy.kern.linear(X.shape[1], ARD = 1)
+        kernel = GPy.kern.linear(X.shape[1], ARD=1)
     elif kernel_type == 'rbf_inv':
-        kernel = GPy.kern.rbf_inv(X.shape[1], ARD = 1)
+        kernel = GPy.kern.rbf_inv(X.shape[1], ARD=1)
     else:
-        kernel = GPy.kern.rbf(X.shape[1], ARD = 1)
+        kernel = GPy.kern.rbf(X.shape[1], ARD=1)
     kernel += GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
     m = GPy.models.GPRegression(X, Y, kernel)
-    #len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
-    #m.set_prior('.*lengthscale',len_prior)
+    # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
+    # m.set_prior('.*lengthscale',len_prior)
 
-    m.optimize(optimizer = 'scg', max_iters = optim_iters, messages = 1)
+    m.optimize(optimizer='scg', max_iters=optim_iters, messages=1)
 
     m.kern.plot_ARD()
     print(m)
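toy_ARD is a good place to check what ARD buys: only input dimensions 1 and 3 (indices 0 and 2) enter the targets, so their fitted lengthscales should come out short. A hypothetical follow-up, assuming toy_ARD returns the fitted model (its return statement falls outside the hunk) and that 'rbf_lengthscale' indexes the per-dimension lengthscale vector, as the naming elsewhere in these examples suggests:

    import numpy as np

    m = toy_ARD(optim_iters=200, kernel_type='rbf')
    lengthscales = np.asarray(m['rbf_lengthscale']).flatten()
    relevance = 1.0 / lengthscales        # short lengthscale = relevant input
    print(relevance.argsort()[::-1])      # expect dims 0 and 2 to lead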
@@ -97,34 +97,34 @@ def toy_ARD_sparse(optim_iters=1000, kernel_type='linear', N=300, D=4):
     # Create an artificial dataset where the values in the targets (Y)
     # only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
     # see if this dependency can be recovered
-    X1 = np.sin(np.sort(np.random.rand(N,1)*10,0))
-    X2 = np.cos(np.sort(np.random.rand(N,1)*10,0))
-    X3 = np.exp(np.sort(np.random.rand(N,1),0))
-    X4 = np.log(np.sort(np.random.rand(N,1),0))
+    X1 = np.sin(np.sort(np.random.rand(N, 1) * 10, 0))
+    X2 = np.cos(np.sort(np.random.rand(N, 1) * 10, 0))
+    X3 = np.exp(np.sort(np.random.rand(N, 1), 0))
+    X4 = np.log(np.sort(np.random.rand(N, 1), 0))
     X = np.hstack((X1, X2, X3, X4))
 
-    Y1 = np.asarray(2*X[:,0]+3)[:,None]
-    Y2 = np.asarray(4*(X[:,2]-1.5*X[:,0]))[:,None]
+    Y1 = np.asarray(2 * X[:, 0] + 3)[:, None]
+    Y2 = np.asarray(4 * (X[:, 2] - 1.5 * X[:, 0]))[:, None]
     Y = np.hstack((Y1, Y2))
 
-    Y = np.dot(Y, np.random.rand(2,D));
-    Y = Y + 0.2*np.random.randn(Y.shape[0], Y.shape[1])
+    Y = np.dot(Y, np.random.rand(2, D));
+    Y = Y + 0.2 * np.random.randn(Y.shape[0], Y.shape[1])
     Y -= Y.mean()
     Y /= Y.std()
 
     if kernel_type == 'linear':
-        kernel = GPy.kern.linear(X.shape[1], ARD = 1)
+        kernel = GPy.kern.linear(X.shape[1], ARD=1)
     elif kernel_type == 'rbf_inv':
-        kernel = GPy.kern.rbf_inv(X.shape[1], ARD = 1)
+        kernel = GPy.kern.rbf_inv(X.shape[1], ARD=1)
     else:
-        kernel = GPy.kern.rbf(X.shape[1], ARD = 1)
-    kernel += GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
-    X_variance = np.ones(X.shape)*0.5
-    m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance = X_variance)
-    #len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
-    #m.set_prior('.*lengthscale',len_prior)
+        kernel = GPy.kern.rbf(X.shape[1], ARD=1)
+    kernel += GPy.kern.bias(X.shape[1])
+    X_variance = np.ones(X.shape) * 0.5
+    m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
+    # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
+    # m.set_prior('.*lengthscale',len_prior)
 
-    m.optimize(optimizer = 'scg', max_iters = optim_iters, messages = 1)
+    m.optimize(optimizer='scg', max_iters=optim_iters, messages=1)
 
     m.kern.plot_ARD()
     print(m)
@@ -135,10 +135,10 @@ def silhouette(optim_iters=100):
     data = GPy.util.datasets.silhouette()
 
     # create simple GP Model
-    m = GPy.models.GPRegression(data['X'],data['Y'])
+    m = GPy.models.GPRegression(data['X'], data['Y'])
 
     # optimize
-    m.optimize(messages=True,max_f_eval=optim_iters)
+    m.optimize(messages=True, max_f_eval=optim_iters)
 
     print(m)
     return m
@@ -147,62 +147,62 @@ def coregionalisation_toy2(optim_iters=100):
     """
    A simple demonstration of coregionalisation on two sinusoidal functions.
     """
-    X1 = np.random.rand(50,1)*8
-    X2 = np.random.rand(30,1)*5
-    index = np.vstack((np.zeros_like(X1),np.ones_like(X2)))
-    X = np.hstack((np.vstack((X1,X2)),index))
-    Y1 = np.sin(X1) + np.random.randn(*X1.shape)*0.05
-    Y2 = np.sin(X2) + np.random.randn(*X2.shape)*0.05 + 2.
-    Y = np.vstack((Y1,Y2))
+    X1 = np.random.rand(50, 1) * 8
+    X2 = np.random.rand(30, 1) * 5
+    index = np.vstack((np.zeros_like(X1), np.ones_like(X2)))
+    X = np.hstack((np.vstack((X1, X2)), index))
+    Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
+    Y2 = np.sin(X2) + np.random.randn(*X2.shape) * 0.05 + 2.
+    Y = np.vstack((Y1, Y2))
 
     k1 = GPy.kern.rbf(1) + GPy.kern.bias(1)
-    k2 = GPy.kern.coregionalise(2,1)
-    k = k1.prod(k2,tensor=True)
-    m = GPy.models.GPRegression(X,Y,kernel=k)
-    m.constrain_fixed('.*rbf_var',1.)
-    #m.constrain_positive('.*kappa')
-    m.optimize('sim',messages=1,max_f_eval=optim_iters)
+    k2 = GPy.kern.coregionalise(2, 1)
+    k = k1.prod(k2, tensor=True)
+    m = GPy.models.GPRegression(X, Y, kernel=k)
+    m.constrain_fixed('.*rbf_var', 1.)
+    # m.constrain_positive('.*kappa')
+    m.optimize('sim', messages=1, max_f_eval=optim_iters)
 
     pb.figure()
-    Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
-    Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
-    mean, var,low,up = m.predict(Xtest1)
-    GPy.util.plot.gpplot(Xtest1[:,0],mean,low,up)
-    mean, var,low,up = m.predict(Xtest2)
-    GPy.util.plot.gpplot(Xtest2[:,0],mean,low,up)
-    pb.plot(X1[:,0],Y1[:,0],'rx',mew=2)
-    pb.plot(X2[:,0],Y2[:,0],'gx',mew=2)
+    Xtest1 = np.hstack((np.linspace(0, 9, 100)[:, None], np.zeros((100, 1))))
+    Xtest2 = np.hstack((np.linspace(0, 9, 100)[:, None], np.ones((100, 1))))
+    mean, var, low, up = m.predict(Xtest1)
+    GPy.util.plot.gpplot(Xtest1[:, 0], mean, low, up)
+    mean, var, low, up = m.predict(Xtest2)
+    GPy.util.plot.gpplot(Xtest2[:, 0], mean, low, up)
+    pb.plot(X1[:, 0], Y1[:, 0], 'rx', mew=2)
+    pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
     return m
 
 def coregionalisation_toy(optim_iters=100):
     """
     A simple demonstration of coregionalisation on two sinusoidal functions.
     """
-    X1 = np.random.rand(50,1)*8
-    X2 = np.random.rand(30,1)*5
-    index = np.vstack((np.zeros_like(X1),np.ones_like(X2)))
-    X = np.hstack((np.vstack((X1,X2)),index))
-    Y1 = np.sin(X1) + np.random.randn(*X1.shape)*0.05
-    Y2 = -np.sin(X2) + np.random.randn(*X2.shape)*0.05
-    Y = np.vstack((Y1,Y2))
+    X1 = np.random.rand(50, 1) * 8
+    X2 = np.random.rand(30, 1) * 5
+    index = np.vstack((np.zeros_like(X1), np.ones_like(X2)))
+    X = np.hstack((np.vstack((X1, X2)), index))
+    Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
+    Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
+    Y = np.vstack((Y1, Y2))
 
     k1 = GPy.kern.rbf(1)
-    k2 = GPy.kern.coregionalise(2,2)
-    k = k1.prod(k2,tensor=True)
-    m = GPy.models.GPRegression(X,Y,kernel=k)
-    m.constrain_fixed('.*rbf_var',1.)
-    #m.constrain_positive('kappa')
+    k2 = GPy.kern.coregionalise(2, 2)
+    k = k1.prod(k2, tensor=True)
+    m = GPy.models.GPRegression(X, Y, kernel=k)
+    m.constrain_fixed('.*rbf_var', 1.)
+    # m.constrain_positive('kappa')
     m.optimize(max_f_eval=optim_iters)
 
     pb.figure()
-    Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
-    Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
-    mean, var,low,up = m.predict(Xtest1)
-    GPy.util.plot.gpplot(Xtest1[:,0],mean,low,up)
-    mean, var,low,up = m.predict(Xtest2)
-    GPy.util.plot.gpplot(Xtest2[:,0],mean,low,up)
-    pb.plot(X1[:,0],Y1[:,0],'rx',mew=2)
-    pb.plot(X2[:,0],Y2[:,0],'gx',mew=2)
+    Xtest1 = np.hstack((np.linspace(0, 9, 100)[:, None], np.zeros((100, 1))))
+    Xtest2 = np.hstack((np.linspace(0, 9, 100)[:, None], np.ones((100, 1))))
+    mean, var, low, up = m.predict(Xtest1)
+    GPy.util.plot.gpplot(Xtest1[:, 0], mean, low, up)
+    mean, var, low, up = m.predict(Xtest2)
+    GPy.util.plot.gpplot(Xtest2[:, 0], mean, low, up)
+    pb.plot(X1[:, 0], Y1[:, 0], 'rx', mew=2)
+    pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
     return m
 
 
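All three coregionalisation demos rely on the same input layout: the rbf kernel sees the first column of X, the coregionalise kernel consumes an appended integer task index, and the tensor product of the two gives the multi-output covariance. A minimal sketch of just that data layout (sizes are illustrative):

    import numpy as np

    X1 = np.random.rand(5, 1) * 8                 # inputs for task 0
    X2 = np.random.rand(3, 1) * 5                 # inputs for task 1
    index = np.vstack((np.zeros_like(X1), np.ones_like(X2)))
    X = np.hstack((np.vstack((X1, X2)), index))   # col 0: input, col 1: task id
    print(X.shape)                                # (8, 2)

Prediction works the same way: Xtest carries a column of zeros or ones to select which task's posterior is evaluated.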
@@ -210,44 +210,45 @@ def coregionalisation_sparse(optim_iters=100):
     """
     A simple demonstration of coregionalisation on two sinusoidal functions using sparse approximations.
     """
-    X1 = np.random.rand(500,1)*8
-    X2 = np.random.rand(300,1)*5
-    index = np.vstack((np.zeros_like(X1),np.ones_like(X2)))
-    X = np.hstack((np.vstack((X1,X2)),index))
-    Y1 = np.sin(X1) + np.random.randn(*X1.shape)*0.05
-    Y2 = -np.sin(X2) + np.random.randn(*X2.shape)*0.05
-    Y = np.vstack((Y1,Y2))
+    X1 = np.random.rand(500, 1) * 8
+    X2 = np.random.rand(300, 1) * 5
+    index = np.vstack((np.zeros_like(X1), np.ones_like(X2)))
+    X = np.hstack((np.vstack((X1, X2)), index))
+    Y1 = np.sin(X1) + np.random.randn(*X1.shape) * 0.05
+    Y2 = -np.sin(X2) + np.random.randn(*X2.shape) * 0.05
+    Y = np.vstack((Y1, Y2))
 
     num_inducing = 40
-    Z = np.hstack((np.random.rand(num_inducing,1)*8,np.random.randint(0,2,num_inducing)[:,None]))
+    Z = np.hstack((np.random.rand(num_inducing, 1) * 8, np.random.randint(0, 2, num_inducing)[:, None]))
 
     k1 = GPy.kern.rbf(1)
-    k2 = GPy.kern.coregionalise(2,2)
-    k = k1.prod(k2,tensor=True) + GPy.kern.white(2,0.001)
+    k2 = GPy.kern.coregionalise(2, 2)
+    k = k1.prod(k2, tensor=True) # + GPy.kern.white(2,0.001)
 
-    m = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=Z)
-    m.constrain_fixed('.*rbf_var',1.)
+    m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
+    m.constrain_fixed('.*rbf_var', 1.)
     m.constrain_fixed('iip')
-    m.constrain_bounded('noise_variance',1e-3,1e-1)
-    m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)
+    m.constrain_bounded('noise_variance', 1e-3, 1e-1)
+    # m.optimize_restarts(5, robust=True, messages=1, max_iters=optim_iters, optimizer='bfgs')
+    m.optimize('bfgs', messages=1, max_iters=optim_iters)
 
-    #plotting:
+    # plotting:
     pb.figure()
-    Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
-    Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
-    mean, var,low,up = m.predict(Xtest1)
-    GPy.util.plot.gpplot(Xtest1[:,0],mean,low,up)
-    mean, var,low,up = m.predict(Xtest2)
-    GPy.util.plot.gpplot(Xtest2[:,0],mean,low,up)
-    pb.plot(X1[:,0],Y1[:,0],'rx',mew=2)
-    pb.plot(X2[:,0],Y2[:,0],'gx',mew=2)
+    Xtest1 = np.hstack((np.linspace(0, 9, 100)[:, None], np.zeros((100, 1))))
+    Xtest2 = np.hstack((np.linspace(0, 9, 100)[:, None], np.ones((100, 1))))
+    mean, var, low, up = m.predict(Xtest1)
+    GPy.util.plot.gpplot(Xtest1[:, 0], mean, low, up)
+    mean, var, low, up = m.predict(Xtest2)
+    GPy.util.plot.gpplot(Xtest2[:, 0], mean, low, up)
+    pb.plot(X1[:, 0], Y1[:, 0], 'rx', mew=2)
+    pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
     y = pb.ylim()[0]
-    pb.plot(Z[:,0][Z[:,1]==0],np.zeros(np.sum(Z[:,1]==0))+y,'r|',mew=2)
-    pb.plot(Z[:,0][Z[:,1]==1],np.zeros(np.sum(Z[:,1]==1))+y,'g|',mew=2)
+    pb.plot(Z[:, 0][Z[:, 1] == 0], np.zeros(np.sum(Z[:, 1] == 0)) + y, 'r|', mew=2)
+    pb.plot(Z[:, 0][Z[:, 1] == 1], np.zeros(np.sum(Z[:, 1] == 1)) + y, 'g|', mew=2)
     return m
 
 
-def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000, optim_iters=300):
+def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, optim_iters=300):
     """Show an example of a multimodal error surface for Gaussian process regression. Gene 939 has bimodal behaviour where the noisy mode is higher."""
 
     # Contour over a range of length scales and signal/noise ratios.
@@ -255,8 +256,8 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
     log_SNRs = np.linspace(-3., 4., resolution)
 
     data = GPy.util.datasets.della_gatta_TRP63_gene_expression(gene_number)
-    #data['Y'] = data['Y'][0::2, :]
-    #data['X'] = data['X'][0::2, :]
+    # data['Y'] = data['Y'][0::2, :]
+    # data['X'] = data['X'][0::2, :]
 
     data['Y'] = data['Y'] - np.mean(data['Y'])
 
@@ -275,11 +276,11 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
     optim_point_y = np.empty(2)
     np.random.seed(seed=seed)
     for i in range(0, model_restarts):
-        #kern = GPy.kern.rbf(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))
-        kern = GPy.kern.rbf(1, variance=np.random.uniform(1e-3,1), lengthscale=np.random.uniform(5,50))
+        # kern = GPy.kern.rbf(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))
+        kern = GPy.kern.rbf(1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50))
 
-        m = GPy.models.GPRegression(data['X'],data['Y'], kernel=kern)
-        m['noise_variance'] = np.random.uniform(1e-3,1)
+        m = GPy.models.GPRegression(data['X'], data['Y'], kernel=kern)
+        m['noise_variance'] = np.random.uniform(1e-3, 1)
         optim_point_x[0] = m['rbf_lengthscale']
         optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
 
@@ -289,12 +290,12 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
         optim_point_x[1] = m['rbf_lengthscale']
         optim_point_y[1] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
 
-        pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1]-optim_point_x[0], optim_point_y[1]-optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
+        pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
         models.append(m)
 
     ax.set_xlim(xlim)
     ax.set_ylim(ylim)
-    return m #(models, lls)
+    return m # (models, lls)
 
 def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
     """Evaluate the GP objective function for a given data set for a range of signal to noise ratios and a range of lengthscales.
@@ -307,77 +308,73 @@ def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
     lls = []
     total_var = np.var(data['Y'])
     kernel = kernel_call(1, variance=1., lengthscale=1.)
-    Model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
+    model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
     for log_SNR in log_SNRs:
         SNR = 10.**log_SNR
-        noise_var = total_var/(1.+SNR)
+        noise_var = total_var / (1. + SNR)
         signal_var = total_var - noise_var
-        Model.kern['.*variance'] = signal_var
-        Model['noise_variance'] = noise_var
+        model.kern['.*variance'] = signal_var
+        model['noise_variance'] = noise_var
         length_scale_lls = []
 
         for length_scale in length_scales:
-            Model['.*lengthscale'] = length_scale
-            length_scale_lls.append(Model.log_likelihood())
+            model['.*lengthscale'] = length_scale
+            length_scale_lls.append(model.log_likelihood())
 
         lls.append(length_scale_lls)
 
     return np.array(lls)
 
-def sparse_GP_regression_1D(N = 400, num_inducing = 5, optim_iters=100):
+def sparse_GP_regression_1D(N=400, num_inducing=5, optim_iters=100):
     """Run a 1D example of a sparse GP regression."""
     # sample inputs and outputs
-    X = np.random.uniform(-3.,3.,(N,1))
-    Y = np.sin(X)+np.random.randn(N,1)*0.05
+    X = np.random.uniform(-3., 3., (N, 1))
+    Y = np.sin(X) + np.random.randn(N, 1) * 0.05
     # construct kernel
     rbf = GPy.kern.rbf(1)
-    noise = GPy.kern.white(1)
-    kernel = rbf + noise
     # create simple GP Model
-    m = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=num_inducing)
+    m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
 
     m.checkgrad(verbose=1)
-    m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
+    m.optimize('tnc', messages=1, max_f_eval=optim_iters)
     m.plot()
     return m
 
-def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100):
+def sparse_GP_regression_2D(N=400, num_inducing=50, optim_iters=100):
     """Run a 2D example of a sparse GP regression."""
-    X = np.random.uniform(-3.,3.,(N,2))
-    Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(N,1)*0.05
+    X = np.random.uniform(-3., 3., (N, 2))
+    Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(N, 1) * 0.05
 
     # construct kernel
     rbf = GPy.kern.rbf(2)
-    noise = GPy.kern.white(2)
-    kernel = rbf + noise
 
     # create simple GP Model
-    m = GPy.models.SparseGPRegression(X,Y,kernel, num_inducing = num_inducing)
+    m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
 
     # constrain all parameters to be positive (but not inducing inputs)
-    m.set('.*len',2.)
+    m['.*len'] = 2.
 
     m.checkgrad()
 
     # optimize and plot
-    m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
+    m.optimize('tnc', messages=1, max_f_eval=optim_iters)
     m.plot()
     print(m)
     return m
 
 def uncertain_inputs_sparse_regression(optim_iters=100):
     """Run a 1D example of a sparse GP regression with uncertain inputs."""
-    fig, axes = pb.subplots(1,2,figsize=(12,5))
+    fig, axes = pb.subplots(1, 2, figsize=(12, 5))
 
     # sample inputs and outputs
-    S = np.ones((20,1))
-    X = np.random.uniform(-3.,3.,(20,1))
-    Y = np.sin(X)+np.random.randn(20,1)*0.05
-    #likelihood = GPy.likelihoods.Gaussian(Y)
-    Z = np.random.uniform(-3.,3.,(7,1))
+    S = np.ones((20, 1))
+    X = np.random.uniform(-3., 3., (20, 1))
+    Y = np.sin(X) + np.random.randn(20, 1) * 0.05
+    # likelihood = GPy.likelihoods.Gaussian(Y)
+    Z = np.random.uniform(-3., 3., (7, 1))
 
-    k = GPy.kern.rbf(1) + GPy.kern.white(1)
+    k = GPy.kern.rbf(1)
 
     # create simple GP Model - no input uncertainty on this one
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
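_contour_data sweeps signal-to-noise ratios at fixed total variance; since SNR = signal_var / noise_var, the two variances are recovered by noise_var = total_var / (1 + SNR) and signal_var = total_var - noise_var. A quick numeric check of that identity:

    import numpy as np

    total_var, log_SNR = 2.0, 1.0
    SNR = 10. ** log_SNR
    noise_var = total_var / (1. + SNR)
    signal_var = total_var - noise_var
    assert np.isclose(signal_var / noise_var, SNR)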
@@ -386,7 +383,7 @@ def uncertain_inputs_sparse_regression(optim_iters=100):
     axes[0].set_title('no input uncertainty')
 
 
-    #the same Model with uncertainty
+    # the same Model with uncertainty
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S)
     m.optimize('scg', messages=1, max_f_eval=optim_iters)
     m.plot(ax=axes[1])
@@ -44,7 +44,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         assert Z.shape[1] == X.shape[1]
 
         if kernel is None:
-            kernel = kern.rbf(input_dim) + kern.white(input_dim)
+            kernel = kern.rbf(input_dim) # + kern.white(input_dim)
 
         SparseGP.__init__(self, X, likelihood, kernel, Z=Z, X_variance=X_variance, **kwargs)
         self.ensure_default_constraints()
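This default-kernel change is the heart of the "deleted some white kernels" part of the commit: with a Gaussian likelihood already modelling observation noise, a white kernel in the prior duplicates that role, so the defaults drop to the bare rbf. A sketch of the before and after, using the kern API as it appears in this diff:

    import GPy

    input_dim = 5
    old_default = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim)  # noise in the kernel
    new_default = GPy.kern.rbf(input_dim)                              # noise in the likelihood

The constant jitter added to Kmm in SparseGP._computations takes over the numerical-stability role that the small white variance used to play.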
@@ -175,7 +175,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         X = np.zeros((resolution ** 2, self.input_dim))
         indices = np.r_[:X.shape[0]]
         if labels is None:
-            labels = range(self.input_dim)
+            labels = range(self.output_dim)
 
         def plot_function(x):
             X[:, significant_dims] = x
@@ -29,7 +29,7 @@ class SparseGPRegression(SparseGP):
     def __init__(self, X, Y, kernel=None, normalize_X=False, normalize_Y=False, Z=None, num_inducing=10, X_variance=None):
         # kern defaults to rbf (plus white for stability)
         if kernel is None:
-            kernel = kern.rbf(X.shape[1]) + kern.white(X.shape[1], 1e-3)
+            kernel = kern.rbf(X.shape[1]) # + kern.white(X.shape[1], 1e-3)
 
         # Z defaults to a subset of the data
         if Z is None: