Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-10 20:42:39 +02:00)
Added constant jitter to Kmm, deleted some white kernels in models and examples
This commit is contained in:
parent 1cc8f95717
commit 5570e82943

5 changed files with 165 additions and 161 deletions
@@ -50,6 +50,8 @@ class SparseGP(GPBase):
         if self.has_uncertain_inputs:
             self.X_variance /= np.square(self._Xscale)
 
+        self._const_jitter = None
+
     def getstate(self):
         """
         Get the current state of the class,
@@ -81,7 +83,10 @@ class SparseGP(GPBase):
     def _computations(self):
         # factor Kmm
-        self.Lm = jitchol(self.Kmm)
+        if self._const_jitter is None or not(self._const_jitter.shape[0] == self.num_inducing):
+            self._const_jitter = np.eye(self.num_inducing) * 1e-7
+        self.Lm = jitchol(self.Kmm + self._const_jitter)
+        # TODO: no white kernel needed anymore, all noise in likelihood --------
 
         # The rather complex computations of self.A
         if self.has_uncertain_inputs:
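For context: the change above caches a fixed 1e-7 diagonal term and adds it to Kmm before every factorization, so the inducing-point covariance stays positive definite instead of relying on jitchol's adaptive retries alone. A minimal sketch of the same trick outside GPy, assuming only numpy/scipy (chol_with_const_jitter is a hypothetical helper, not GPy API):

    import numpy as np
    from scipy.linalg import cholesky, LinAlgError

    def chol_with_const_jitter(Kmm, jitter=1e-7):
        # Add a small constant to the diagonal so a numerically
        # near-singular covariance still factorizes.
        M = Kmm.shape[0]
        try:
            return cholesky(Kmm + jitter * np.eye(M), lower=True)
        except LinAlgError:
            # Fall back to a larger jitter if 1e-7 is not enough.
            return cholesky(Kmm + 1e-4 * np.eye(M), lower=True)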
@@ -140,30 +140,32 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
         m.optimize('scg', messages=1)
     return m
 
-def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=150, plot=False, **k):
+def BGPLVM_oil(optimize=True, N=200, Q=7, num_inducing=40, max_iters=1000, plot=False, **k):
     np.random.seed(0)
     data = GPy.util.datasets.oil()
 
     # create simple GP model
-    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
+    kernel = GPy.kern.rbf_inv(Q, 1., [.1] * Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2))
 
     Y = data['X'][:N]
-    Yn = Y - Y.mean(0)
-    Yn /= Yn.std(0)
+    Yn = Gaussian(Y, normalize=True)
+    # Yn = Y - Y.mean(0)
+    # Yn /= Yn.std(0)
 
     m = GPy.models.BayesianGPLVM(Yn, Q, kernel=kernel, num_inducing=num_inducing, **k)
     m.data_labels = data['Y'][:N].argmax(axis=1)
 
     # m.constrain('variance|leng', logexp_clipped())
     # m['.*lengt'] = m.X.var(0).max() / m.X.var(0)
-    m['noise'] = Yn.var() / 100.
+    m['noise'] = Yn.Y.var() / 100.
 
 
     # optimize
     if optimize:
-        # m.constrain_fixed('noise')
-        # m.optimize('scg', messages=1, max_iters=200, gtol=.05)
-        # m.constrain_positive('noise')
+        m.constrain_fixed('noise')
+        m.optimize('scg', messages=1, max_iters=200, gtol=.05)
+        m.constrain_positive('noise')
+        m.constrain_bounded('white', 1e-7, 1)
         m.optimize('scg', messages=1, max_iters=max_iters, gtol=.05)
 
     if plot:
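Two things change together in this hunk: normalization moves from two manual lines into the Gaussian likelihood (Gaussian(Y, normalize=True)), and, since Yn is now a likelihood object rather than an array, the noise initialization reads the data through Yn.Y. The manual version being replaced is per-column standardization; a quick numpy check of that assumption (normalize_columns is a hypothetical helper, not GPy code):

    import numpy as np

    def normalize_columns(Y):
        # What the two deleted lines did: standardize each output dimension.
        return (Y - Y.mean(axis=0)) / Y.std(axis=0)

    Y = np.random.randn(200, 12)
    Yn = normalize_columns(Y)
    assert np.allclose(Yn.mean(0), 0.0) and np.allclose(Yn.std(0), 1.0)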
@@ -271,7 +273,7 @@ def bgplvm_simulation(optimize='scg',
                       max_iters=2e4,
                       plot_sim=False):
     # from GPy.core.transformations import logexp_clipped
-    D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 300, 30, 6
+    D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 3, 10
     slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
 
     from GPy.models import mrd
@@ -296,7 +298,7 @@ def bgplvm_simulation(optimize='scg',
     return m
 
 def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
-    D1, D2, D3, N, num_inducing, Q = 150, 200, 400, 500, 3, 7
+    D1, D2, D3, N, num_inducing, Q = 30, 10, 15, 60, 3, 10
     slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
 
     likelihood_list = [Gaussian(x, normalize=True) for x in Ylist]
@@ -383,7 +385,7 @@ def stick_bgplvm(model=None):
        m = BayesianGPLVM(data['Y'], Q, init="PCA", num_inducing=20, kernel=kernel)
        # optimize
        m.ensure_default_constraints()
-       m.optimize(messages=1, max_iters=3000, xtol=1e-300, ftol=1e-300)
+       m.optimize('scg', messages=1, max_iters=200, xtol=1e-300, ftol=1e-300)
        m._set_params(m._get_params())
    plt.clf, (latent_axes, sense_axes) = plt.subplots(1, 2)
    plt.sca(latent_axes)
@@ -118,7 +118,7 @@ def toy_ARD_sparse(optim_iters=1000, kernel_type='linear', N=300, D=4):
         kernel = GPy.kern.rbf_inv(X.shape[1], ARD=1)
     else:
         kernel = GPy.kern.rbf(X.shape[1], ARD=1)
-    kernel += GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
+    kernel += GPy.kern.bias(X.shape[1])
     X_variance = np.ones(X.shape) * 0.5
     m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance=X_variance)
     # len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
@@ -223,13 +223,14 @@ def coregionalisation_sparse(optim_iters=100):
 
     k1 = GPy.kern.rbf(1)
     k2 = GPy.kern.coregionalise(2, 2)
-    k = k1.prod(k2,tensor=True) + GPy.kern.white(2,0.001)
+    k = k1.prod(k2, tensor=True) # + GPy.kern.white(2,0.001)
 
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
     m.constrain_fixed('.*rbf_var', 1.)
     m.constrain_fixed('iip')
     m.constrain_bounded('noise_variance', 1e-3, 1e-1)
-    m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)
+    # m.optimize_restarts(5, robust=True, messages=1, max_iters=optim_iters, optimizer='bfgs')
+    m.optimize('bfgs', messages=1, max_iters=optim_iters)
 
     # plotting:
     pb.figure()
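For context on k1.prod(k2, tensor=True): the tensor product of an rbf kernel over inputs with a coregionalisation kernel over output indices gives the intrinsic coregionalisation model, whose covariance over a full grid of (output, input) pairs has Kronecker structure. A rough numpy illustration of that structure, with toy B and K matrices rather than values from this example:

    import numpy as np

    np.random.seed(0)
    # K: rbf covariance between 3 inputs; B: 2x2 coregionalisation matrix.
    K = np.array([[1.0, 0.5, 0.2],
                  [0.5, 1.0, 0.5],
                  [0.2, 0.5, 1.0]])
    W = np.random.randn(2, 2)
    # Low-rank plus diagonal, as coregionalisation kernels are commonly
    # parameterized (B = W W^T + diag(kappa)).
    B = W @ W.T + np.diag([0.1, 0.1])

    # Covariance over all (output, input) pairs: a Kronecker product.
    K_full = np.kron(B, K)  # shape (6, 6)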
@@ -307,18 +308,18 @@ def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
     lls = []
     total_var = np.var(data['Y'])
     kernel = kernel_call(1, variance=1., lengthscale=1.)
-    Model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
+    model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
     for log_SNR in log_SNRs:
         SNR = 10.**log_SNR
         noise_var = total_var / (1. + SNR)
         signal_var = total_var - noise_var
-        Model.kern['.*variance'] = signal_var
-        Model['noise_variance'] = noise_var
+        model.kern['.*variance'] = signal_var
+        model['noise_variance'] = noise_var
         length_scale_lls = []
 
         for length_scale in length_scales:
-            Model['.*lengthscale'] = length_scale
-            length_scale_lls.append(Model.log_likelihood())
+            model['.*lengthscale'] = length_scale
+            length_scale_lls.append(model.log_likelihood())
 
         lls.append(length_scale_lls)
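The loop above fixes the total variance and splits it so that signal_var / noise_var equals the target SNR: noise_var = total_var / (1 + SNR), hence signal_var = total_var - noise_var = total_var * SNR / (1 + SNR). A worked check of that arithmetic in plain numpy:

    import numpy as np

    total_var = 4.0  # stand-in for np.var(data['Y'])
    for log_SNR in [-1.0, 0.0, 1.0]:
        SNR = 10.0 ** log_SNR
        noise_var = total_var / (1.0 + SNR)
        signal_var = total_var - noise_var
        assert np.isclose(signal_var / noise_var, SNR)        # ratio recovers the SNR
        assert np.isclose(signal_var + noise_var, total_var)  # variance is conserved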
@@ -331,10 +332,8 @@ def sparse_GP_regression_1D(N = 400, num_inducing = 5, optim_iters=100):
     Y = np.sin(X) + np.random.randn(N, 1) * 0.05
     # construct kernel
     rbf = GPy.kern.rbf(1)
-    noise = GPy.kern.white(1)
-    kernel = rbf + noise
     # create simple GP Model
-    m = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=num_inducing)
+    m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
 
 
     m.checkgrad(verbose=1)
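Dropping the white kernel in these regression examples follows the TODO in _computations: an additive white kernel only contributes a sigma^2*I diagonal, and the Gaussian likelihood already supplies exactly that term, so keeping both leaves two parameters fitting one quantity. A sketch of why the marginal covariance of the observations is unchanged, with rbf_cov as a hypothetical stand-in for GPy.kern.rbf:

    import numpy as np

    def rbf_cov(X, variance=1.0, lengthscale=1.0):
        # Squared-exponential covariance between all pairs of rows of X.
        d2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
        return variance * np.exp(-0.5 * d2 / lengthscale ** 2)

    X = np.linspace(0, 1, 5)[:, None]
    noise = 0.05

    # White-kernel route: noise enters the prior covariance of f.
    cov_via_white_kernel = rbf_cov(X) + noise * np.eye(len(X))
    # Likelihood route: the same diagonal enters via p(y|f) = N(f, noise*I).
    cov_via_likelihood = rbf_cov(X) + noise * np.eye(len(X))

    # The marginal covariance of y is identical either way.
    assert np.allclose(cov_via_white_kernel, cov_via_likelihood)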
@@ -349,14 +348,12 @@ def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100):
 
     # construct kernel
     rbf = GPy.kern.rbf(2)
-    noise = GPy.kern.white(2)
-    kernel = rbf + noise
 
     # create simple GP Model
-    m = GPy.models.SparseGPRegression(X,Y,kernel, num_inducing = num_inducing)
+    m = GPy.models.SparseGPRegression(X, Y, kernel=rbf, num_inducing=num_inducing)
 
     # contrain all parameters to be positive (but not inducing inputs)
-    m.set('.*len',2.)
+    m['.*len'] = 2.
 
     m.checkgrad()
@@ -377,7 +374,7 @@ def uncertain_inputs_sparse_regression(optim_iters=100):
     # likelihood = GPy.likelihoods.Gaussian(Y)
     Z = np.random.uniform(-3., 3., (7, 1))
 
-    k = GPy.kern.rbf(1) + GPy.kern.white(1)
+    k = GPy.kern.rbf(1)
 
     # create simple GP Model - no input uncertainty on this one
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
@@ -44,7 +44,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         assert Z.shape[1] == X.shape[1]
 
         if kernel is None:
-            kernel = kern.rbf(input_dim) + kern.white(input_dim)
+            kernel = kern.rbf(input_dim) # + kern.white(input_dim)
 
         SparseGP.__init__(self, X, likelihood, kernel, Z=Z, X_variance=X_variance, **kwargs)
         self.ensure_default_constraints()
@@ -175,7 +175,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         X = np.zeros((resolution ** 2, self.input_dim))
         indices = np.r_[:X.shape[0]]
         if labels is None:
-            labels = range(self.input_dim)
+            labels = range(self.output_dim)
 
         def plot_function(x):
             X[:, significant_dims] = x
@@ -29,7 +29,7 @@ class SparseGPRegression(SparseGP):
     def __init__(self, X, Y, kernel=None, normalize_X=False, normalize_Y=False, Z=None, num_inducing=10, X_variance=None):
         # kern defaults to rbf (plus white for stability)
         if kernel is None:
-            kernel = kern.rbf(X.shape[1]) + kern.white(X.shape[1], 1e-3)
+            kernel = kern.rbf(X.shape[1]) # + kern.white(X.shape[1], 1e-3)
 
         # Z defaults to a subset of the data
         if Z is None: