Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-04-30 15:26:23 +02:00

an assortment of fixes

parent 7040b26f41
commit 56a4bc4e21

5 changed files with 13 additions and 19 deletions

@@ -224,14 +224,10 @@ class model(parameterised):
         for s in positive_strings:
             for i in self.grep_param_names(".*"+s):
                 if not (i in currently_constrained):
-                    #to_make_positive.append(re.escape(param_names[i]))
                     to_make_positive.append(i)
         if len(to_make_positive):
-            #self.constrain_positive('(' + '|'.join(to_make_positive) + ')')
             self.constrain_positive(np.asarray(to_make_positive))
-
-
 
     def objective_function(self, x):
         """
         The objective function passed to the optimizer. It combines the likelihood and the priors.
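
Note on the hunk above: ensure_default_constraints now hands integer parameter indices straight to constrain_positive instead of building a regex out of the parameter names, which sidesteps the escaping problems a '|'.join of raw names can hit. A minimal standalone sketch of the index-collection pattern (hypothetical helper, not GPy's actual internals):

    import re
    import numpy as np

    def positive_indices(param_names, positive_strings, currently_constrained):
        # Collect indices of not-yet-constrained parameters whose names
        # match any of the "positive" patterns.
        to_make_positive = []
        for s in positive_strings:
            for i, name in enumerate(param_names):
                if re.search(".*" + s, name) and i not in currently_constrained:
                    to_make_positive.append(i)
        return np.asarray(to_make_positive)

    # positive_indices(['rbf_variance', 'rbf_lengthscale', 'noise_variance'],
    #                  ['variance', 'lengthscale'], set())  ->  array([0, 2, 1])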

@@ -142,17 +142,17 @@ class SparseGP(GPBase):
     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
         if self.likelihood.is_heteroscedastic:
-            A = -0.5 * self.N * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
-            B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
+            A = -0.5 * self.N * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
+            B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
         else:
-            A = -0.5 * self.N * self.input_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
-            B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
-        C = -self.input_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
+            A = -0.5 * self.N * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
+            B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
+        C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
         D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
         return A + B + C + D + self.likelihood.Z
 
     def _set_params(self, p):
-        self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim)
+        self.Z = p[:self.num_inducing * self.output_dim].reshape(self.num_inducing, self.input_dim)
         self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam])
         self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:])
         self._compute_kernel_matrices()
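
Why the input_dim -> output_dim fixes above matter: the Gaussian normalizing constant in the bound counts one term per data point per output column, i.e. N * output_dim of them, so scaling by input_dim is wrong whenever the two differ. A self-contained sanity check of that scaling (plain NumPy/SciPy with iid homoscedastic noise; nothing GPy-specific):

    import numpy as np
    from scipy.stats import norm

    N, D, sigma2 = 4, 3, 0.5          # D plays the role of output_dim
    Y = np.random.randn(N, D)

    # Direct evaluation: N * D independent Gaussian log-densities.
    direct = norm.logpdf(Y, scale=np.sqrt(sigma2)).sum()

    # Closed form matching the homoscedastic A-term, with precision = 1/sigma2
    # and trace(Y Y^T) playing the role of trYYT.
    closed = (-0.5 * N * D * (np.log(2. * np.pi) - np.log(1. / sigma2))
              - 0.5 * (1. / sigma2) * np.trace(Y.dot(Y.T)))

    assert np.isclose(direct, closed)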

@@ -5,7 +5,6 @@ import numpy as np
 from matplotlib import pyplot as plt
 
 import GPy
-from GPy.util.datasets import swiss_roll_generated
 from GPy.core.transformations import logexp
 from GPy.models.bayesian_gplvm import BayesianGPLVM
 

@@ -64,7 +63,7 @@ def GPLVM_oil_100(optimize=True):
     return m
 
 def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
-    from GPy.util.datasets import swiss_roll
+    from GPy.util.datasets import swiss_roll_generated
     from GPy.core.transformations import logexp_clipped
 
     data = swiss_roll_generated(N=N, sigma=sigma)
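
For reference, logexp and logexp_clipped are the transformations that keep parameters positive while the optimizer works in an unconstrained space. A standalone sketch of the underlying softplus map and its inverse (my own version, assuming logexp denotes softplus as in GPy of this era; the clipped variant additionally guards the exponential against overflow):

    import numpy as np

    def logexp(x):
        # Softplus: map an unconstrained optimizer variable to a positive value.
        return np.log1p(np.exp(x))

    def logexp_inverse(f):
        # Recover the unconstrained variable from a positive parameter value.
        return np.log(np.expm1(f))

    x = 0.3
    assert np.isclose(logexp_inverse(logexp(x)), x)
    assert logexp(-50.) > 0.   # strictly positive even far into the left tail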

@@ -109,10 +108,10 @@ def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
     m.data_colors = c
     m.data_t = t
 
-    m.constrain('variance|length', logexp_clipped())
-    m['lengthscale'] = 1. # X.var(0).max() / X.var(0)
-    m['noise'] = Y.var() / 100.
     m.ensure_default_constraints()
+    m['rbf_lengthscale'] = 1. # X.var(0).max() / X.var(0)
+    m['noise_variance'] = Y.var() / 100.
+    m['bias_variance'] = 0.05
 
     if optimize:
         m.optimize('scg', messages=1)
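
The renamed keys above ('rbf_lengthscale', 'noise_variance', 'bias_variance') matter because parameters are addressed by matching the given string against the model's parameter names, so the keys must track the names the model actually exposes. A toy illustration of that name-matching pattern (hypothetical ToyModel, not GPy's implementation):

    import re
    import numpy as np

    class ToyModel:
        def __init__(self):
            self.param_names = ['rbf_variance', 'rbf_lengthscale', 'noise_variance']
            self.params = np.ones(3)

        def __setitem__(self, pattern, value):
            # Assign to every parameter whose name matches the pattern.
            hits = [i for i, n in enumerate(self.param_names) if re.search(pattern, n)]
            self.params[hits] = value

    m = ToyModel()
    m['noise_variance'] = 0.01   # matches index 2 only
    m['variance'] = 0.5          # matches indices 0 and 2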

@@ -159,13 +159,13 @@ def coregionalisation_sparse(optim_iters=100):
     k = k1.prod(k2,tensor=True) + GPy.kern.white(2,0.001)
 
     m = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=Z)
-    m.scale_factor = 10000.
     m.constrain_fixed('.*rbf_var',1.)
-    #m.constrain_positive('kappa')
     m.constrain_fixed('iip')
+    m.constrain_bounded('noise_variance',1e-3,1e-1)
     m.ensure_default_constraints()
     m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)
 
+    #plotting:
     pb.figure()
     Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
     Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
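
The added constrain_bounded call keeps the noise variance inside (1e-3, 1e-1) while still letting the optimizer move freely. One standard way to realize such a box constraint (a sketch of the general technique, not necessarily GPy's exact transform) is a logistic squash of the unconstrained variable:

    import numpy as np

    def bounded(x, lo, hi):
        # Map an unconstrained value into the open interval (lo, hi).
        return lo + (hi - lo) / (1. + np.exp(-x))

    # Any optimizer step on x keeps the constrained value inside its bounds:
    assert 1e-3 < bounded(-10., 1e-3, 1e-1) < 1e-1
    assert 1e-3 < bounded(+10., 1e-3, 1e-1) < 1e-1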

@@ -300,7 +300,6 @@ def sparse_GP_regression_2D(N = 400, M = 50, optim_iters=100):
     m.checkgrad()
 
     # optimize and plot
-    pb.figure()
     m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
     m.plot()
     print(m)

@@ -54,7 +54,7 @@ class Gaussian(likelihood):
         x = np.float64(x)
         if np.all(self._variance != x):
             if x == 0.:
-                self.precision = None
+                self.precision = np.inf
                 self.V = None
             else:
                 self.precision = 1. / x
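
On the final hunk: representing zero variance with np.inf rather than None keeps the zero-noise limit arithmetically usable, since None raises a TypeError the moment the precision enters an expression. A quick illustration (plain NumPy, outside GPy):

    import numpy as np

    variance = 0.
    precision = np.inf if variance == 0. else 1. / variance

    assert np.isinf(precision)     # zero-noise limit is representable
    assert 1. / precision == 0.    # downstream variance terms collapse to zero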