an assortment of fixes

This commit is contained in:
James Hensman 2013-06-05 14:52:37 +01:00
parent 7040b26f41
commit 56a4bc4e21
5 changed files with 13 additions and 19 deletions

View file

@@ -224,14 +224,10 @@ class model(parameterised):
for s in positive_strings:
for i in self.grep_param_names(".*"+s):
if not (i in currently_constrained):
#to_make_positive.append(re.escape(param_names[i]))
to_make_positive.append(i)
if len(to_make_positive):
#self.constrain_positive('(' + '|'.join(to_make_positive) + ')')
self.constrain_positive(np.asarray(to_make_positive))
def objective_function(self, x):
"""
The objective function passed to the optimizer. It combines the likelihood and the priors.

View file

@@ -142,17 +142,17 @@ class SparseGP(GPBase):
def log_likelihood(self):
""" Compute the (lower bound on the) log marginal likelihood """
if self.likelihood.is_heteroscedastic:
A = -0.5 * self.N * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
A = -0.5 * self.N * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
else:
A = -0.5 * self.N * self.input_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
C = -self.input_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
A = -0.5 * self.N * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
return A + B + C + D + self.likelihood.Z
def _set_params(self, p):
self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim)
self.Z = p[:self.num_inducing * self.output_dim].reshape(self.num_inducing, self.input_dim)
self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam])
self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:])
self._compute_kernel_matrices()

View file

@@ -5,7 +5,6 @@ import numpy as np
from matplotlib import pyplot as plt
import GPy
from GPy.util.datasets import swiss_roll_generated
from GPy.core.transformations import logexp
from GPy.models.bayesian_gplvm import BayesianGPLVM
@@ -64,7 +63,7 @@ def GPLVM_oil_100(optimize=True):
return m
def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
from GPy.util.datasets import swiss_roll
from GPy.util.datasets import swiss_roll_generated
from GPy.core.transformations import logexp_clipped
data = swiss_roll_generated(N=N, sigma=sigma)
@@ -109,10 +108,10 @@ def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
m.data_colors = c
m.data_t = t
m.constrain('variance|length', logexp_clipped())
m['lengthscale'] = 1. # X.var(0).max() / X.var(0)
m['noise'] = Y.var() / 100.
m.ensure_default_constraints()
m['rbf_lengthscale'] = 1. # X.var(0).max() / X.var(0)
m['noise_variance'] = Y.var() / 100.
m['bias_variance'] = 0.05
if optimize:
m.optimize('scg', messages=1)

View file

@@ -159,13 +159,13 @@ def coregionalisation_sparse(optim_iters=100):
k = k1.prod(k2,tensor=True) + GPy.kern.white(2,0.001)
m = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=Z)
m.scale_factor = 10000.
m.constrain_fixed('.*rbf_var',1.)
#m.constrain_positive('kappa')
m.constrain_fixed('iip')
m.constrain_bounded('noise_variance',1e-3,1e-1)
m.ensure_default_constraints()
m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)
#plotting:
pb.figure()
Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
@@ -300,7 +300,6 @@ def sparse_GP_regression_2D(N = 400, M = 50, optim_iters=100):
m.checkgrad()
# optimize and plot
pb.figure()
m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
m.plot()
print(m)

View file

@@ -54,7 +54,7 @@ class Gaussian(likelihood):
x = np.float64(x)
if np.all(self._variance != x):
if x == 0.:
self.precision = None
self.precision = np.inf
self.V = None
else:
self.precision = 1. / x