From 56a4bc4e211063b47c4d4bd5e2696183754cbdcb Mon Sep 17 00:00:00 2001
From: James Hensman
Date: Wed, 5 Jun 2013 14:52:37 +0100
Subject: [PATCH] an assortment of fixes

---
 GPy/core/model.py                        |  4 ----
 GPy/core/sparse_gp.py                    | 12 ++++++------
 GPy/examples/dimensionality_reduction.py |  9 ++++-----
 GPy/examples/regression.py               |  5 ++---
 GPy/likelihoods/gaussian.py              |  2 +-
 5 files changed, 13 insertions(+), 19 deletions(-)

diff --git a/GPy/core/model.py b/GPy/core/model.py
index 19e38080..7cc21080 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -224,14 +224,10 @@ class model(parameterised):
         for s in positive_strings:
             for i in self.grep_param_names(".*"+s):
                 if not (i in currently_constrained):
-                    #to_make_positive.append(re.escape(param_names[i]))
                     to_make_positive.append(i)
         if len(to_make_positive):
-            #self.constrain_positive('(' + '|'.join(to_make_positive) + ')')
             self.constrain_positive(np.asarray(to_make_positive))
-
-
 
     def objective_function(self, x):
         """
         The objective function passed to the optimizer. It combines the likelihood and the priors.
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 26870927..5ac9de9d 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -142,17 +142,17 @@ class SparseGP(GPBase):
     def log_likelihood(self):
         """ Compute the (lower bound on the) log marginal likelihood """
         if self.likelihood.is_heteroscedastic:
-            A = -0.5 * self.N * self.input_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
-            B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
+            A = -0.5 * self.N * self.output_dim * np.log(2.*np.pi) + 0.5 * np.sum(np.log(self.likelihood.precision)) - 0.5 * np.sum(self.likelihood.V * self.likelihood.Y)
+            B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision.flatten() * self.psi0) - np.trace(self.A))
         else:
-            A = -0.5 * self.N * self.input_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
-            B = -0.5 * self.input_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
-        C = -self.input_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
+            A = -0.5 * self.N * self.output_dim * (np.log(2.*np.pi) - np.log(self.likelihood.precision)) - 0.5 * self.likelihood.precision * self.likelihood.trYYT
+            B = -0.5 * self.output_dim * (np.sum(self.likelihood.precision * self.psi0) - np.trace(self.A))
+        C = -self.output_dim * (np.sum(np.log(np.diag(self.LB)))) # + 0.5 * self.num_inducing * np.log(sf2))
         D = 0.5 * np.sum(np.square(self._LBi_Lmi_psi1V))
         return A + B + C + D + self.likelihood.Z

     def _set_params(self, p):
-        self.Z = p[:self.num_inducing * self.output_dim].reshape(self.num_inducing, self.input_dim)
+        self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim)
         self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.Nparam])
         self.likelihood._set_params(p[self.Z.size + self.kern.Nparam:])
         self._compute_kernel_matrices()
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index 60726b1d..621a3749 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -5,7 +5,6 @@
 import numpy as np
 from matplotlib import pyplot as plt
 import GPy
-from GPy.util.datasets import swiss_roll_generated
 from GPy.core.transformations import logexp
 from GPy.models.bayesian_gplvm import BayesianGPLVM
 
@@ -64,7 +63,7 @@ def GPLVM_oil_100(optimize=True):
     return m

 def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
-    from GPy.util.datasets import swiss_roll
+    from GPy.util.datasets import swiss_roll_generated
     from GPy.core.transformations import logexp_clipped

     data = swiss_roll_generated(N=N, sigma=sigma)
@@ -109,10 +108,10 @@ def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
     m.data_colors = c
     m.data_t = t

-    m.constrain('variance|length', logexp_clipped())
-    m['lengthscale'] = 1. # X.var(0).max() / X.var(0)
-    m['noise'] = Y.var() / 100.
     m.ensure_default_constraints()
+    m['rbf_lengthscale'] = 1. # X.var(0).max() / X.var(0)
+    m['noise_variance'] = Y.var() / 100.
+    m['bias_variance'] = 0.05

     if optimize:
         m.optimize('scg', messages=1)
diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py
index be3a71bd..1512e842 100644
--- a/GPy/examples/regression.py
+++ b/GPy/examples/regression.py
@@ -159,13 +159,13 @@ def coregionalisation_sparse(optim_iters=100):
     k = k1.prod(k2,tensor=True) + GPy.kern.white(2,0.001)

     m = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=Z)
-    m.scale_factor = 10000.
     m.constrain_fixed('.*rbf_var',1.)
-    #m.constrain_positive('kappa')
     m.constrain_fixed('iip')
+    m.constrain_bounded('noise_variance',1e-3,1e-1)
     m.ensure_default_constraints()
     m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)

+    #plotting:
     pb.figure()
     Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
     Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
@@ -300,7 +300,6 @@ def sparse_GP_regression_2D(N = 400, M = 50, optim_iters=100):
     m.checkgrad()

     # optimize and plot
-    pb.figure()
     m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
     m.plot()
     print(m)
diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index 886d8873..161b54c7 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -54,7 +54,7 @@ class Gaussian(likelihood):
         x = np.float64(x)
         if np.all(self._variance != x):
             if x == 0.:
-                self.precision = None
+                self.precision = np.inf
                 self.V = None
             else:
                 self.precision = 1. / x
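
Two of the fixes above deserve a short note. In sparse_gp.py, the bound terms A, B and C scale with the number of output columns of Y (output_dim, the D in the usual N-by-D notation), not with the input dimensionality, which suggests the original input_dim factors came from a Q/D naming mix-up; correspondingly, in _set_params the slice taken from the parameter vector must have length num_inducing * input_dim to match the (num_inducing, input_dim) reshape of Z. In gaussian.py, a noise variance of exactly zero used to store None as the precision, which breaks any later arithmetic such as np.log(self.precision); storing np.inf (the limit of 1/x as x approaches 0) keeps those expressions well defined. The snippet below is a minimal, self-contained sketch of that setter logic; NoiseModel and its method name are hypothetical stand-ins, not GPy's actual Gaussian likelihood class:

```python
import numpy as np

class NoiseModel(object):
    """Hypothetical stand-in for a likelihood with a settable noise variance."""

    def __init__(self, variance):
        self._variance = None
        self.precision = None
        self.set_variance(variance)

    def set_variance(self, x):
        x = np.float64(x)
        if np.all(self._variance != x):   # only recompute when the value changes
            if x == 0.:
                # The fix: zero variance means infinite precision (np.inf),
                # not None, so downstream arithmetic stays well defined.
                self.precision = np.inf
            else:
                self.precision = 1. / x
            self._variance = x

m = NoiseModel(0.)
print(m.precision)          # inf
print(1. / m.precision)     # 0.0 -- still valid arithmetic, unlike with None
print(np.log(m.precision))  # inf, rather than a TypeError
```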