diff --git a/GPy/core/transformations.py b/GPy/core/transformations.py
index f7e59ab6..fcbfb548 100644
--- a/GPy/core/transformations.py
+++ b/GPy/core/transformations.py
@@ -39,8 +39,8 @@ class logexp(transformation):
         return '(+ve)'
 
 class logexp_clipped(transformation):
-    max_bound = 1e300
-    min_bound = 1e-10
+    max_bound = 1e250
+    min_bound = 1e-9
     log_max_bound = np.log(max_bound)
     log_min_bound = np.log(min_bound)
     def __init__(self, lower=1e-6):
@@ -49,11 +49,13 @@ class logexp_clipped(transformation):
     def f(self, x):
         exp = np.exp(np.clip(x, self.log_min_bound, self.log_max_bound))
         f = np.log(1. + exp)
+        if np.isnan(f).any():
+            import ipdb;ipdb.set_trace()
         return f
     def finv(self, f):
         return np.log(np.exp(np.clip(f, self.min_bound, self.max_bound)) - 1.)
     def gradfactor(self, f):
-        ef = np.exp(f)
+        ef = np.exp(f) # np.clip(f, self.min_bound, self.max_bound))
         gf = (ef - 1.) / ef
         return np.where(f < self.lower, 0, gf)
     def initialize(self, f):
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index 18995c50..4f713c1f 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -273,8 +273,8 @@ def bgplvm_simulation(optimize='scg',
     pylab.figure(); pylab.axis(); m.kern.plot_ARD()
     return m
 
-def mrd_simulation(plot_sim=False):
-    D1, D2, D3, N, M, Q = 150, 250, 300, 700, 3, 7
+def mrd_simulation(optimize=True, plot_sim=False):
+    D1, D2, D3, N, M, Q = 150, 250, 30, 300, 3, 7
     slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim)
 
     from GPy.models import mrd
@@ -292,6 +292,13 @@ def mrd_simulation(plot_sim=False):
     m.constrain('variance|noise', logexp_clipped())
     m.ensure_default_constraints()
 
+    # DEBUG
+    np.seterr("raise")
+
+    if optimize:
+        print "Optimizing Model:"
+        m.optimize('scg', messages=1, max_iters=3e3)
+
     return m
 
 def brendan_faces():
diff --git a/GPy/inference/SCG.py b/GPy/inference/SCG.py
index e6ef25c0..f190d002 100644
--- a/GPy/inference/SCG.py
+++ b/GPy/inference/SCG.py
@@ -85,8 +85,6 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
         # Increase effective curvature and evaluate step size alpha.
         delta = theta + beta * kappa
         if delta <= 0:
-            if display:
-                print ""
             delta = beta * kappa
             beta = beta - theta / kappa
 
diff --git a/GPy/models/Bayesian_GPLVM.py b/GPy/models/Bayesian_GPLVM.py
index 5511a1b9..464d7425 100644
--- a/GPy/models/Bayesian_GPLVM.py
+++ b/GPy/models/Bayesian_GPLVM.py
@@ -171,9 +171,6 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         self.dbound_dZtheta = sparse_GP._log_likelihood_gradients(self)
         return np.hstack((self.dbound_dmuS.flatten(), self.dbound_dZtheta))
 
-    def _log_likelihood_normal_gradients(self):
-        Si, _, _, _ = pdinv(self.X_variance)
-
     def plot_latent(self, which_indices=None, *args, **kwargs):
         if which_indices is None: