diff --git a/GPy/core/parameterized.py b/GPy/core/parameterized.py index cad4d2a9..fe6eba62 100644 --- a/GPy/core/parameterized.py +++ b/GPy/core/parameterized.py @@ -195,15 +195,15 @@ class Parameterized(object): def constrain_negative(self, regexp): """ Set negative constraints. """ - self.constrain(regexp, transformations.negative_logexp()) + self.constrain(regexp, transformations.Negative_logexp()) def constrain_positive(self, regexp): """ Set positive constraints. """ - self.constrain(regexp, transformations.logexp()) + self.constrain(regexp, transformations.Logexp()) def constrain_bounded(self, regexp, lower, upper): """ Set bounded constraints. """ - self.constrain(regexp, transformations.logistic(lower, upper)) + self.constrain(regexp, transformations.Logistic(lower, upper)) def all_constrained_indices(self): if len(self.constrained_indices) or len(self.fixed_indices): @@ -212,7 +212,7 @@ class Parameterized(object): return np.empty(shape=(0,)) def constrain(self, regexp, transform): - assert isinstance(transform, transformations.transformation) + assert isinstance(transform, transformations.Transformation) matches = self.grep_param_names(regexp) overlap = set(matches).intersection(set(self.all_constrained_indices())) @@ -278,7 +278,7 @@ class Parameterized(object): def _untransform_params(self, x): """ - The transformation required for _set_params_transformed. + The Transformation required for _set_params_transformed. 
This moves the vector x seen by the optimiser (unconstrained) to the valid parameter vector seen by the model diff --git a/GPy/core/transformations.py b/GPy/core/transformations.py index 419bc54e..eeba3e51 100644 --- a/GPy/core/transformations.py +++ b/GPy/core/transformations.py @@ -7,7 +7,7 @@ from GPy.core.domains import POSITIVE, NEGATIVE, BOUNDED import sys lim_val = -np.log(sys.float_info.epsilon) -class transformation(object): +class Transformation(object): domain = None def f(self, x): raise NotImplementedError @@ -24,7 +24,7 @@ class transformation(object): def __str__(self): raise NotImplementedError -class logexp(transformation): +class Logexp(Transformation): domain = POSITIVE def f(self, x): return np.where(x>lim_val, x, np.log(1. + np.exp(x))) @@ -39,22 +39,22 @@ class logexp(transformation): def __str__(self): return '(+ve)' -class negative_logexp(transformation): +class Negative_logexp(Transformation): domain = NEGATIVE def f(self, x): - return -logexp.f(x) #np.log(1. + np.exp(x)) + return -Logexp.f(x) # np.log(1. + np.exp(x)) def finv(self, f): - return logexp.finv(-f) #np.log(np.exp(-f) - 1.) + return Logexp.finv(-f) # np.log(np.exp(-f) - 1.) def gradfactor(self, f): - return -logexp.gradfactor(-f) + return -Logexp.gradfactor(-f) #ef = np.exp(-f) #return -(ef - 1.) / ef def initialize(self, f): - return -logexp.initialize(f) #np.abs(f) + return -Logexp.initialize(f) # np.abs(f) def __str__(self): return '(-ve)' -class logexp_clipped(logexp): +class LogexpClipped(Logexp): max_bound = 1e100 min_bound = 1e-10 log_max_bound = np.log(max_bound) @@ -81,8 +81,8 @@ class logexp_clipped(logexp): def __str__(self): return '(+ve_c)' -class exponent(transformation): - # TODO: can't allow this to go to zero, need to set a lower bound. Similar with negative exponent below. See old MATLAB code. +class Exponent(Transformation): + # TODO: can't allow this to go to zero, need to set a lower bound. Similar with negative Exponent below. See old MATLAB code. 
domain = POSITIVE def f(self, x): return np.where(x<lim_val, np.where(x>-lim_val, np.exp(x), np.exp(-lim_val)), np.exp(lim_val)) @@ -97,20 +97,20 @@ class exponent(transformation): def __str__(self): return '(+ve)' -class negative_exponent(exponent): +class NegativeExponent(Exponent): domain = NEGATIVE def f(self, x): - return -exponent.f(x) + return -Exponent.f(x) def finv(self, f): - return exponent.finv(-f) + return Exponent.finv(-f) def gradfactor(self, f): return f def initialize(self, f): - return -exponent.initialize(f) #np.abs(f) + return -Exponent.initialize(f) #np.abs(f) def __str__(self): return '(-ve)' -class square(transformation): +class Square(Transformation): domain = POSITIVE def f(self, x): return x ** 2 @@ -123,7 +123,7 @@ class square(transformation): def __str__(self): return '(+sq)' -class logistic(transformation): +class Logistic(Transformation): domain = BOUNDED def __init__(self, lower, upper): assert lower < upper diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index 005b131f..c62d910e 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -5,7 +5,7 @@ import numpy as np from matplotlib import pyplot as plt, cm import GPy -from GPy.core.transformations import logexp +from GPy.core.transformations import Logexp from GPy.models.bayesian_gplvm import BayesianGPLVM from GPy.likelihoods.gaussian import Gaussian @@ -88,7 +88,7 @@ def sparseGPLVM_oil(optimize=True, N=100, Q=6, num_inducing=15, max_iters=50): def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False): from GPy.util.datasets import swiss_roll_generated - from GPy.core.transformations import logexp_clipped + from GPy.core.transformations import LogexpClipped data = swiss_roll_generated(N=N, sigma=sigma) Y = data['Y'] @@ -155,7 +155,7 @@ def BGPLVM_oil(optimize=True, N=200, Q=7, num_inducing=40, max_iters=1000, plot= m = GPy.models.BayesianGPLVM(Yn, Q, kernel=kernel,
num_inducing=num_inducing, **k) m.data_labels = data['Y'][:N].argmax(axis=1) - # m.constrain('variance|leng', logexp_clipped()) + # m.constrain('variance|leng', LogexpClipped()) # m['.*lengt'] = m.X.var(0).max() / m.X.var(0) m['noise'] = Yn.Y.var() / 100. @@ -272,7 +272,7 @@ def bgplvm_simulation(optimize='scg', plot=True, max_iters=2e4, plot_sim=False): -# from GPy.core.transformations import logexp_clipped +# from GPy.core.transformations import LogexpClipped D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 3, 10 slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) @@ -285,7 +285,7 @@ def bgplvm_simulation(optimize='scg', k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) # + kern.bias(Q) m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k) - # m.constrain('variance|noise', logexp_clipped()) + # m.constrain('variance|noise', LogexpClipped()) m['noise'] = Y.var() / 100. if optimize: @@ -340,7 +340,7 @@ def brendan_faces(): # m = GPy.models.BayesianGPLVM(Yn, Q, num_inducing=100) # optimize - m.constrain('rbf|noise|white', GPy.core.transformations.logexp_clipped()) + m.constrain('rbf|noise|white', GPy.core.transformations.LogexpClipped()) m.optimize('scg', messages=1, max_f_eval=10000)