From 1674bc529bc49949fc39fe992ad5c487f942251d Mon Sep 17 00:00:00 2001
From: Neil Lawrence
Date: Fri, 18 Jan 2013 13:37:17 +0000
Subject: [PATCH] expand_param and extract_param replaced with
 set_params_transformed and get_params_transformed

---
 GPy/core/model.py                  | 56 +++++++++++++++---------
 GPy/core/parameterised.py          | 12 +++----
 GPy/inference/samplers.py          |  8 ++---
 GPy/models/GPLVM.py                |  4 +--
 GPy/models/GP_EP.py                | 10 +++---
 GPy/models/GP_regression.py        |  6 ++--
 GPy/models/generalized_FITC.py     |  8 ++---
 GPy/models/sparse_GP_regression.py |  4 +--
 8 files changed, 54 insertions(+), 54 deletions(-)

diff --git a/GPy/core/model.py b/GPy/core/model.py
index d3c6e582..2bfb02a5 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -115,7 +115,7 @@ class model(parameterised):
         [np.put(ret,i,p.lnpdf_grad(xx)) for i,(p,xx) in enumerate(zip(self.priors,x)) if not p is None]
         return ret
 
-    def extract_gradients(self):
+    def _log_likelihood_gradients_transformed(self):
         """
         Use self.log_likelihood_gradients and self.prior_gradients to get the gradients of the model.
         Adjust the gradient for constraints and ties, return.
@@ -138,14 +138,14 @@ class model(parameterised):
         Make this draw from the prior if one exists, else draw from N(0,1)
         """
         #first take care of all parameters (from N(0,1))
-        x = self.extract_param()
+        x = self._get_params_transformed()
         x = np.random.randn(x.size)
-        self.expand_param(x)
+        self._set_params_transformed(x)
         #now draw from prior where possible
         x = self._get_params()
         [np.put(x,i,p.rvs(1)) for i,p in enumerate(self.priors) if not p is None]
         self._set_params(x)
-        self.expand_param(self.extract_param())#makes sure all of the tied parameters get the same init (since there's only one prior object...)
+        self._set_params_transformed(self._get_params_transformed())#makes sure all of the tied parameters get the same init (since there's only one prior object...)
 
     def optimize_restarts(self, Nrestarts=10, robust=False, verbose=True, **kwargs):
@@ -165,7 +165,7 @@ class model(parameterised):
         :verbose: whether to show informations about the current restart
 
         """
-        initial_parameters = self.extract_param()
+        initial_parameters = self._get_params_transformed()
         for i in range(Nrestarts):
             try:
                 self.randomize()
@@ -182,9 +182,9 @@ class model(parameterised):
                 raise e
         if len(self.optimization_runs):
             i = np.argmax([o.f_opt for o in self.optimization_runs])
-            self.expand_param(self.optimization_runs[i].x_opt)
+            self._set_params_transformed(self.optimization_runs[i].x_opt)
         else:
-            self.expand_param(initial_parameters)
+            self._set_params_transformed(initial_parameters)
 
     def ensure_default_constraints(self,warn=False):
         """
@@ -214,24 +214,24 @@ class model(parameterised):
             optimizer = self.preferred_optimizer
 
         def f(x):
-            self.expand_param(x)
+            self._set_params_transformed(x)
             return -self.log_likelihood()-self.log_prior()
 
         def fp(x):
-            self.expand_param(x)
-            return -self.extract_gradients()
+            self._set_params_transformed(x)
+            return -self._log_likelihood_gradients_transformed()
 
         def f_fp(x):
-            self.expand_param(x)
-            return -self.log_likelihood()-self.log_prior(),-self.extract_gradients()
+            self._set_params_transformed(x)
+            return -self.log_likelihood()-self.log_prior(),-self._log_likelihood_gradients_transformed()
 
         if start == None:
-            start = self.extract_param()
+            start = self._get_params_transformed()
 
         optimizer = optimization.get_optimizer(optimizer)
         opt = optimizer(start, model = self, **kwargs)
         opt.run(f_fp=f_fp, f=f, fp=fp)
         self.optimization_runs.append(opt)
-        self.expand_param(opt.x_opt)
+        self._set_params_transformed(opt.x_opt)
 
     def optimize_SGD(self, momentum = 0.1, learning_rate = 0.01, iterations = 20, **kwargs):
         # assert self.Y.shape[1] > 1, "SGD only works with D > 1"
@@ -292,18 +292,18 @@ class model(parameterised):
         If the overall gradient fails, invividual components are tested.
         """
-        x = self.extract_param().copy()
+        x = self._get_params_transformed().copy()
 
         #choose a random direction to step in:
         dx = step*np.sign(np.random.uniform(-1,1,x.size))
 
         #evaulate around the point x
-        self.expand_param(x+dx)
-        f1,g1 = self.log_likelihood() + self.log_prior(), self.extract_gradients()
-        self.expand_param(x-dx)
-        f2,g2 = self.log_likelihood() + self.log_prior(), self.extract_gradients()
-        self.expand_param(x)
-        gradient = self.extract_gradients()
+        self._set_params_transformed(x+dx)
+        f1,g1 = self.log_likelihood() + self.log_prior(), self._log_likelihood_gradients_transformed()
+        self._set_params_transformed(x-dx)
+        f2,g2 = self.log_likelihood() + self.log_prior(), self._log_likelihood_gradients_transformed()
+        self._set_params_transformed(x)
+        gradient = self._log_likelihood_gradients_transformed()
 
         numerical_gradient = (f1-f2)/(2*dx)
         ratio = (f1-f2)/(2*np.dot(dx,gradient))
@@ -319,7 +319,7 @@ class model(parameterised):
             print "Global check failed. Testing individual gradients\n"
             try:
-                names = self.extract_param_names()
+                names = self._get_param_names_transformed()
             except NotImplementedError:
                 names = ['Variable %i'%i for i in range(len(x))]
@@ -338,13 +338,13 @@ class model(parameterised):
         for i in range(len(x)):
             xx = x.copy()
             xx[i] += step
-            self.expand_param(xx)
-            f1,g1 = self.log_likelihood() + self.log_prior(), self.extract_gradients()[i]
+            self._set_params_transformed(xx)
+            f1,g1 = self.log_likelihood() + self.log_prior(), self._log_likelihood_gradients_transformed()[i]
             xx[i] -= 2.*step
-            self.expand_param(xx)
-            f2,g2 = self.log_likelihood() + self.log_prior(), self.extract_gradients()[i]
-            self.expand_param(x)
-            gradient = self.extract_gradients()[i]
+            self._set_params_transformed(xx)
+            f2,g2 = self.log_likelihood() + self.log_prior(), self._log_likelihood_gradients_transformed()[i]
+            self._set_params_transformed(x)
+            gradient = self._log_likelihood_gradients_transformed()[i]
 
             numerical_gradient = (f1-f2)/(2*step)
diff --git a/GPy/core/parameterised.py b/GPy/core/parameterised.py
index 738bde5b..81b6aa8a 100644
--- a/GPy/core/parameterised.py
+++ b/GPy/core/parameterised.py
@@ -66,7 +66,7 @@ class parameterised(object):
         if hasattr(self,'prior'):
             pass
 
-        self.expand_param(self.extract_param())# sets tied parameters to single value
+        self._set_params_transformed(self._get_params_transformed())# sets tied parameters to single value
 
     def untie_everything(self):
         """Unties all parameters by setting tied_indices to an empty list."""
@@ -216,9 +216,9 @@ class parameterised(object):
             self.constrained_fixed_values.append(self._get_params()[self.constrained_fixed_indices[-1]])
         #self.constrained_fixed_values.append(value)
 
-        self.expand_param(self.extract_param())
+        self._set_params_transformed(self._get_params_transformed())
 
-    def extract_param(self):
+    def _get_params_transformed(self):
         """use self._get_params to get the 'true' parameters of the model, which are then tied, constrained and fixed"""
         x = self._get_params()
         x[self.constrained_positive_indices] = np.log(x[self.constrained_positive_indices])
@@ -232,7 +232,7 @@ class parameterised(object):
 
         return x
 
-    def expand_param(self,x):
+    def _set_params_transformed(self,x):
         """ takes the vector x, which is then modified (by untying, reparameterising or inserting fixed values), and then call self._set_params"""
         #work out how many places are fixed, and where they are. tricky logic!
@@ -259,10 +259,10 @@ class parameterised(object):
         [np.put(xx,i,low+sigmoid(xx[i])*(high-low)) for i,low,high in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
         self._set_params(xx)
 
-    def extract_param_names(self):
+    def _get_param_names_transformed(self):
         """
         Returns the parameter names as propagated after constraining,
-        tying or fixing, i.e. a list of the same length as extract_param()
+        tying or fixing, i.e. a list of the same length as _get_params_transformed()
         """
         n = self._get_param_names()
diff --git a/GPy/inference/samplers.py b/GPy/inference/samplers.py
index da159c23..c2b47bce 100644
--- a/GPy/inference/samplers.py
+++ b/GPy/inference/samplers.py
@@ -17,7 +17,7 @@ class Metropolis_Hastings:
     def __init__(self,model,cov=None):
         """Metropolis Hastings, with tunings according to Gelman et al.
         """
         self.model = model
-        current = self.model.extract_param()
+        current = self.model._get_params_transformed()
         self.D = current.size
         self.chains = []
         if cov is None:
@@ -32,19 +32,19 @@ class Metropolis_Hastings:
         if start is None:
             self.model.randomize()
         else:
-            self.model.expand_param(start)
+            self.model._set_params_transformed(start)
 
     def sample(self, Ntotal, Nburn, Nthin, tune=True, tune_throughout=False, tune_interval=400):
-        current = self.model.extract_param()
+        current = self.model._get_params_transformed()
         fcurrent = self.model.log_likelihood() + self.model.log_prior()
         accepted = np.zeros(Ntotal,dtype=np.bool)
         for it in range(Ntotal):
             print "sample %d of %d\r"%(it,Ntotal),
             sys.stdout.flush()
             prop = np.random.multivariate_normal(current, self.cov*self.scale*self.scale)
-            self.model.expand_param(prop)
+            self.model._set_params_transformed(prop)
             fprop = self.model.log_likelihood() + self.model.log_prior()
 
             if fprop>fcurrent:#sample accepted, going 'uphill'
diff --git a/GPy/models/GPLVM.py b/GPy/models/GPLVM.py
index 0a168f25..f4723396 100644
--- a/GPy/models/GPLVM.py
+++ b/GPy/models/GPLVM.py
@@ -35,10 +35,10 @@ class GPLVM(GP_regression):
 
     def _get_param_names(self):
         return (sum([['X_%i_%i'%(n,q) for n in range(self.N)] for q in range(self.Q)],[])
-                + self.kern.extract_param_names())
+                + self.kern._get_param_names_transformed())
 
     def _get_params(self):
-        return np.hstack((self.X.flatten(), self.kern.extract_param()))
+        return np.hstack((self.X.flatten(), self.kern._get_params_transformed()))
 
     def _set_params(self,x):
         self.X = x[:self.X.size].reshape(self.N,self.Q).copy()
diff --git a/GPy/models/GP_EP.py b/GPy/models/GP_EP.py
index 5cb2ba45..6f5aad6e 100644
--- a/GPy/models/GP_EP.py
+++ b/GPy/models/GP_EP.py
@@ -42,13 +42,13 @@ class GP_EP(model):
         model.__init__(self)
 
     def _set_params(self,p):
-        self.kernel.expand_param(p)
+        self.kernel._set_params_transformed(p)
 
     def _get_params(self):
-        return self.kernel.extract_param()
+        return self.kernel._get_params_transformed()
 
     def _get_param_names(self):
-        return self.kernel.extract_param_names()
+        return self.kernel._get_param_names_transformed()
 
     def approximate_likelihood(self):
         self.ep_approx = Full(self.K,self.likelihood,epsilon=self.epsilon_ep,powerep=[self.eta,self.delta])
@@ -150,8 +150,8 @@ class GP_EP(model):
             log_likelihood_change = log_likelihood_new - self.log_likelihood_path[-1]
             if log_likelihood_change < 0:
                 print 'log_likelihood decrement'
-                self.kernel.expand_param(self.parameters_path[-1])
-                self.kernM.expand_param(self.parameters_path[-1])
+                self.kernel._set_params_transformed(self.parameters_path[-1])
+                self.kernM._set_params_transformed(self.parameters_path[-1])
             else:
                 self.approximate_likelihood()
                 self.log_likelihood_path.append(self.log_likelihood())
diff --git a/GPy/models/GP_regression.py b/GPy/models/GP_regression.py
index da657950..f34802bf 100644
--- a/GPy/models/GP_regression.py
+++ b/GPy/models/GP_regression.py
@@ -71,15 +71,15 @@ class GP_regression(model):
         model.__init__(self)
 
     def _set_params(self,p):
-        self.kern.expand_param(p)
+        self.kern._set_params_transformed(p)
         self.K = self.kern.K(self.X,slices1=self.Xslices)
         self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
 
     def _get_params(self):
-        return self.kern.extract_param()
+        return self.kern._get_params_transformed()
 
     def _get_param_names(self):
-        return self.kern.extract_param_names()
+        return self.kern._get_param_names_transformed()
 
     def _model_fit_term(self):
         """
diff --git a/GPy/models/generalized_FITC.py b/GPy/models/generalized_FITC.py
index 40f18ff9..a20666d0 100644
--- a/GPy/models/generalized_FITC.py
+++ b/GPy/models/generalized_FITC.py
@@ -43,14 +43,14 @@ class generalized_FITC(model):
         model.__init__(self)
 
     def _set_params(self,p):
-        self.kernel.expand_param(p[0:-self.Z.size])
+        self.kernel._set_params_transformed(p[0:-self.Z.size])
         self.Z = p[-self.Z.size:].reshape(self.M,self.D)
 
     def _get_params(self):
-        return np.hstack([self.kernel.extract_param(),self.Z.flatten()])
+        return np.hstack([self.kernel._get_params_transformed(),self.Z.flatten()])
 
     def _get_param_names(self):
-        return self.kernel.extract_param_names()+['iip_%i'%i for i in range(self.Z.size)]
+        return self.kernel._get_param_names_transformed()+['iip_%i'%i for i in range(self.Z.size)]
 
     def approximate_likelihood(self):
         self.Kmm = self.kernel.K(self.Z)
@@ -227,7 +227,7 @@ class generalized_FITC(model):
             log_likelihood_change = log_likelihood_new - self.log_likelihood_path[-1]
             if log_likelihood_change < 0:
                 print 'log_likelihood decrement'
-                self.kernel.expand_param(self.parameters_path[-1])
+                self.kernel._set_params_transformed(self.parameters_path[-1])
                 self.kernM = self.kernel.copy()
                 slef.kernM.expand_X(self.iducing_inputs_path[-1])
                 self.__init__(self.kernel,self.likelihood,kernM=self.kernM,powerep=[self.eta,self.delta],epsilon_ep = self.epsilon_ep, epsilon_em = self.epsilon_em)
diff --git a/GPy/models/sparse_GP_regression.py b/GPy/models/sparse_GP_regression.py
index fc04d5ed..59e6ebbb 100644
--- a/GPy/models/sparse_GP_regression.py
+++ b/GPy/models/sparse_GP_regression.py
@@ -107,10 +107,10 @@ class sparse_GP_regression(GP_regression):
         self.dL_dKmm += np.dot(np.dot(self.G,self.beta*self.psi2) - np.dot(self.LBL_inv, self.psi1VVpsi1), self.Kmmi) + 0.5*self.G # dE
 
     def _get_params(self):
-        return np.hstack([self.Z.flatten(),self.beta,self.kern.extract_param()])
+        return np.hstack([self.Z.flatten(),self.beta,self.kern._get_params_transformed()])
 
     def _get_param_names(self):
-        return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + ['noise_precision']+self.kern.extract_param_names()
+        return sum([['iip_%i_%i'%(i,j) for i in range(self.Z.shape[0])] for j in range(self.Z.shape[1])],[]) + ['noise_precision']+self.kern._get_param_names_transformed()
 
     def log_likelihood(self):
         """
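
The hunks above only show fragments of the transformation machinery in GPy/core/parameterised.py, so here is a minimal, self-contained sketch of what the renamed pair _get_params_transformed / _set_params_transformed does: positively constrained parameters travel through log/exp and bounded ones through a scaled sigmoid, so the optimizer always works on an unconstrained vector. The TransformedParams class, its constructor and the logit helper are illustrative assumptions only, not GPy code; the real implementation also handles tied and fixed parameters.

import numpy as np

def sigmoid(t):
    return 1.0 / (1.0 + np.exp(-t))

def logit(p):
    return np.log(p) - np.log(1.0 - p)

class TransformedParams(object):
    def __init__(self, params, positive=(), bounded=(), lowers=(), uppers=()):
        self._params = np.asarray(params, dtype=float)
        self.positive = list(positive)   # indices constrained positive
        self.bounded = list(bounded)     # indices constrained to (low, high)
        self.lowers = list(lowers)
        self.uppers = list(uppers)

    def _get_params(self):
        return self._params.copy()

    def _set_params(self, x):
        self._params = np.asarray(x, dtype=float)

    def _get_params_transformed(self):
        # true parameters -> unconstrained optimizer space
        x = self._get_params()
        x[self.positive] = np.log(x[self.positive])
        for i, low, high in zip(self.bounded, self.lowers, self.uppers):
            x[i] = logit((x[i] - low) / (high - low))
        return x

    def _set_params_transformed(self, x):
        # unconstrained optimizer space -> true parameters
        xx = np.array(x, dtype=float)
        xx[self.positive] = np.exp(xx[self.positive])
        for i, low, high in zip(self.bounded, self.lowers, self.uppers):
            xx[i] = low + sigmoid(xx[i]) * (high - low)
        self._set_params(xx)

p = TransformedParams([2.0, 0.3], positive=[0], bounded=[1], lowers=[0.0], uppers=[1.0])
p._set_params_transformed(p._get_params_transformed())   # the round trip is the identity
assert np.allclose(p._get_params(), [2.0, 0.3])

The round trip at the end mirrors the self._set_params_transformed(self._get_params_transformed()) idiom the patch uses to push tied and fixed values back into the model.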
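
The model.optimize hunk shows the pattern these accessors are built for: the optimizer only ever sees the transformed vector, and each evaluation of f or fp pushes that vector back into the model before computing the negative objective and gradient. Below is a small runnable sketch of the same pattern, assuming a hypothetical one-parameter ToyModel (a Gaussian noise variance); the prior term from the patch is omitted, and the chain-rule factor in _log_likelihood_gradients_transformed stands in for the "adjust the gradient for constraints" step mentioned in the docstring.

import numpy as np
from scipy.optimize import minimize

class ToyModel(object):
    def __init__(self, y):
        self.y = np.asarray(y, dtype=float)
        self.variance = 1.0                        # constrained positive

    def _get_params_transformed(self):
        return np.array([np.log(self.variance)])   # optimize log(variance)

    def _set_params_transformed(self, x):
        self.variance = np.exp(float(x[0]))

    def log_likelihood(self):
        N, v = self.y.size, self.variance
        return -0.5 * N * np.log(2 * np.pi * v) - 0.5 * np.sum(self.y ** 2) / v

    def _log_likelihood_gradients_transformed(self):
        # d/d log(v) = v * d/dv : the gradient is adjusted for the constraint
        N, v = self.y.size, self.variance
        return np.array([-0.5 * N + 0.5 * np.sum(self.y ** 2) / v])

m = ToyModel(np.random.randn(200))

def f(x):    # negative objective, as in model.optimize()
    m._set_params_transformed(x)
    return -m.log_likelihood()

def fp(x):   # negative gradient in the transformed space
    m._set_params_transformed(x)
    return -m._log_likelihood_gradients_transformed()

opt = minimize(f, m._get_params_transformed(), jac=fp)
m._set_params_transformed(opt.x)                   # write the optimum back into the model
# m.variance should now be close to np.mean(m.y ** 2)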