tests are now passing

James Hensman 2013-06-04 17:19:33 +01:00
parent 362389781b
commit edae1032c7
11 changed files with 38 additions and 34 deletions


@@ -19,7 +19,7 @@ import pdb
 class model(parameterised):
     def __init__(self):
         parameterised.__init__(self)
-        self.priors = [None for i in range(self._get_params().size)]
+        self.priors = None
         self.optimization_runs = []
         self.sampling_runs = []
         self.preferred_optimizer = 'tnc'
@@ -33,13 +33,13 @@ class model(parameterised):
     def _log_likelihood_gradients(self):
         raise NotImplementedError, "this needs to be implemented to use the model class"

-    def set_prior(self, which, what):
+    def set_prior(self, regexp, what):
         """
         Sets priors on the model parameters.

         Arguments
         ---------
-        which -- string, regexp, or integer array
+        regexp -- string, regexp, or integer array
         what -- instance of a prior class

         Notes
@@ -51,8 +51,10 @@ class model(parameterised):
         For tied parameters, the prior will only be "counted" once, thus
         a prior object is only inserted on the first tied index
         """
+        if self.priors is None:
+            self.priors = [None for i in range(self._get_params().size)]
-        which = self.grep_param_names(which)
+        which = self.grep_param_names(regexp)

         # check tied situation
         tie_partial_matches = [tie for tie in self.tied_indices if (not set(tie).isdisjoint(set(which))) & (not set(tie) == set(which))]
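Note on the renamed argument: set_prior still accepts a string regexp or an integer array, exactly as the docstring says; only the parameter name changed. A minimal usage sketch, assuming a GPy-style model m with a lengthscale parameter and assuming a Gamma prior class is available (both names are illustrative, not confirmed by this commit):

    # match parameters by regular expression and attach one prior object
    m.set_prior('.*lengthscale', GPy.priors.Gamma(1., 1.))
    # or address parameters directly by index
    m.set_prior([0, 1], GPy.priors.Gamma(1., 1.))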
@@ -104,10 +106,15 @@ class model(parameterised):
     def log_prior(self):
         """evaluate the prior"""
-        return np.sum([p.lnpdf(x) for p, x in zip(self.priors, self._get_params()) if p is not None])
+        if self.priors is not None:
+            return np.sum([p.lnpdf(x) for p, x in zip(self.priors, self._get_params()) if p is not None])
+        else:
+            return 0.

     def _log_prior_gradients(self):
         """evaluate the gradients of the priors"""
+        if self.priors is None:
+            return 0.
         x = self._get_params()
         ret = np.zeros(x.size)
         [np.put(ret, i, p.lnpdf_grad(xx)) for i, (p, xx) in enumerate(zip(self.priors, x)) if not p is None]
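With priors now lazily allocated, both prior terms collapse to an exact zero until set_prior is first called. A minimal sketch of that behaviour, assuming a model m on which no prior has been set:

    assert m.log_prior() == 0.
    assert m._log_prior_gradients() == 0.
    # the scalar 0. broadcasts cleanly when added to the
    # likelihood-gradient array in the objective methods below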
@@ -135,7 +142,8 @@ class model(parameterised):
         self._set_params_transformed(x)

         # now draw from prior where possible
         x = self._get_params()
-        [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
+        if self.priors is not None:
+            [np.put(x, i, p.rvs(1)) for i, p in enumerate(self.priors) if not p is None]
         self._set_params(x)
         self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
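The guarded line above fills only the prior-bearing entries of x via np.put, leaving the rest at their current values. A standalone sketch of the same pattern, using a frozen scipy.stats distribution as a stand-in for a prior object exposing an rvs method:

    import numpy as np
    from scipy import stats

    x = np.zeros(3)
    priors = [None, stats.gamma(2.), None]  # stand-in prior; the real prior objects expose rvs similarly
    [np.put(x, i, p.rvs(1)) for i, p in enumerate(priors) if p is not None]
    # only x[1] is overwritten with a random draw; entries without a prior are untouched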
@@ -234,16 +242,13 @@ class model(parameterised):
         Gets the gradients from the likelihood and the priors.
         """
         self._set_params_transformed(x)
-        LL_gradients = self._transform_gradients(self._log_likelihood_gradients())
-        prior_gradients = self._transform_gradients(self._log_prior_gradients())
-        return -LL_gradients - prior_gradients
+        obj_grads = -self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients())
+        return obj_grads

     def objective_and_gradients(self, x):
         self._set_params_transformed(x)
         obj_f = -self.log_likelihood() - self.log_prior()
-        LL_gradients = self._transform_gradients(self._log_likelihood_gradients())
-        prior_gradients = self._transform_gradients(self._log_prior_gradients())
-        obj_grads = -LL_gradients - prior_gradients
+        obj_grads = -self._transform_gradients(self._log_likelihood_gradients() + self._log_prior_gradients())
         return obj_f, obj_grads

     def optimize(self, optimizer=None, start=None, **kwargs):
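The refactor folds two _transform_gradients calls into one: -T(g_ll) - T(g_prior) becomes -T(g_ll + g_prior). This is exact provided _transform_gradients is linear in its argument, which holds if it only applies elementwise chain-rule scaling and sums over tied indices. A minimal numeric check of the identity, with a stand-in elementwise transform:

    import numpy as np

    jac = np.array([0.5, 2.0, 1.0])              # stand-in chain-rule factors
    transform = lambda g: jac * g                # elementwise, hence linear

    g_ll = np.array([1., -2., 3.])
    g_prior = np.array([0.1, 0., -0.3])
    old = -transform(g_ll) - transform(g_prior)  # two calls, as before this commit
    new = -transform(g_ll + g_prior)             # one call, as after
    assert np.allclose(old, new)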