mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-07 11:02:38 +02:00
[model] optimizer can now be an optimizer instance, instead of a string
This commit is contained in:
parent
1e006f63b5
commit
eaa18e3512
1 changed file with 12 additions and 10 deletions
|
|
@@ -118,12 +118,12 @@ class Model(Parameterized):
|
||||||
"""
|
"""
|
||||||
The objective function for the given algorithm.
|
The objective function for the given algorithm.
|
||||||
|
|
||||||
This function is the true objective, which wants to be minimized.
|
This function is the true objective, which wants to be minimized.
|
||||||
Note that all parameters are already set and in place, so you just need
|
Note that all parameters are already set and in place, so you just need
|
||||||
to return the objective function here.
|
to return the objective function here.
|
||||||
|
|
||||||
For probabilistic models this is the negative log_likelihood
|
For probabilistic models this is the negative log_likelihood
|
||||||
(including the MAP prior), so we return it here. If your model is not
|
(including the MAP prior), so we return it here. If your model is not
|
||||||
probabilistic, just return your objective to minimize here!
|
probabilistic, just return your objective to minimize here!
|
||||||
"""
|
"""
|
||||||
return -float(self.log_likelihood()) - self.log_prior()
|
return -float(self.log_likelihood()) - self.log_prior()
|
||||||
|
|
@@ -131,18 +131,18 @@ class Model(Parameterized):
|
||||||
def objective_function_gradients(self):
|
def objective_function_gradients(self):
|
||||||
"""
|
"""
|
||||||
The gradients for the objective function for the given algorithm.
|
The gradients for the objective function for the given algorithm.
|
||||||
The gradients are w.r.t. the *negative* objective function, as
|
The gradients are w.r.t. the *negative* objective function, as
|
||||||
this framework works with *negative* log-likelihoods as a default.
|
this framework works with *negative* log-likelihoods as a default.
|
||||||
|
|
||||||
You can find the gradient for the parameters in self.gradient at all times.
|
You can find the gradient for the parameters in self.gradient at all times.
|
||||||
This is the place, where gradients get stored for parameters.
|
This is the place, where gradients get stored for parameters.
|
||||||
|
|
||||||
This function is the true objective, which wants to be minimized.
|
This function is the true objective, which wants to be minimized.
|
||||||
Note that all parameters are already set and in place, so you just need
|
Note that all parameters are already set and in place, so you just need
|
||||||
to return the gradient here.
|
to return the gradient here.
|
||||||
|
|
||||||
For probabilistic models this is the gradient of the negative log_likelihood
|
For probabilistic models this is the gradient of the negative log_likelihood
|
||||||
(including the MAP prior), so we return it here. If your model is not
|
(including the MAP prior), so we return it here. If your model is not
|
||||||
probabilistic, just return your *negative* gradient here!
|
probabilistic, just return your *negative* gradient here!
|
||||||
"""
|
"""
|
||||||
return -(self._log_likelihood_gradients() + self._log_prior_gradients())
|
return -(self._log_likelihood_gradients() + self._log_prior_gradients())
|
||||||
|
|
@@ -227,13 +227,15 @@ class Model(Parameterized):
|
||||||
|
|
||||||
if optimizer is None:
|
if optimizer is None:
|
||||||
optimizer = self.preferred_optimizer
|
optimizer = self.preferred_optimizer
|
||||||
|
elif isinstance(optimizer, optimization.Optimizer):
|
||||||
|
opt = optimizer
|
||||||
|
else:
|
||||||
|
optimizer = optimization.get_optimizer(optimizer)
|
||||||
|
opt = optimizer(start, model=self, **kwargs)
|
||||||
|
|
||||||
if start == None:
|
if start == None:
|
||||||
start = self.optimizer_array
|
start = self.optimizer_array
|
||||||
|
|
||||||
optimizer = optimization.get_optimizer(optimizer)
|
|
||||||
opt = optimizer(start, model=self, **kwargs)
|
|
||||||
|
|
||||||
opt.run(f_fp=self._objective_grads, f=self._objective, fp=self._grads)
|
opt.run(f_fp=self._objective_grads, f=self._objective, fp=self._grads)
|
||||||
|
|
||||||
self.optimization_runs.append(opt)
|
self.optimization_runs.append(opt)
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue