Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-15 06:52:39 +02:00
[objective grads] undid the change, as this would lead to dramatic problems in reloading old models using the pickle module

parent 20eff02061
commit 53dcd3f9fa

2 changed files with 12 additions and 12 deletions
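For context, a sketch of why renaming methods and attributes causes trouble for previously pickled models: pickle restores an instance's `__dict__` by name and looks classes up by qualified name, so state saved under the old names no longer matches code written against the new ones. Everything below (`OldModel`, `_grads_data`, `gradient_norm`) is hypothetical, for illustration only, not GPy's API:

```python
import pickle

class OldModel(object):
    """Hypothetical stand-in for a model pickled before a rename."""
    def __init__(self):
        self._grads_data = [1.0, 2.0]   # attribute name as it existed when pickled

blob = pickle.dumps(OldModel())

# If the class is later refactored to use a new attribute name, code that
# expects the new name fails on instances restored from old pickles:
class OldModel(object):
    def gradient_norm(self):
        return sum(abs(g) for g in self._objective_grads_data)  # renamed attribute

restored = pickle.loads(blob)   # __dict__ still holds the old '_grads_data'
try:
    restored.gradient_norm()
except AttributeError as e:
    print("old pickle broken by rename:", e)
```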
```diff
@@ -257,7 +257,7 @@ class Model(Parameterized):
         opt = optimizer(start, model=self, max_iters=max_iters, **kwargs)
 
         with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook):
-            opt.run(f_fp=self._objective_and_grads, f=self._objective, fp=self._objective_grads)
+            opt.run(f_fp=self._objective_grads, f=self._objective, fp=self._grads)
 
         self.optimization_runs.append(opt)
 
```
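For orientation, the three callbacks handed to `opt.run` follow the usual optimizer convention: `f` evaluates the objective, `fp` its gradient, and `f_fp` both at once. A minimal sketch of a driver consuming them, assuming a hypothetical `run` helper built on SciPy's L-BFGS-B (not GPy's optimizer class):

```python
import numpy as np
from scipy.optimize import minimize

def run(start, f, fp, f_fp=None):
    """Hypothetical driver: prefers the fused objective+gradient callback."""
    if f_fp is not None:
        # jac=True tells SciPy the callable returns (objective, gradient)
        result = minimize(f_fp, start, jac=True, method="L-BFGS-B")
    else:
        result = minimize(f, start, jac=fp, method="L-BFGS-B")
    return result.x

# Toy quadratic objective with its gradient
f = lambda x: float(np.sum(x ** 2))
fp = lambda x: 2 * x
f_fp = lambda x: (float(np.sum(x ** 2)), 2 * x)

print(run(np.array([3.0, -1.5]), f, fp, f_fp))   # converges to ~[0, 0]
```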
```diff
@@ -314,7 +314,7 @@ class Model(Parameterized):
         # evaulate around the point x
         f1 = self._objective(x + dx)
         f2 = self._objective(x - dx)
-        gradient = self._objective_grads(x)
+        gradient = self._grads(x)
 
         dx = dx[transformed_index]
         gradient = gradient[transformed_index]
```
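The hunk above sits inside a gradient check: the objective is evaluated at `x + dx` and `x - dx`, and the central difference is compared against the analytic gradient. A self-contained sketch of the same idea, with illustrative function names rather than GPy's:

```python
import numpy as np

def check_gradient(objective, grads, x, step=1e-6, tol=1e-4):
    """Compare analytic gradients against central finite differences."""
    analytic = grads(x)
    for i in range(x.size):
        dx = np.zeros_like(x)
        dx[i] = step
        # central difference: (f(x+dx) - f(x-dx)) / (2*step)
        numeric = (objective(x + dx) - objective(x - dx)) / (2 * step)
        if not np.isclose(numeric, analytic[i], rtol=tol, atol=tol):
            return False
    return True

objective = lambda x: float(np.sum(np.sin(x)))
grads = lambda x: np.cos(x)
print(check_gradient(objective, grads, np.array([0.1, 1.3, -2.0])))  # True
```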
```diff
@@ -360,7 +360,7 @@ class Model(Parameterized):
             print "No free parameters to check"
             return
 
-        gradient = self._objective_grads(x).copy()
+        gradient = self._grads(x).copy()
         np.where(gradient == 0, 1e-312, gradient)
         ret = True
         for nind, xind in itertools.izip(param_index, transformed_index):
```
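One detail worth flagging in the hunk above: `np.where` returns a new array rather than modifying its input, so a bare `np.where(gradient == 0, 1e-312, gradient)` has no effect unless its result is assigned back:

```python
import numpy as np

gradient = np.array([0.0, 2.5, -1.0])
np.where(gradient == 0, 1e-312, gradient)             # result discarded; gradient unchanged
gradient = np.where(gradient == 0, 1e-312, gradient)  # zeros actually replaced by a tiny constant
print(gradient)
```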
```diff
@@ -12,8 +12,9 @@ def exponents(fnow, current_grad):
 
 class VerboseOptimization(object):
     def __init__(self, model, opt, maxiters, verbose=True, current_iteration=0, ipython_notebook=False):
-        self.verbose = verbose
-        if self.verbose or ipython_notebook:
+        self.verbose = verbose or ipython_notebook
+        self.ipython_notebook = ipython_notebook
+        if self.verbose:
             self.model = model
             self.iteration = current_iteration
             self.ipython_notebook = ipython_notebook
```
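The net effect of the `__init__` rework is that notebook mode now implies verbosity, where previously `verbose=False` left `self.verbose` false even when `ipython_notebook=True`. A tiny sketch of the new flag logic:

```python
# After this commit: notebook mode implies verbose output.
def effective_verbose(verbose, ipython_notebook):
    return verbose or ipython_notebook

print(effective_verbose(False, True))   # True: notebook output is shown
print(effective_verbose(False, False))  # False: stay quiet
```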
```diff
@@ -130,12 +131,11 @@ class VerboseOptimization(object):
 
     def __exit__(self, type, value, traceback):
         if self.verbose or self.ipython_notebook:
+            self.stop = time.time()
             self.model.remove_observer(self)
-            self.stop = time.time()
-
             self.print_out()
 
             if not self.ipython_notebook:
                 print
                 print 'Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)
                 print
```
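The `__exit__` hunk reorders teardown so the stop time is captured first, before the observer is detached and the summary printed. A minimal Python 3 sketch of the same context-manager timing pattern (the GPy code above is Python 2, hence the bare `print` statements):

```python
import time

class TimedBlock(object):
    """Illustrative stand-in for VerboseOptimization's enter/exit timing."""
    def __init__(self, verbose=True):
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, type, value, traceback):
        if self.verbose:
            self.stop = time.time()   # capture the time first, as in the commit
            print('Optimization finished in {0:.5g} Seconds'.format(self.stop - self.start))

with TimedBlock():
    sum(i * i for i in range(10 ** 6))   # stand-in for opt.run(...)
```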