mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-03 00:32:39 +02:00
fixed interface change in optimization.py
This commit is contained in:
parent 11dacb378a
commit b4190f907e
2 changed files with 5 additions and 5 deletions
@@ -170,12 +170,12 @@ class opt_rasm(Optimizer):
         Optimizer.__init__(self, *args, **kwargs)
         self.opt_name = "Rasmussen's Conjugate Gradient"
 
-    def opt(self):
+    def opt(self, f_fp = None, f = None, fp = None):
         """
         Run Rasmussen's Conjugate Gradient optimizer
         """
 
-        assert self.f_fp != None, "Rasmussen's minimizer requires f_fp"
+        assert f_fp != None, "Rasmussen's minimizer requires f_fp"
         statuses = ['Converged', 'Line search failed', 'Maximum number of f evaluations reached',
                     'NaNs in optimization']
@@ -187,7 +187,7 @@ class opt_rasm(Optimizer):
         if self.gtol is not None:
             print "WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it"
 
-        opt_result = rasm.minimize(self.x_init, self.f_fp, (), messages = self.messages,
+        opt_result = rasm.minimize(self.x_init, f_fp, (), messages = self.messages,
                                    maxnumfuneval = self.max_f_eval)
         self.x_opt = opt_result[0]
         self.f_opt = opt_result[1][-1]
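The two hunks above move the objective/gradient callable from an attribute (self.f_fp) to an argument of opt(). A minimal sketch of the contract this implies, assuming f_fp returns a (value, gradient) pair as Rasmussen's minimizer expects; the quadratic objective and the commented-out optimizer call are illustrations, not GPy code:

import numpy as np

# Hypothetical objective for illustration: a single callable returning
# both the function value and its gradient, matching the f_fp argument
# the new opt() signature asserts on.
def quadratic_f_fp(x):
    f = 0.5 * np.dot(x, x)   # value f(x)
    fp = x                   # gradient df/dx
    return f, fp

# Before this commit the optimizer read self.f_fp; after it, callers
# pass the callable in directly (sketch; the constructor arguments are
# a guess based on the attributes the diff uses):
#   o = opt_rasm(x_init=np.zeros(3), messages=False, max_f_eval=100)
#   o.opt(f_fp=quadratic_f_fp)
print(quadratic_f_fp(np.array([1.0, -2.0, 0.5])))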
@@ -22,7 +22,7 @@ class warpedGP(GP_regression):
         if warping_function == None:
             self.warping_function = TanhWarpingFunction(warping_terms)
             # self.warping_params = np.random.randn(self.warping_function.n_terms, 3)
-            self.warping_params = np.ones((self.warping_function.n_terms, 3))*1.0 # TODO better init
+            self.warping_params = np.ones((self.warping_function.n_terms, 3))*0.0 # TODO better init
             self.warp_params_shape = (self.warping_function.n_terms, 3) # todo get this from the subclass
 
             self.Z = Y.copy()
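The second change flips the initial warping parameters from all ones to all zeros. Assuming TanhWarpingFunction implements the usual tanh warp of Snelson et al. (2004), f(y) = y + sum_i a_i * tanh(b_i * (y + c_i)) — a guess consistent with the (n_terms, 3) parameter shape, since the exact form lives in the warping class — an all-zero start makes the warp the identity, while all ones starts from an arbitrary nonlinearity:

import numpy as np

# Assumed warp form (not taken from GPy): each row of params is one
# (a_i, b_i, c_i) term of f(y) = y + sum_i a_i * tanh(b_i * (y + c_i)).
def tanh_warp(y, params):
    out = y.copy()
    for a, b, c in params:
        out += a * np.tanh(b * (y + c))
    return out

y = np.linspace(-2.0, 2.0, 5)
print(np.allclose(tanh_warp(y, np.zeros((2, 3))), y))  # True: identity warp
print(np.allclose(tanh_warp(y, np.ones((2, 3))), y))   # False: y + 2*tanh(y+1)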