add original bfgs optimizer and add rbf with inverse lengthscale

Zhenwen Dai 2015-10-02 22:43:54 +01:00
parent 6b6ca60bb5
commit e6261c787c
2 changed files with 49 additions and 1 deletion


@@ -144,6 +144,33 @@ class opt_lbfgsb(Optimizer):
        if opt_result[2]['warnflag']==2:
            self.status = 'Error' + str(opt_result[2]['task'])

class opt_bfgs(Optimizer):
    def __init__(self, *args, **kwargs):
        Optimizer.__init__(self, *args, **kwargs)
        self.opt_name = "BFGS (Scipy implementation)"

    def opt(self, f_fp=None, f=None, fp=None):
        """
        Run the optimizer
        """
        rcstrings = ['','Maximum number of iterations exceeded', 'Gradient and/or function calls not changing']

        opt_dict = {}
        if self.xtol is not None:
            print("WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it")
        if self.ftol is not None:
            print("WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it")
        if self.gtol is not None:
            opt_dict['gtol'] = self.gtol
        opt_result = optimize.fmin_bfgs(f, self.x_init, fp, disp=self.messages,
                                        maxiter=self.max_iters, full_output=True, **opt_dict)
        self.x_opt = opt_result[0]
        self.f_opt = f_fp(self.x_opt)[0]
        self.funct_eval = opt_result[4]
        self.status = rcstrings[opt_result[6]]

class opt_simplex(Optimizer):
    def __init__(self, *args, **kwargs):
        Optimizer.__init__(self, *args, **kwargs)
@@ -255,6 +282,7 @@ def get_optimizer(f_min):
    optimizers = {'fmin_tnc': opt_tnc,
                  'simplex': opt_simplex,
                  'lbfgsb': opt_lbfgsb,
                  'org-bfgs': opt_bfgs,
                  'scg': opt_SCG,
                  'adadelta': Opt_Adadelta}
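
With the 'org-bfgs' entry registered above, plain BFGS (scipy.optimize.fmin_bfgs) should be selectable by name when optimizing a model. A minimal usage sketch, not part of the commit; the toy data and the optimizer keyword below are assumptions about the surrounding GPy API:

import numpy as np
import GPy

# toy 1-D regression problem (illustrative only)
X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05

m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))
# the string is looked up through get_optimizer and dispatches to opt_bfgs
m.optimize(optimizer='org-bfgs', messages=True)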


@@ -7,6 +7,8 @@ from .stationary import Stationary
from .psi_comp import PSICOMP_RBF
from .psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU
from ...util.config import *
from ...core import Param
from GPy.core.parameterization.transformations import Logexp

class RBF(Stationary):
    """
@@ -18,12 +20,17 @@ class RBF(Stationary):
    """
    _support_GPU = True

    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False):
    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False, inv_l=False):
        super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
        if self.useGPU:
            self.psicomp = PSICOMP_RBF_GPU()
        else:
            self.psicomp = PSICOMP_RBF()
        self.use_invLengthscale = inv_l
        if inv_l:
            self.unlink_parameter(self.lengthscale)
            self.inv_l = Param('inv_lengthscale', 1./self.lengthscale**2, Logexp())
            self.link_parameter(self.inv_l)

    def K_of_r(self, r):
        return self.variance * np.exp(-0.5 * r**2)
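
A short construction sketch (not from the commit) of how the new inv_l flag could be exercised; it assumes the linked Param is exposed as k.inv_l, as in the __init__ above:

import numpy as np
import GPy

k = GPy.kern.RBF(1, variance=1., lengthscale=2., inv_l=True)
print(k.inv_l)                     # Logexp-constrained, initialised to 1/lengthscale**2 = 0.25
print(k.K(np.random.randn(5, 1)))  # covariance evaluation is unchanged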
@@ -48,6 +55,10 @@ class RBF(Stationary):
        assert self.input_dim == 1 #TODO: higher dim spectra?
        return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale*2*omega**2/2)

    def parameters_changed(self):
        if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
        super(RBF,self).parameters_changed()

    #---------------------------------------#
    #             PSI statistics            #
    #---------------------------------------#
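
parameters_changed now writes the lengthscale back from inv_l, so the rest of the kernel code can keep reading self.lengthscale. A hedged sketch of the expected behaviour, assuming that assigning to a linked Param triggers parameters_changed as usual in GPy:

import GPy

k = GPy.kern.RBF(1, inv_l=True)   # lengthscale defaults to 1., so inv_l starts at 1.
k.inv_l[:] = 4.0                  # assignment should trigger parameters_changed
print(k.lengthscale)              # expected to read 1/sqrt(4.0) = 0.5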
@@ -68,6 +79,8 @@ class RBF(Stationary):
        dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[:2]
        self.variance.gradient = dL_dvar
        self.lengthscale.gradient = dL_dlengscale
        if self.use_invLengthscale:
            self.inv_l.gradient = dL_dlengscale*(self.lengthscale**3/-2.)

    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2]
@@ -75,3 +88,10 @@ class RBF(Stationary):
    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]

    def update_gradients_diag(self, dL_dKdiag, X):
        super(RBF,self).update_gradients_diag(dL_dKdiag, X)
        if self.use_invLengthscale: self.inv_l.gradient = self.lengthscale.gradient*(self.lengthscale**3/-2.)

    def update_gradients_full(self, dL_dK, X, X2=None):
        super(RBF,self).update_gradients_full(dL_dK, X, X2)
        if self.use_invLengthscale: self.inv_l.gradient = self.lengthscale.gradient*(self.lengthscale**3/-2.)
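
The factor (self.lengthscale**3/-2.) used in the three gradient updates above is the chain-rule term: with l = inv_l**-0.5, dl/d(inv_l) = -0.5*inv_l**-1.5 = -l**3/2, so dL/d(inv_l) = dL/dl * (-l**3/2). A quick numerical check of that derivative (a sketch, not part of the commit):

import numpy as np

inv_l = 0.7
l = inv_l ** -0.5
eps = 1e-6

# central finite difference of l = inv_l**-0.5 with respect to inv_l
dl_dinvl_numeric = ((inv_l + eps) ** -0.5 - (inv_l - eps) ** -0.5) / (2 * eps)
dl_dinvl_analytic = l ** 3 / -2.   # the factor applied to the lengthscale gradient above
assert np.isclose(dl_dinvl_numeric, dl_dinvl_analytic)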