Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-03 08:42:39 +02:00
added log jacobian transforms for Exponent, Logexp
parent 3341b3c56a
commit 01b501e078

2 changed files with 62 additions and 30 deletions
@@ -430,23 +430,37 @@ class Indexable(Nameable, Updateable):
 
     def log_prior(self):
         """evaluate the prior"""
-        if self.priors.size > 0:
-            x = self.param_array
-            #py3 fix
-            #return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()), 0)
-            return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.items()), 0)
-        return 0.
+        if self.priors.size == 0:
+            return 0.
+        x = self.param_array
+        #evaluate the prior log densities
+        log_p = reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.items()), 0)
+
+        #account for the transformation by evaluating the log Jacobian (where things are transformed)
+        log_j = 0.
+        priored_indexes = np.hstack([i for p, i in self.priors.items()])
+        for c, j in self.constraints.items():
+            for jj in j:
+                if jj in priored_indexes:
+                    log_j += c.log_jacobian(x[jj])
+        return log_p + log_j
+
 
     def _log_prior_gradients(self):
         """evaluate the gradients of the priors"""
-        if self.priors.size > 0:
-            x = self.param_array
-            ret = np.zeros(x.size)
-            #py3 fix
-            #[np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()]
-            [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]
-            return ret
-        return 0.
+        if self.priors.size == 0:
+            return 0.
+        x = self.param_array
+        ret = np.zeros(x.size)
+        #compute derivative of prior density
+        [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]
+        #add in Jacobian derivatives if transformed
+        priored_indexes = np.hstack([i for p, i in self.priors.items()])
+        for c, j in self.constraints.items():
+            for jj in j:
+                if jj in priored_indexes:
+                    ret[jj] += c.log_jacobian_grad(x[jj])
+        return ret
 
     #===========================================================================
     # Tie parameters together
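The correction added here is the standard change-of-variables term: GPy optimizes an unconstrained variable x while the prior p(theta) is placed on the constrained parameter theta = f(x), so the density seen by the optimizer is p(f(x)) * |df/dx| and the log objective gains log|df/dx|. A minimal self-contained sketch of the same bookkeeping in plain numpy/scipy (the Gamma prior and the exp transform are illustrative choices standing in for the GPy classes, not the library code itself):

import numpy as np
from scipy.special import gammaln

def log_gamma_pdf(theta, a=2.0):
    # log density of a Gamma(a, 1) prior on the constrained parameter theta
    return (a - 1.) * np.log(theta) - theta - gammaln(a)

def log_prior_in_x(x, a=2.0):
    theta = np.exp(x)                # theta = f(x), as in Exponent.f below
    log_p = log_gamma_pdf(theta, a)  # prior evaluated at the model parameter
    log_j = np.log(theta)            # log|df/dx| = x, i.e. Exponent.log_jacobian
    return log_p + log_j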
@@ -31,6 +31,16 @@ class Transformation(object):
         raise NotImplementedError
     def finv(self, model_param):
         raise NotImplementedError
+    def log_jacobian(self, model_param):
+        """
+        compute the log of the Jacobian of f, evaluated at f(x) = model_param
+        """
+        raise NotImplementedError
+    def log_jacobian_grad(self, model_param):
+        """
+        compute the derivative of the log of the Jacobian of f, evaluated at f(x) = model_param
+        """
+        raise NotImplementedError
     def gradfactor(self, model_param, dL_dmodel_param):
         """ df(opt_param)_dopt_param evaluated at self.f(opt_param)=model_param, times the gradient dL_dmodel_param,
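Note the contract: both hooks receive the constrained value model_param = f(x), not x itself, so concrete transformations must express log|df/dx| purely in terms of model_param. A finite-difference sanity check one might run against any subclass (a sketch only; `trans` is any object implementing the interface above, and f is assumed monotone increasing, as Logexp and Exponent are):

import numpy as np

def check_log_jacobian(trans, x, eps=1e-6):
    # compare the analytic log|df/dx| against a central difference of f
    model_param = trans.f(x)
    numeric = np.log((trans.f(x + eps) - trans.f(x - eps)) / (2. * eps))
    analytic = trans.log_jacobian(model_param)
    return np.allclose(numeric, analytic, atol=1e-4)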
@@ -74,9 +84,33 @@ class Logexp(Transformation):
         if np.any(f < 0.):
             print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
+    def log_jacobian(self, model_param):
+        return np.where(model_param>_lim_val, model_param, np.log(np.exp(model_param+1e-20) - 1.)) - model_param
+    def log_jacobian_grad(self, model_param):
+        return 1./(np.exp(model_param)-1.)
     def __str__(self):
         return '+ve'
 
+class Exponent(Transformation):
+    domain = _POSITIVE
+    def f(self, x):
+        return np.where(x<_lim_val, np.where(x>-_lim_val, np.exp(x), np.exp(-_lim_val)), np.exp(_lim_val))
+    def finv(self, x):
+        return np.log(x)
+    def gradfactor(self, f, df):
+        return np.einsum('i,i->i', df, f)
+    def initialize(self, f):
+        if np.any(f < 0.):
+            print("Warning: changing parameters to satisfy constraints")
+        return np.abs(f)
+    def log_jacobian(self, model_param):
+        return np.log(model_param)
+    def log_jacobian_grad(self, model_param):
+        return 1./model_param
+    def __str__(self):
+        return '+ve'
+
+
 class NormalTheta(Transformation):
     "Do not use, not officially supported!"
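For Logexp, f(x) = log(1 + exp(x)), so df/dx = exp(x)/(1 + exp(x)); rewritten in terms of the constrained value f, log df/dx = log(exp(f) - 1) - f, which is exactly what log_jacobian computes (the model_param > _lim_val branch returns x ~= f to avoid overflow, since the whole term tends to 0 for large f). Differentiating with respect to f gives 1/(exp(f) - 1), matching log_jacobian_grad. A small numerical confirmation, as a plain-numpy sketch:

import numpy as np

x = 0.7
f = np.log1p(np.exp(x))                    # Logexp.f(x)
# analytic log|df/dx| in terms of f agrees with the direct derivative in x
assert np.isclose(np.log(np.exp(f) - 1.) - f,
                  np.log(np.exp(x) / (1. + np.exp(x))))

# central difference of log|df/dx| w.r.t. f matches log_jacobian_grad
g = lambda t: np.log(np.exp(t) - 1.) - t
eps = 1e-6
assert np.isclose((g(f + eps) - g(f - eps)) / (2. * eps),
                  1. / (np.exp(f) - 1.), atol=1e-5)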
@@ -417,22 +451,6 @@ class LogexpClipped(Logexp):
     def __str__(self):
         return '+ve_c'
 
-class Exponent(Transformation):
-    # TODO: can't allow this to go to zero, need to set a lower bound. Similar with negative Exponent below. See old MATLAB code.
-    domain = _POSITIVE
-    def f(self, x):
-        return np.where(x<_lim_val, np.where(x>-_lim_val, np.exp(x), np.exp(-_lim_val)), np.exp(_lim_val))
-    def finv(self, x):
-        return np.log(x)
-    def gradfactor(self, f, df):
-        return np.einsum('i,i->i', df, f)
-    def initialize(self, f):
-        if np.any(f < 0.):
-            print("Warning: changing parameters to satisfy constraints")
-        return np.abs(f)
-    def __str__(self):
-        return '+ve'
-
 class NegativeExponent(Exponent):
     domain = _NEGATIVE
     def f(self, x):
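Taken together, the commit moves Exponent up next to Logexp, gives both classes the new Jacobian hooks, and makes log_prior() return log p(theta) plus the correction for every constrained parameter that carries a prior. A hedged end-to-end usage sketch (assuming a stock GPRegression model; exact parameter paths depend on the kernel chosen):

import numpy as np
import GPy

X = np.random.randn(20, 1)
Y = np.sin(X) + 0.1 * np.random.randn(20, 1)
m = GPy.models.GPRegression(X, Y)
# lengthscale is +ve-constrained (Logexp), so its prior now also
# contributes the log-Jacobian term to the objective and its gradient
m.kern.lengthscale.set_prior(GPy.priors.Gamma(2., 1.))
print(m.log_prior())
print(m._log_prior_gradients())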