merge conflict in transformations

This commit is contained in:
James Hensman 2013-05-10 16:42:23 +01:00
commit 0efde820bd
16 changed files with 655 additions and 279 deletions

View file

import numpy as np
class transformation(object):
    """Base class for parameter-space transformations.

    A transformation maps an unconstrained variable x to a constrained
    value f(x) (and back), so an optimizer can work in unconstrained
    space while the model sees values in the required domain.
    """
    def __init__(self):
        # set the domain. Suggest we use 'positive', 'bounded', etc
        # Subclasses override this in their own __init__.
        self.domain = 'undefined'
    def f(self, x):
        """Map the unconstrained variable x into the constrained domain."""
        raise NotImplementedError
    def finv(self, x):
        """Inverse map: recover the unconstrained variable from f(x)."""
        raise NotImplementedError
    def gradfactor(self, f):
        """ df_dx evaluated at self.f(x)=f"""
        raise NotImplementedError
    def initialize(self, f):
        """ produce a sensible initial values for f(x)"""
        raise NotImplementedError
    def __str__(self):
        # NOTE(review): the body of __str__ is truncated in the diff this
        # file was reconstructed from; raising like the other abstract
        # methods here -- confirm against the original source.
        raise NotImplementedError
class logexp(transformation):
    """Constrain a parameter to be positive via the softplus map
    f(x) = log(1 + exp(x))."""
    def __init__(self):
        self.domain = 'positive'
    def f(self, x):
        # softplus: smooth, monotonic map from R onto (0, inf)
        return np.log(1. + np.exp(x))
    def finv(self, f):
        # exact inverse of softplus (valid for f > 0)
        return np.log(np.exp(f) - 1.)
    def gradfactor(self, f):
        # df/dx expressed in terms of f: sigmoid(x) = (e^f - 1) / e^f
        ef = np.exp(f)
        return (ef - 1.) / ef
    def initialize(self, f):
        # any positive value is valid; fold negatives over to positive
        return np.abs(f)
    def __str__(self):
        return '(+ve)'
class logexp_clipped(transformation):
    """Positive constraint via f(x) = log(1 + exp(x)), with the gradient
    factor clipped to zero for very small f to avoid numerical trouble."""
    def __init__(self):
        self.domain = 'positive'
    def f(self, x):
        f = np.log(1. + np.exp(x))
        return f
    def finv(self, f):
        return np.log(np.exp(f) - 1.)
    def gradfactor(self, f):
        ef = np.exp(f)
        # BUG FIX: `gf` was returned without ever being defined (NameError).
        # Compute it exactly as logexp.gradfactor does, then clip the
        # factor to 0 where f is tiny.
        gf = (ef - 1.) / ef
        return np.where(f < 1e-6, 0, gf)
    def initialize(self, f):
        if np.any(f < 0.):
            print("Warning: changing parameters to satisfy constraints")
        # NOTE(review): the tail of this method is truncated in the diff
        # this file was reconstructed from; returning np.abs(f) to match
        # logexp/exponent.initialize -- confirm against the original.
        return np.abs(f)
class exponent(transformation):
    """Constrain a parameter to be positive via f(x) = exp(x)."""
    def __init__(self):
        self.domain = 'positive'
    def f(self, x):
        return np.exp(x)
    def finv(self, x):
        return np.log(x)
    def gradfactor(self, f):
        # df/dx = exp(x), which is simply f itself
        return f
    def initialize(self, f):
        if np.any(f < 0.):
            print("Warning: changing parameters to satisfy constraints")
        return np.abs(f)
    def __str__(self):
        # NOTE(review): __str__ is truncated in the diff this file was
        # reconstructed from; '(+ve)' matches the other positive
        # transforms -- confirm against the original source.
        return '(+ve)'
class negative_exponent(transformation):
    """Constrain a parameter to be negative via f(x) = -exp(x)."""
    def __init__(self):
        self.domain = 'negative'
    def f(self, x):
        return -np.exp(x)
    def finv(self, x):
        return np.log(-x)
    def gradfactor(self, f):
        # df/dx = -exp(x), which is simply f itself
        return f
    def initialize(self, f):
        if np.any(f > 0.):
            print("Warning: changing parameters to satisfy constraints")
        # fold any positive values over into the negative domain
        return -np.abs(f)
    def __str__(self):
        return '(-ve)'
class square(transformation):
    """Constrain a parameter to be non-negative via f(x) = x**2."""
    def __init__(self):
        self.domain = 'positive'
    def f(self, x):
        return x ** 2
    def finv(self, x):
        # principal square root; inverse on the x >= 0 branch
        return np.sqrt(x)
    def gradfactor(self, f):
        # df/dx = 2x = 2*sqrt(f)
        return 2 * np.sqrt(f)
    def initialize(self, f):
        return np.abs(f)
    def __str__(self):
        return '(+sq)'
class logistic(transformation):
    """Constrain a parameter to the open interval (lower, upper) via a
    scaled, shifted logistic sigmoid."""
    def __init__(self, lower, upper):
        self.domain = 'bounded'
        assert lower < upper
        self.lower, self.upper = float(lower), float(upper)
        self.difference = self.upper - self.lower
    def f(self, x):
        return self.lower + self.difference / (1. + np.exp(-x))
    def finv(self, f):
        # clip both factors so the log stays finite when f sits on (or
        # numerically outside) a bound
        return np.log(np.clip(f - self.lower, 1e-10, np.inf) / np.clip(self.upper - f, 1e-10, np.inf))
    def gradfactor(self, f):
        # df/dx expressed through f: sigma'(x) scaled to the interval
        return (f - self.lower) * (self.upper - f) / self.difference
    def initialize(self, f):
        if np.any(np.logical_or(f < self.lower, f > self.upper)):
            print("Warning: changing parameters to satisfy constraints")
        # out-of-range entries are reset to the interval midpoint f(0)
        return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
    def __str__(self):
        return '({},{})'.format(self.lower, self.upper)