Modifications to transformations ... not sure which tests to run to make sure I haven't messed things up. New code avoids exponentiating values greater than -log(eps) or less than log(eps). Also changed negative code to call the positive code (I think they should inherit the positive code ... but maybe not).

Neil Lawrence 2013-08-16 18:28:16 +01:00
parent 9e0795afc4
commit 7aac769d37
5 changed files with 58 additions and 28 deletions
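
The numerical trick in this commit: in double precision, log(1 + exp(x)) is indistinguishable from x once exp(-x) falls below machine epsilon, i.e. once x exceeds -log(eps), roughly 36. A minimal sketch of that guard (illustrative only, not code from this commit; the helper name stable_logexp is made up):

    import sys
    import numpy as np

    lim_val = -np.log(sys.float_info.epsilon)  # ~36.04 for float64

    def stable_logexp(x):
        # Hypothetical helper showing the guard: for x > lim_val,
        # log(1 + exp(x)) == x to machine precision, so return x directly.
        # Clamping the argument of exp avoids the overflow warning that
        # np.where alone would still raise (it evaluates both branches).
        x = np.asarray(x, dtype=float)
        return np.where(x > lim_val, x, np.log(1. + np.exp(np.minimum(x, lim_val))))

    print(stable_logexp(np.array([-5., 0., 50., 800.])))  # [0.0067, 0.6931, 50., 800.]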

View file

@@ -4,6 +4,8 @@
import numpy as np
from GPy.core.domains import POSITIVE, NEGATIVE, BOUNDED
+import sys
+lim_val = -np.log(sys.float_info.epsilon)
class transformation(object):
    domain = None
@@ -17,7 +19,7 @@ class transformation(object):
        """ df_dx evaluated at self.f(x)=f"""
        raise NotImplementedError
    def initialize(self, f):
-        """ produce a sensible initial values for f(x)"""
+        """ produce a sensible initial value for f(x)"""
        raise NotImplementedError
    def __str__(self):
        raise NotImplementedError
@@ -25,13 +27,14 @@ class transformation(object):
class logexp(transformation):
    domain = POSITIVE
    def f(self, x):
-        return np.log(1. + np.exp(x))
+        return np.where(x>lim_val, x, np.log(1. + np.exp(x)))
    def finv(self, f):
-        return np.log(np.exp(f) - 1.)
+        return np.where(f>lim_val, f, np.log(np.exp(f) - 1.))
    def gradfactor(self, f):
-        ef = np.exp(f)
-        return (ef - 1.) / ef
+        return np.where(f>lim_val, 1., 1 - np.exp(-f))
    def initialize(self, f):
        if np.any(f < 0.):
            print "Warning: changing parameters to satisfy constraints"
        return np.abs(f)
    def __str__(self):
        return '(+ve)'
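
The rewritten gradfactor above uses the identity (e^f - 1)/e^f = 1 - e^(-f): the right-hand form never overflows for large f, which is why the np.where can simply return 1. once f > lim_val. A quick check of the algebra (illustrative only, not part of the commit):

    import numpy as np

    f = np.array([1e-3, 1., 10., 30.])
    old = (np.exp(f) - 1.) / np.exp(f)  # original form: np.exp(f) overflows for large f
    new = 1. - np.exp(-f)               # rewritten form: np.exp(-f) underflows harmlessly
    print(np.allclose(old, new))        # True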
@@ -39,18 +42,19 @@ class logexp(transformation):
class negative_logexp(transformation):
    domain = NEGATIVE
    def f(self, x):
-        return -np.log(1. + np.exp(x))
+        return -logexp.f(x) #np.log(1. + np.exp(x))
    def finv(self, f):
-        return np.log(np.exp(-f) - 1.)
+        return logexp.finv(-f) #np.log(np.exp(-f) - 1.)
    def gradfactor(self, f):
-        ef = np.exp(-f)
-        return -(ef - 1.) / ef
+        return -logexp.gradfactor(-f)
+        #ef = np.exp(-f)
+        #return -(ef - 1.) / ef
    def initialize(self, f):
-        return -np.abs(f)
+        return -logexp.initialize(f) #np.abs(f)
    def __str__(self):
        return '(-ve)'
-class logexp_clipped(transformation):
+class logexp_clipped(logexp):
    max_bound = 1e100
    min_bound = 1e-10
    log_max_bound = np.log(max_bound)
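
One caveat with the delegation above (it applies to negative_exponent below too): in Python 2, logexp.f(x) is an unbound method call and raises a TypeError unless its first argument is a logexp instance. A hedged sketch of how the reuse could work through the inheritance the commit message contemplates (illustrative, not the committed code; assumes it sits in the same module so logexp and NEGATIVE are in scope):

    class negative_logexp(logexp):  # inherit, as the commit message suggests
        domain = NEGATIVE
        def f(self, x):
            return -logexp.f(self, x)            # pass self explicitly to the parent
        def finv(self, f):
            return logexp.finv(self, -f)
        def gradfactor(self, f):
            return -logexp.gradfactor(self, -f)  # same algebra as the commented-out form
        def __str__(self):
            return '(-ve)'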
@@ -78,9 +82,10 @@ class logexp_clipped(transformation):
        return '(+ve_c)'
class exponent(transformation):
+    # TODO: can't allow this to go to zero, need to set a lower bound. Similar with negative exponent below. See old MATLAB code.
    domain = POSITIVE
    def f(self, x):
-        return np.exp(x)
+        return np.where(x<lim_val, np.where(x>-lim_val, np.exp(x), np.exp(-lim_val)), np.exp(lim_val))
    def finv(self, x):
        return np.log(x)
    def gradfactor(self, f):
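
The nested np.where in exponent.f is equivalent to clamping x to [-lim_val, lim_val] before exponentiating. np.clip expresses that more directly and never evaluates np.exp on out-of-range values, so no spurious overflow warnings are raised (np.where evaluates both branches). A sketch (illustrative only; clipped_exp is a made-up name):

    import sys
    import numpy as np

    lim_val = -np.log(sys.float_info.epsilon)

    def clipped_exp(x):
        # Same result as the nested np.where in the diff, but exp only
        # ever sees arguments already clamped into a safe range.
        return np.exp(np.clip(x, -lim_val, lim_val))

    print(clipped_exp(np.array([-1000., 0., 1000.])))  # [exp(-lim_val), 1., exp(lim_val)]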
@@ -92,18 +97,16 @@ class exponent(transformation):
    def __str__(self):
        return '(+ve)'
-class negative_exponent(transformation):
+class negative_exponent(exponent):
    domain = NEGATIVE
    def f(self, x):
-        return -np.exp(x)
-    def finv(self, x):
-        return np.log(-x)
+        return -exponent.f(x)
+    def finv(self, f):
+        return exponent.finv(-f)
    def gradfactor(self, f):
        return f
    def initialize(self, f):
-        if np.any(f > 0.):
-            print "Warning: changing parameters to satisfy constraints"
-        return -np.abs(f)
+        return -exponent.initialize(f) #np.abs(f)
    def __str__(self):
        return '(-ve)'
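
For both exponent transforms, gradfactor can return f unchanged: if f = exp(x) then df/dx = exp(x) = f, and if f = -exp(x) then df/dx = -exp(x) = f as well. A finite-difference sanity check of that claim (illustrative only):

    import numpy as np

    x = np.linspace(-2., 2., 5)
    h = 1e-6
    for sign in (1., -1.):                      # covers exponent and negative_exponent
        f = sign * np.exp(x)
        dfdx = (sign * np.exp(x + h) - sign * np.exp(x - h)) / (2. * h)
        assert np.allclose(dfdx, f, rtol=1e-4)  # df/dx equals f in both cases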

View file

@@ -51,6 +51,24 @@ def linear(input_dim,variances=None,ARD=False):
    part = parts.linear.Linear(input_dim,variances,ARD)
    return kern(input_dim, [part])
+def mlp(input_dim,variance=1., weight_variance=None,bias_variance=100.,ARD=False):
+    """
+    Construct an MLP kernel
+
+    :param input_dim: dimensionality of the kernel, obligatory
+    :type input_dim: int
+    :param variance: the variance of the kernel
+    :type variance: float
+    :param weight_variance: the variances of the input weights in the neural network (length 1 if the kernel is isotropic)
+    :type weight_variance: np.ndarray or float
+    :param bias_variance: the variance of the biases in the neural network.
+    :type bias_variance: float
+    :param ARD: Automatic Relevance Determination (allows for an ARD version of the covariance)
+    :type ARD: Boolean
+    """
+    part = parts.mlp.MLP(input_dim,variance,weight_variance,bias_variance,ARD)
+    return kern(input_dim, [part])
def white(input_dim,variance=1.):
    """
    Construct a white kernel.
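
A minimal usage sketch for the mlp constructor added above (assumed usage based on the GPy API of this era, where kern objects expose K(X); the numbers are arbitrary):

    import numpy as np
    import GPy

    X = np.random.randn(10, 1)
    k = GPy.kern.mlp(1, variance=1., weight_variance=1., bias_variance=100.)
    K = k.K(X)  # 10 x 10 covariance matrix under the MLP kernel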
@@ -253,6 +271,8 @@ def prod(k1,k2,tensor=False):
    :param k1, k2: the kernels to multiply
    :type k1, k2: kernpart
    :param tensor: whether the kernels are multiplied as functions defined on the same input space (default) or on the product of the input spaces
+    :type tensor: Boolean
+    :rtype: kernel object
    """
    part = parts.prod.Prod(k1, k2, tensor)
@@ -260,13 +280,13 @@ def prod(k1,k2,tensor=False):
def symmetric(k):
    """
-    Construct a symmetrical kernel from an existing kernel
+    Construct a symmetric kernel from an existing kernel
    """
    k_ = k.copy()
    k_.parts = [symmetric.Symmetric(p) for p in k.parts]
    return k_
-def coregionalise(Nout,R=1, W=None, kappa=None):
+def coregionalise(Nout, R=1, W=None, kappa=None):
    p = parts.coregionalise.Coregionalise(Nout,R,W,kappa)
    return kern(1,[p])
@@ -291,11 +311,13 @@ def fixed(input_dim, K, variance=1.):
    """
    Construct a Fixed effect kernel.

-    Arguments
-    ---------
-    input_dim (int), obligatory
-    K (np.array), obligatory
-    variance (float)
+    :param input_dim: the number of input dimensions
+    :type input_dim: int (input_dim=1 is the only value currently supported)
+    :param K: the fixed covariance matrix
+    :type K: np.array
+    :param variance: kernel variance :math:`\sigma^2`
+    :type variance: float
+    :rtype: kern object
    """
    part = parts.fixed.Fixed(input_dim, K, variance)
    return kern(input_dim, [part])
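
A usage sketch for the fixed-effect kernel documented above (assumptions: K must be a positive semi-definite matrix sized to match the training inputs; the values here are arbitrary):

    import numpy as np
    import GPy

    X = np.random.randn(5, 1)
    K_fixed = np.dot(X, X.T)  # any precomputed PSD matrix matching the data size
    k = GPy.kern.fixed(1, K_fixed, variance=1.)  # input_dim=1 per the docstring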
@@ -318,7 +340,7 @@ def independent_outputs(k):
def hierarchical(k):
    """
-    Construct a kernel with independent outputs from an existing kernel
+    TODO This can't be right! Construct a kernel with independent outputs from an existing kernel
    """
    # for sl in k.input_slices:
    #     assert (sl.start is None) and (sl.stop is None), "cannot adjust input slices! (TODO)"

View file

@@ -8,6 +8,7 @@ import independent_outputs
import linear
import Matern32
import Matern52
+import mlp
import periodic_exponential
import periodic_Matern32
import periodic_Matern52

View file

@@ -37,7 +37,6 @@ class GPRegression(GP):
    def getstate(self):
        return GP.getstate(self)
    def setstate(self, state):
        return GP.setstate(self, state)

View file

@@ -59,6 +59,11 @@ class GradientTests(unittest.TestCase):
        k = GPy.kern.rbf(2, ARD=True)
        self.check_model_with_white(k, model_type='GPRegression', dimension=2)
+    def test_GPRegression_mlp_1d(self):
+        ''' Testing GP regression with an mlp kernel (plus white kernel) on 1d data '''
+        mlp = GPy.kern.mlp(1)
+        self.check_model_with_white(mlp, model_type='GPRegression', dimension=1)
    def test_GPRegression_matern52_1D(self):
        ''' Testing the GP regression with matern52 kernel on 1d data '''
        matern52 = GPy.kern.Matern52(1)