Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-06 02:24:17 +02:00
Modifications to transformations ... not sure which tests to run to make sure I haven't messed things up. New code avoids exponentiating values greater than -log(eps) or less than log(eps). Also changed the negative code to call the positive code (I think they should inherit the positive code ... but maybe not).
This commit is contained in:
parent 9e0795afc4
commit 7aac769d37
5 changed files with 58 additions and 28 deletions
@@ -4,6 +4,8 @@
 import numpy as np
 from GPy.core.domains import POSITIVE, NEGATIVE, BOUNDED
+import sys
+lim_val = -np.log(sys.float_info.epsilon)
 
 class transformation(object):
     domain = None
 
@@ -17,7 +19,7 @@ class transformation(object):
         """ df_dx evaluated at self.f(x)=f"""
         raise NotImplementedError
     def initialize(self, f):
-        """ produce a sensible initial values for f(x)"""
+        """ produce a sensible initial value for f(x)"""
         raise NotImplementedError
     def __str__(self):
         raise NotImplementedError
@@ -25,13 +27,14 @@ class transformation(object):
 class logexp(transformation):
     domain = POSITIVE
     def f(self, x):
-        return np.log(1. + np.exp(x))
+        return np.where(x>lim_val, x, np.log(1. + np.exp(x)))
     def finv(self, f):
-        return np.log(np.exp(f) - 1.)
+        return np.where(f>lim_val, f, np.log(np.exp(f) - 1.))
     def gradfactor(self, f):
-        ef = np.exp(f)
-        return (ef - 1.) / ef
+        return np.where(f>lim_val, 1., 1 - np.exp(-f))
     def initialize(self, f):
+        if np.any(f < 0.):
+            print "Warning: changing parameters to satisfy constraints"
         return np.abs(f)
     def __str__(self):
         return '(+ve)'
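The gradfactor change uses the identity (e^f - 1)/e^f = 1 - e^(-f), which avoids overflowing np.exp(f) for large f. A minimal standalone sketch of the guarded softplus (assuming only numpy; the helper name softplus is mine, not GPy's):

    import sys
    import numpy as np

    lim_val = -np.log(sys.float_info.epsilon)  # about 36.04 for float64

    def softplus(x):
        # For x > lim_val the correction log(1 + exp(-x)) falls below
        # machine epsilon, so log(1 + exp(x)) equals x to float precision.
        # np.where still evaluates both branches, so NumPy may warn about
        # overflow in exp, but the selected value is finite and exact.
        x = np.asarray(x, dtype=float)
        return np.where(x > lim_val, x, np.log(1. + np.exp(x)))

    print(softplus(np.array([-5., 0., 5., 1000.])))
    # without the guard, np.log(1. + np.exp(1000.)) returns inf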
@@ -39,18 +42,19 @@ class logexp(transformation):
 class negative_logexp(transformation):
     domain = NEGATIVE
     def f(self, x):
-        return -np.log(1. + np.exp(x))
+        return -logexp.f(x) #np.log(1. + np.exp(x))
     def finv(self, f):
-        return np.log(np.exp(-f) - 1.)
+        return logexp.finv(-f) #np.log(np.exp(-f) - 1.)
     def gradfactor(self, f):
-        ef = np.exp(-f)
-        return -(ef - 1.) / ef
+        return -logexp.gradfactor(-f)
+        #ef = np.exp(-f)
+        #return -(ef - 1.) / ef
     def initialize(self, f):
-        return -np.abs(f)
+        return -logexp.initialize(f) #np.abs(f)
     def __str__(self):
         return '(-ve)'
 
-class logexp_clipped(transformation):
+class logexp_clipped(logexp):
     max_bound = 1e100
     min_bound = 1e-10
     log_max_bound = np.log(max_bound)
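The commit message floats the idea that the negative transforms should inherit from the positive ones rather than re-implement them. A hedged sketch of that alternative (my code, not GPy's; note that the unbound calls like logexp.f(x) in the hunk above would fail at runtime in Python 2, which passing self avoids):

    # Hypothetical: derive the negative transform from the positive one,
    # negating inputs/outputs and passing self so the calls are bound.
    class negative_logexp(logexp):
        domain = NEGATIVE
        def f(self, x):
            return -logexp.f(self, x)
        def finv(self, f):
            return logexp.finv(self, -f)
        def gradfactor(self, f):
            return -logexp.gradfactor(self, -f)
        def initialize(self, f):
            return -logexp.initialize(self, f)
        def __str__(self):
            return '(-ve)'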
@@ -78,9 +82,10 @@ class logexp_clipped(transformation):
         return '(+ve_c)'
 
 class exponent(transformation):
+    # TODO: can't allow this to go to zero, need to set a lower bound. Similar with negative exponent below. See old MATLAB code.
     domain = POSITIVE
     def f(self, x):
-        return np.exp(x)
+        return np.where(x<lim_val, np.where(x>-lim_val, np.exp(x), np.exp(-lim_val)), np.exp(lim_val))
     def finv(self, x):
         return np.log(x)
     def gradfactor(self, f):
@@ -92,18 +97,16 @@ class exponent(transformation):
     def __str__(self):
         return '(+ve)'
 
-class negative_exponent(transformation):
+class negative_exponent(exponent):
     domain = NEGATIVE
     def f(self, x):
-        return -np.exp(x)
-    def finv(self, x):
-        return np.log(-x)
+        return -exponent.f(x)
+    def finv(self, f):
+        return exponent.finv(-f)
     def gradfactor(self, f):
         return f
     def initialize(self, f):
-        if np.any(f > 0.):
-            print "Warning: changing parameters to satisfy constraints"
-        return -np.abs(f)
+        return -exponent.initialize(f) #np.abs(f)
     def __str__(self):
         return '(-ve)'
 
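For the exponent transform, the nested np.where pins np.exp to [exp(-lim_val), exp(lim_val)], roughly [eps, 1/eps], instead of letting it underflow to 0 or overflow to inf. A standalone sketch (numpy only; clipped_exp is my name, not GPy's):

    import sys
    import numpy as np

    lim_val = -np.log(sys.float_info.epsilon)

    def clipped_exp(x):
        # clamp the argument range so exp never reaches 0 or inf
        x = np.asarray(x, dtype=float)
        return np.where(x < lim_val,
                        np.where(x > -lim_val, np.exp(x), np.exp(-lim_val)),
                        np.exp(lim_val))

    print(clipped_exp(np.array([-1000., 0., 1000.])))
    # roughly [2.2e-16, 1.0, 4.5e+15] instead of [0., 1., inf]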
@@ -51,6 +51,24 @@ def linear(input_dim,variances=None,ARD=False):
     part = parts.linear.Linear(input_dim,variances,ARD)
     return kern(input_dim, [part])
 
+def mlp(input_dim, variance=1., weight_variance=None, bias_variance=100., ARD=False):
+    """
+    Construct an MLP kernel
+
+    :param input_dim: dimensionality of the kernel, obligatory
+    :type input_dim: int
+    :param variance: the variance of the kernel
+    :type variance: float
+    :param weight_variance: the variances of the input weights in the neural network
+    :type weight_variance: vector of weight variances (length 1 if kernel is isotropic)
+    :param bias_variance: the variance of the biases in the neural network
+    :type bias_variance: float
+    :param ARD: Automatic Relevance Determination (allows for ARD version of covariance)
+    :type ARD: Boolean
+    """
+    part = parts.mlp.MLP(input_dim, variance, weight_variance, bias_variance, ARD)
+    return kern(input_dim, [part])
+
 def white(input_dim,variance=1.):
     """
     Construct a white kernel.
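For reference, a usage sketch of the new constructor, echoing the gradient test added below (the ARD call is an assumption based on the signature above, not taken from the commit):

    import GPy
    k = GPy.kern.mlp(1)   # all defaults, as in the new unit test
    k_ard = GPy.kern.mlp(2, variance=1., bias_variance=100., ARD=True)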
@@ -253,6 +271,8 @@ def prod(k1,k2,tensor=False):
 
     :param k1, k2: the kernels to multiply
     :type k1, k2: kernpart
+    :param tensor: whether the kernels are multiplied as functions defined on the same input space (default) or on the product of the input spaces
+    :type tensor: Boolean
     :rtype: kernel object
     """
     part = parts.prod.Prod(k1, k2, tensor)
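A short sketch of the two modes the new tensor parameter documents (kernel names taken from elsewhere in this commit; the exact behaviour is an assumption from the docstring):

    import GPy
    k1 = GPy.kern.rbf(1)
    k2 = GPy.kern.Matern52(1)
    k_shared = GPy.kern.prod(k1, k2)               # product on the same input space
    k_tensor = GPy.kern.prod(k1, k2, tensor=True)  # product of the two input spaces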
@@ -260,13 +280,13 @@ def prod(k1,k2,tensor=False):
 
 def symmetric(k):
     """
-    Construct a symmetrical kernel from an existing kernel
+    Construct a symmetric kernel from an existing kernel
     """
     k_ = k.copy()
     k_.parts = [symmetric.Symmetric(p) for p in k.parts]
     return k_
 
-def coregionalise(Nout,R=1, W=None, kappa=None):
+def coregionalise(Nout, R=1, W=None, kappa=None):
     p = parts.coregionalise.Coregionalise(Nout,R,W,kappa)
     return kern(1,[p])
 
@@ -291,11 +311,13 @@ def fixed(input_dim, K, variance=1.):
     """
     Construct a Fixed effect kernel.
 
-    Arguments
-    ---------
-    input_dim (int), obligatory
-    K (np.array), obligatory
-    variance (float)
+    :param input_dim: the number of input dimensions
+    :type input_dim: int (input_dim=1 is the only value currently supported)
+    :param K: the fixed matrix, obligatory
+    :type K: np.array
+    :param variance: kernel variance
+    :type variance: float
+    :rtype: kern object
     """
     part = parts.fixed.Fixed(input_dim, K, variance)
     return kern(input_dim, [part])
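A usage sketch for the fixed-effect kernel (the data and the way K is built here are my assumptions; only input_dim=1 is supported per the docstring):

    import numpy as np
    import GPy

    X = np.random.randn(10, 1)
    K = np.dot(X, X.T)                     # a precomputed covariance matrix
    k = GPy.kern.fixed(1, K, variance=1.)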
@@ -318,7 +340,7 @@ def independent_outputs(k):
 
 def hierarchical(k):
     """
-    Construct a kernel with independent outputs from an existing kernel
+    TODO This can't be right! Construct a kernel with independent outputs from an existing kernel
     """
 #     for sl in k.input_slices:
 #         assert (sl.start is None) and (sl.stop is None), "cannot adjust input slices! (TODO)"
@@ -8,6 +8,7 @@ import independent_outputs
 import linear
 import Matern32
 import Matern52
+import mlp
 import periodic_exponential
 import periodic_Matern32
 import periodic_Matern52
@@ -37,7 +37,6 @@ class GPRegression(GP):
     def getstate(self):
         return GP.getstate(self)
 
-
     def setstate(self, state):
         return GP.setstate(self, state)
 
@@ -59,6 +59,11 @@ class GradientTests(unittest.TestCase):
         k = GPy.kern.rbf(2, ARD=True)
         self.check_model_with_white(k, model_type='GPRegression', dimension=2)
 
+    def test_GPRegression_mlp_1d(self):
+        ''' Testing GP regression with an mlp kernel plus white kernel on 1d data '''
+        mlp = GPy.kern.mlp(1)
+        self.check_model_with_white(mlp, model_type='GPRegression', dimension=1)
+
     def test_GPRegression_matern52_1D(self):
         ''' Testing the GP regression with matern52 kernel on 1d data '''
         matern52 = GPy.kern.Matern52(1)
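Given the commit message's worry about which tests to run, one quick check outside the suite is a gradient check on a model whose parameters go through these transformations (a hedged sketch: the data are made up, and the constrain_positive/checkgrad calls assume the GPy model API of this era):

    import numpy as np
    import GPy

    X = np.random.randn(20, 1)
    Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
    m = GPy.models.GPRegression(X, Y, GPy.kern.rbf(1))
    m.constrain_positive('')   # route all parameters through logexp
    print(m.checkgrad())       # should report matching gradients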