Removed the separate Matern kernel modules (consolidated into _src/stationary.py)

This commit is contained in:
James Hensman 2014-02-21 17:06:06 +00:00
parent 365bc42140
commit 78ceef01c3
7 changed files with 246 additions and 430 deletions

View file

@ -3,6 +3,7 @@ from _src.white import White
from _src.kern import Kern
from _src.linear import Linear
from _src.brownian import Brownian
from _src.stationary import Exponential, Matern32, Matern52, ExpQuad
#from _src.bias import Bias
#import coregionalize
#import exponential

View file

@ -1,139 +0,0 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernpart import Kernpart
import numpy as np
from scipy import integrate
class Matern32(Kernpart):
    """
    Matern 3/2 kernel:

    .. math::

       k(r) = \\sigma^2 (1 + \\sqrt{3} r) \exp(- \sqrt{3} r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }

    :param input_dim: the number of input dimensions
    :type input_dim: int
    :param variance: the variance :math:`\sigma^2`
    :type variance: float
    :param lengthscale: the vector of lengthscale :math:`\ell_i`
    :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
    :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single lengthscale parameter \ell), otherwise there is one lengthscale parameter per dimension.
    :type ARD: Boolean
    :rtype: kernel object
    """

    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False):
        # Parameter vector layout: [variance, lengthscale(s)].
        self.input_dim = input_dim
        self.ARD = ARD
        if ARD == False:
            # Isotropic: a single lengthscale shared by all input dimensions.
            self.num_params = 2
            self.name = 'Mat32'
            if lengthscale is not None:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel"
            else:
                lengthscale = np.ones(1)
        else:
            # ARD: one lengthscale per input dimension.
            self.num_params = self.input_dim + 1
            self.name = 'Mat32'
            if lengthscale is not None:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size == self.input_dim, "bad number of lengthscales"
            else:
                lengthscale = np.ones(self.input_dim)
        self._set_params(np.hstack((variance, lengthscale.flatten())))

    def _get_params(self):
        """return the value of the parameters."""
        return np.hstack((self.variance, self.lengthscale))

    def _set_params(self, x):
        """set the value of the parameters from the flat vector [variance, lengthscale(s)]."""
        assert x.size == self.num_params
        self.variance = x[0]
        self.lengthscale = x[1:]

    def _get_param_names(self):
        """return parameter names, matching the layout of _get_params()."""
        if self.num_params == 2:
            return ['variance', 'lengthscale']
        else:
            return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)]

    def K(self, X, X2, target):
        """Compute the covariance matrix between X and X2 and add it into target (in place)."""
        if X2 is None: X2 = X
        # r_{nm} = sqrt(sum_d ((X_nd - X2_md) / l_d)^2), shape (N, M)
        dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))
        np.add(self.variance * (1 + np.sqrt(3.) * dist) * np.exp(-np.sqrt(3.) * dist), target, target)

    def Kdiag(self, X, target):
        """Compute the diagonal of the covariance matrix associated to X."""
        # k(x, x) == variance for this stationary kernel.
        np.add(target, self.variance, target)

    def _param_grad_helper(self, dL_dK, X, X2, target):
        """derivative of the covariance matrix with respect to the parameters,
        accumulated into target ([variance, lengthscale(s)])."""
        if X2 is None: X2 = X
        dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))
        dvar = (1 + np.sqrt(3.) * dist) * np.exp(-np.sqrt(3.) * dist)
        # 1/dist with 0 where dist == 0 (diagonal), avoiding division by zero.
        invdist = 1. / np.where(dist != 0., dist, np.inf)
        dist2M = np.square(X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 3
        target[0] += np.sum(dvar * dL_dK)
        if self.ARD == True:
            # Per-dimension lengthscale gradient.
            dl = (self.variance * 3 * dist * np.exp(-np.sqrt(3.) * dist))[:, :, np.newaxis] * dist2M * invdist[:, :, np.newaxis]
            target[1:] += (dl * dL_dK[:, :, None]).sum(0).sum(0)
        else:
            # Single shared lengthscale: sum the per-dimension contributions.
            dl = (self.variance * 3 * dist * np.exp(-np.sqrt(3.) * dist)) * dist2M.sum(-1) * invdist
            target[1] += np.sum(dl * dL_dK)

    def dKdiag_dtheta(self, dL_dKdiag, X, target):
        """derivative of the diagonal of the covariance matrix with respect to the parameters."""
        # The diagonal equals the variance, so only the variance gradient is nonzero.
        target[0] += np.sum(dL_dKdiag)

    def gradients_X(self, dL_dK, X, X2, target):
        """derivative of the covariance matrix with respect to X, accumulated into target."""
        if X2 is None:
            dist = np.sqrt(np.sum(np.square((X[:, None, :] - X[None, :, :]) / self.lengthscale), -1))[:, :, None]
            # Factor 2: X appears in both arguments of K(X, X).
            ddist_dX = 2*(X[:, None, :] - X[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf)
        else:
            ddist_dX = (X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf)
        gradients_X = -np.transpose(3 * self.variance * dist * np.exp(-np.sqrt(3) * dist) * ddist_dX, (1, 0, 2))
        target += np.sum(gradients_X * dL_dK.T[:, :, None], 0)

    def dKdiag_dX(self, dL_dKdiag, X, target):
        # The diagonal of a stationary kernel does not depend on X.
        pass

    def Gram_matrix(self, F, F1, F2, lower, upper):
        """
        Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1.

        :param F: vector of functions
        :type F: np.array
        :param F1: vector of derivatives of F
        :type F1: np.array
        :param F2: vector of second derivatives of F
        :type F2: np.array
        :param lower,upper: boundaries of the input domain
        :type lower,upper: floats
        """
        assert self.input_dim == 1
        def L(x, i):
            # Differential operator associated with the Matern 3/2 RKHS.
            return(3. / self.lengthscale ** 2 * F[i](x) + 2 * np.sqrt(3) / self.lengthscale * F1[i](x) + F2[i](x))
        n = F.shape[0]
        G = np.zeros((n, n))
        for i in range(n):
            for j in range(i, n):
                G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0]
        Flower = np.array([f(lower) for f in F])[:, None]
        F1lower = np.array([f(lower) for f in F1])[:, None]
        # print "OLD \n", np.dot(F1lower,F1lower.T), "\n \n"
        # return(G)
        return(self.lengthscale ** 3 / (12.*np.sqrt(3) * self.variance) * G + 1. / self.variance * np.dot(Flower, Flower.T) + self.lengthscale ** 2 / (3.*self.variance) * np.dot(F1lower, F1lower.T))

View file

@ -1,145 +0,0 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernpart import Kernpart
import numpy as np
import hashlib
from scipy import integrate
class Matern52(Kernpart):
    """
    Matern 5/2 kernel:

    .. math::

       k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }

    :param input_dim: the number of input dimensions
    :type input_dim: int
    :param variance: the variance :math:`\sigma^2`
    :type variance: float
    :param lengthscale: the vector of lengthscale :math:`\ell_i`
    :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
    :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single lengthscale parameter \ell), otherwise there is one lengthscale parameter per dimension.
    :type ARD: Boolean
    :rtype: kernel object
    """

    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False):
        # Parameter vector layout: [variance, lengthscale(s)].
        self.input_dim = input_dim
        self.ARD = ARD
        if not ARD:
            # Isotropic: a single lengthscale shared by all input dimensions.
            self.num_params = 2
            self.name = 'Mat52'
            if lengthscale is not None:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel"
            else:
                lengthscale = np.ones(1)
        else:
            # ARD: one lengthscale per input dimension.
            self.num_params = self.input_dim + 1
            self.name = 'Mat52'
            if lengthscale is not None:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size == self.input_dim, "bad number of lengthscales"
            else:
                lengthscale = np.ones(self.input_dim)
        self._set_params(np.hstack((variance, lengthscale.flatten())))

    def _get_params(self):
        """return the value of the parameters."""
        return np.hstack((self.variance, self.lengthscale))

    def _set_params(self, x):
        """set the value of the parameters from the flat vector [variance, lengthscale(s)]."""
        assert x.size == self.num_params
        self.variance = x[0]
        self.lengthscale = x[1:]

    def _get_param_names(self):
        """return parameter names, matching the layout of _get_params()."""
        if self.num_params == 2:
            return ['variance', 'lengthscale']
        else:
            return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)]

    def K(self, X, X2, target):
        """Compute the covariance matrix between X and X2 and add it into target (in place)."""
        if X2 is None: X2 = X
        dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))
        np.add(self.variance * (1 + np.sqrt(5.) * dist + 5. / 3 * dist ** 2) * np.exp(-np.sqrt(5.) * dist), target, target)

    def Kdiag(self, X, target):
        """Compute the diagonal of the covariance matrix associated to X."""
        # k(x, x) == variance for this stationary kernel.
        np.add(target, self.variance, target)

    def _param_grad_helper(self, dL_dK, X, X2, target):
        """derivative of the covariance matrix with respect to the parameters,
        accumulated into target ([variance, lengthscale(s)])."""
        if X2 is None: X2 = X
        dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))
        # 1/dist with 0 where dist == 0 (diagonal), avoiding division by zero.
        invdist = 1. / np.where(dist != 0., dist, np.inf)
        dist2M = np.square(X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 3
        dvar = (1 + np.sqrt(5.) * dist + 5. / 3 * dist ** 2) * np.exp(-np.sqrt(5.) * dist)
        target[0] += np.sum(dvar * dL_dK)
        # FIX: `dl` used to be computed unconditionally before this branch and then
        # immediately recomputed in both arms -- the first O(N^2 D) computation was
        # dead work and has been removed.
        if self.ARD:
            dl = (self.variance * 5. / 3 * dist * (1 + np.sqrt(5.) * dist) * np.exp(-np.sqrt(5.) * dist))[:, :, np.newaxis] * dist2M * invdist[:, :, np.newaxis]
            target[1:] += (dl * dL_dK[:, :, None]).sum(0).sum(0)
        else:
            dl = (self.variance * 5. / 3 * dist * (1 + np.sqrt(5.) * dist) * np.exp(-np.sqrt(5.) * dist)) * dist2M.sum(-1) * invdist
            target[1] += np.sum(dl * dL_dK)

    def dKdiag_dtheta(self, dL_dKdiag, X, target):
        """derivative of the diagonal of the covariance matrix with respect to the parameters."""
        # The diagonal equals the variance, so only the variance gradient is nonzero.
        target[0] += np.sum(dL_dKdiag)

    def gradients_X(self, dL_dK, X, X2, target):
        """derivative of the covariance matrix with respect to X, accumulated into target."""
        if X2 is None:
            dist = np.sqrt(np.sum(np.square((X[:, None, :] - X[None, :, :]) / self.lengthscale), -1))[:, :, None]
            # Factor 2: X appears in both arguments of K(X, X).
            ddist_dX = 2 * (X[:, None, :] - X[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf)
        else:
            dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))[:, :, None]
            ddist_dX = (X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf)
        gradients_X = -np.transpose(self.variance * 5. / 3 * dist * (1 + np.sqrt(5) * dist) * np.exp(-np.sqrt(5) * dist) * ddist_dX, (1, 0, 2))
        target += np.sum(gradients_X * dL_dK.T[:, :, None], 0)

    def dKdiag_dX(self, dL_dKdiag, X, target):
        # The diagonal of a stationary kernel does not depend on X.
        pass

    def Gram_matrix(self, F, F1, F2, F3, lower, upper):
        """
        Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1.

        :param F: vector of functions
        :type F: np.array
        :param F1: vector of derivatives of F
        :type F1: np.array
        :param F2: vector of second derivatives of F
        :type F2: np.array
        :param F3: vector of third derivatives of F
        :type F3: np.array
        :param lower,upper: boundaries of the input domain
        :type lower,upper: floats
        """
        assert self.input_dim == 1
        def L(x, i):
            # Differential operator associated with the Matern 5/2 RKHS.
            return(5 * np.sqrt(5) / self.lengthscale ** 3 * F[i](x) + 15. / self.lengthscale ** 2 * F1[i](x) + 3 * np.sqrt(5) / self.lengthscale * F2[i](x) + F3[i](x))
        n = F.shape[0]
        G = np.zeros((n, n))
        for i in range(n):
            for j in range(i, n):
                G[i, j] = G[j, i] = integrate.quad(lambda x: L(x, i) * L(x, j), lower, upper)[0]
        G_coef = 3. * self.lengthscale ** 5 / (400 * np.sqrt(5))
        Flower = np.array([f(lower) for f in F])[:, None]
        F1lower = np.array([f(lower) for f in F1])[:, None]
        F2lower = np.array([f(lower) for f in F2])[:, None]
        orig = 9. / 8 * np.dot(Flower, Flower.T) + 9. * self.lengthscale ** 4 / 200 * np.dot(F2lower, F2lower.T)
        orig2 = 3. / 5 * self.lengthscale ** 2 * (np.dot(F1lower, F1lower.T) + 1. / 8 * np.dot(Flower, F2lower.T) + 1. / 8 * np.dot(F2lower, Flower.T))
        return(1. / self.variance * (G_coef * G + orig + orig2))

View file

@ -1,129 +0,0 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernpart import Kernpart
import numpy as np
from scipy import integrate
class Exponential(Kernpart):
    """
    Exponential kernel (aka Ornstein-Uhlenbeck or Matern 1/2)

    .. math::

       k(r) = \sigma^2 \exp(- r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }

    :param input_dim: the number of input dimensions
    :type input_dim: int
    :param variance: the variance :math:`\sigma^2`
    :type variance: float
    :param lengthscale: the vector of lengthscale :math:`\ell_i`
    :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
    :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single lengthscale parameter \ell), otherwise there is one lengthscale parameter per dimension.
    :type ARD: Boolean
    :param name: the name of the kernel
    :rtype: kernel object
    """

    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='exp'):
        self.input_dim = input_dim
        self.ARD = ARD
        self.variance = variance
        self.name = name
        if not ARD:
            # Isotropic: a single lengthscale shared by all input dimensions.
            self.num_params = 2
            if lengthscale is not None:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel"
            else:
                lengthscale = np.ones(1)
        else:
            # ARD: one lengthscale per input dimension.
            self.num_params = self.input_dim + 1
            if lengthscale is not None:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size == self.input_dim, "bad number of lengthscales"
            else:
                lengthscale = np.ones(self.input_dim)
        # BUG FIX: the validated lengthscale was previously discarded (only the
        # parameter *names* were registered), leaving self.lengthscale unset and
        # crashing K()/Kdiag()/_param_grad_helper() on first use.
        self.lengthscale = lengthscale
        self.set_as_parameter('variance', 'lengthscale')

    def K(self, X, X2, target):
        """Compute the covariance matrix between X and X2 and add it into target (in place)."""
        if X2 is None: X2 = X
        dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))
        np.add(self.variance * np.exp(-dist), target, target)

    def Kdiag(self, X, target):
        """Compute the diagonal of the covariance matrix associated to X."""
        # k(x, x) == variance for this stationary kernel.
        np.add(target, self.variance, target)

    def _param_grad_helper(self, dL_dK, X, X2, target):
        """derivative of the covariance matrix with respect to the parameters,
        accumulated into target ([variance, lengthscale(s)])."""
        if X2 is None: X2 = X
        dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))
        # 1/dist with 0 where dist == 0 (diagonal), avoiding division by zero.
        invdist = 1. / np.where(dist != 0., dist, np.inf)
        dist2M = np.square(X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 3
        dvar = np.exp(-dist)
        target[0] += np.sum(dvar * dL_dK)
        if self.ARD:
            dl = self.variance * dvar[:, :, None] * dist2M * invdist[:, :, None]
            target[1:] += (dl * dL_dK[:, :, None]).sum(0).sum(0)
        else:
            dl = self.variance * dvar * dist2M.sum(-1) * invdist
            target[1] += np.sum(dl * dL_dK)

    def dKdiag_dtheta(self, dL_dKdiag, X, target):
        """derivative of the diagonal of the covariance matrix with respect to the parameters."""
        # NB: derivative of diagonal elements wrt lengthscale is 0
        target[0] += np.sum(dL_dKdiag)

    def gradients_X(self, dL_dK, X, X2, target):
        """derivative of the covariance matrix with respect to X, accumulated into target."""
        if X2 is None:
            # FIX: the symmetric case needs the factor 2 because X appears in both
            # arguments of K(X, X) -- consistent with the Matern32/Matern52 classes.
            dist = np.sqrt(np.sum(np.square((X[:, None, :] - X[None, :, :]) / self.lengthscale), -1))[:, :, None]
            ddist_dX = 2 * (X[:, None, :] - X[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf)
        else:
            dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))[:, :, None]
            ddist_dX = (X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf)
        gradients_X = -np.transpose(self.variance * np.exp(-dist) * ddist_dX, (1, 0, 2))
        target += np.sum(gradients_X * dL_dK.T[:, :, None], 0)

    def dKdiag_dX(self, dL_dKdiag, X, target):
        # The diagonal of a stationary kernel does not depend on X.
        pass

    def Gram_matrix(self, F, F1, lower, upper):
        """
        Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1.

        :param F: vector of functions
        :type F: np.array
        :param F1: vector of derivatives of F
        :type F1: np.array
        :param lower,upper: boundaries of the input domain
        :type lower,upper: floats
        """
        assert self.input_dim == 1
        def L(x, i):
            # Differential operator associated with the exponential-kernel RKHS.
            return(1. / self.lengthscale * F[i](x) + F1[i](x))
        n = F.shape[0]
        G = np.zeros((n, n))
        for i in range(n):
            for j in range(i, n):
                G[i, j] = G[j, i] = integrate.quad(lambda x: L(x, i) * L(x, j), lower, upper)[0]
        Flower = np.array([f(lower) for f in F])[:, None]
        return(self.lengthscale / 2. / self.variance * G + 1. / self.variance * np.dot(Flower, Flower.T))

221
GPy/kern/_src/stationary.py Normal file
View file

@ -0,0 +1,221 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from ... import util
import numpy as np
from scipy import integrate
class Stationary(Kern):
    """
    Base class for stationary kernels k(X, X2) = f(r), where r is the
    lengthscale-scaled Euclidean distance between inputs.

    Subclasses implement K(X, X2) and dK_dr(X, X2); the distance computation
    and gradient plumbing here are shared.

    :param input_dim: the number of input dimensions
    :type input_dim: int
    :param variance: the variance :math:`\sigma^2` (must be of size 1)
    :param lengthscale: lengthscale(s); None gives ones of the appropriate size
    :param ARD: if True, one lengthscale per input dimension
    :type ARD: Boolean
    :param name: the name of the kernel
    """
    def __init__(self, input_dim, variance, lengthscale, ARD, name):
        super(Stationary, self).__init__(input_dim, name)
        self.ARD = ARD
        if not ARD:
            if lengthscale is None:
                lengthscale = np.ones(1)
            else:
                lengthscale = np.asarray(lengthscale)
                # BUG FIX: the comma was missing, which made this line a syntax
                # error (assert expression followed directly by a string literal).
                assert lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel"
        else:
            if lengthscale is not None:
                lengthscale = np.asarray(lengthscale)
                assert lengthscale.size in [1, input_dim], "Bad lengthscales"
                if lengthscale.size != input_dim:
                    # Broadcast a single value to one lengthscale per dimension.
                    lengthscale = np.ones(input_dim) * lengthscale
            else:
                lengthscale = np.ones(self.input_dim)
        # Positivity of both parameters is enforced via the Logexp transform.
        self.lengthscale = Param('lengthscale', lengthscale, Logexp())
        self.variance = Param('variance', variance, Logexp())
        assert self.variance.size == 1
        self.add_parameters(self.variance, self.lengthscale)

    def _dist(self, X, X2):
        """Pairwise differences X[n] - X2[m], shape (N, M, input_dim); X2=None means X2=X."""
        if X2 is None:
            X2 = X
        return X[:, None, :] - X2[None, :, :]

    def _scaled_dist(self, X, X2=None):
        """Lengthscale-scaled Euclidean distances r, shape (N, M)."""
        return np.sqrt(np.sum(np.square(self._dist(X, X2) / self.lengthscale), -1))

    def Kdiag(self, X):
        """Diagonal of K(X, X): constant, equal to the variance."""
        ret = np.empty(X.shape[0])
        ret[:] = self.variance
        return ret

    def update_gradients_diag(self, dL_dKdiag, X):
        """Gradients of the parameters when only Kdiag is used."""
        # The diagonal is independent of the lengthscale.
        self.variance.gradient = np.sum(dL_dKdiag)
        self.lengthscale.gradient = 0.

    def gradients_X_diag(self, dL_dKdiag, X):
        # The diagonal of a stationary kernel does not depend on X.
        return np.zeros(X.shape)

    def update_gradients_full(self, dL_dK, X, X2=None):
        """Set variance and lengthscale gradients given dL/dK."""
        K = self.K(X, X2)
        # K is linear in the variance, so dK/dvariance = K / variance.
        self.variance.gradient = np.sum(K * dL_dK) / self.variance
        rinv = self._inv_dist(X, X2)
        dL_dr = self.dK_dr(X, X2) * dL_dK
        # dr/dl_d = -(x_d - x2_d)^2 / (l_d^3 * r); assemble the chain rule below.
        x_xl3 = np.square(self._dist(X, X2)) / self.lengthscale**3
        if self.ARD:
            self.lengthscale.gradient = -((dL_dr * rinv)[:, :, None] * x_xl3).sum(0).sum(0)
        else:
            self.lengthscale.gradient = -((dL_dr * rinv)[:, :, None] * x_xl3).sum()

    def _inv_dist(self, X, X2=None):
        """Elementwise 1/r, with entries where r == 0 left at 0 (X2=None: the diagonal)."""
        dist = self._scaled_dist(X, X2)
        if X2 is None:
            # Invert the off-diagonal entries in place; the zero diagonal is untouched.
            # NOTE(review): duplicated rows in X give r == 0 off the diagonal and
            # hence inf here -- same as the previous behaviour; confirm acceptable.
            nondiag = util.diag.offdiag_view(dist)
            nondiag[:] = 1. / nondiag
            return dist
        else:
            return 1. / np.where(dist != 0., dist, np.inf)

    def gradients_X(self, dL_dK, X, X2=None):
        """Gradient of the objective with respect to X, shape X.shape."""
        dL_dr = self.dK_dr(X, X2) * dL_dK
        invdist = self._inv_dist(X, X2)
        ret = np.sum((invdist * dL_dr)[:, :, None] * self._dist(X, X2), 1) / self.lengthscale**2
        if X2 is None:
            # X appears in both arguments of K(X, X): double the contribution.
            ret *= 2.
        return ret
class Exponential(Stationary):
    """
    Exponential kernel, k(r) = variance * exp(-0.5 * r), on the
    lengthscale-scaled distance r.

    NOTE(review): the module this replaces used exp(-r) (no 0.5 factor);
    confirm the new scaling is intentional.
    """
    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='Exponential'):
        super(Exponential, self).__init__(input_dim, variance, lengthscale, ARD, name)

    def K(self, X, X2=None):
        """Covariance matrix between X and X2 (X2=None means X2=X)."""
        r = self._scaled_dist(X, X2)
        return self.variance * np.exp(-0.5 * r)

    def dK_dr(self, X, X2):
        """Derivative of K with respect to the scaled distance r: -K/2."""
        return -0.5 * self.K(X, X2)
class Matern32(Stationary):
    """
    Matern 3/2 kernel:

    .. math::

       k(r) = \\sigma^2 (1 + \\sqrt{3} r) \exp(- \sqrt{3} r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }

    :param input_dim: the number of input dimensions
    :type input_dim: int
    :param variance: the variance :math:`\sigma^2`
    :type variance: float
    :param lengthscale: the vector of lengthscale :math:`\ell_i`
    :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
    :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single lengthscale parameter \ell), otherwise there is one lengthscale parameter per dimension.
    :type ARD: Boolean
    :rtype: kernel object
    """
    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='Mat32'):
        super(Matern32, self).__init__(input_dim, variance, lengthscale, ARD, name)

    def K(self, X, X2=None):
        """Covariance matrix between X and X2 (X2=None means X2=X)."""
        dist = self._scaled_dist(X, X2)
        return self.variance * (1. + np.sqrt(3.) * dist) * np.exp(-np.sqrt(3.) * dist)

    def dK_dr(self, X, X2):
        """Derivative of K with respect to the scaled distance r: -3 sigma^2 r exp(-sqrt(3) r)."""
        dist = self._scaled_dist(X, X2)
        return -3. * self.variance * dist * np.exp(-np.sqrt(3.) * dist)

    def Gram_matrix(self, F, F1, F2, lower, upper):
        """
        Return the Gram matrix of the vector of functions F with respect to the
        RKHS norm. The use of this function is limited to input_dim=1.

        :param F: vector of functions
        :type F: np.array
        :param F1: vector of derivatives of F
        :type F1: np.array
        :param F2: vector of second derivatives of F
        :type F2: np.array
        :param lower,upper: boundaries of the input domain
        :type lower,upper: floats
        """
        assert self.input_dim == 1
        def L(x, i):
            # Differential operator associated with the Matern 3/2 RKHS.
            return(3. / self.lengthscale ** 2 * F[i](x) + 2 * np.sqrt(3) / self.lengthscale * F1[i](x) + F2[i](x))
        n = F.shape[0]
        G = np.zeros((n, n))
        for i in range(n):
            for j in range(i, n):
                # G is symmetric, so only the upper triangle is integrated.
                G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0]
        Flower = np.array([f(lower) for f in F])[:, None]
        F1lower = np.array([f(lower) for f in F1])[:, None]
        return(self.lengthscale ** 3 / (12.*np.sqrt(3) * self.variance) * G + 1. / self.variance * np.dot(Flower, Flower.T) + self.lengthscale ** 2 / (3.*self.variance) * np.dot(F1lower, F1lower.T))
class Matern52(Stationary):
    """
    Matern 5/2 kernel:

    .. math::

       k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r) \ \ \ \ \ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }
    """
    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='Mat52'):
        # FIX (consistency): every other Stationary subclass here provides a
        # defaulted __init__; without it, all five Stationary arguments
        # (including `name`) had to be passed explicitly. Backward compatible.
        super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, name)

    def K(self, X, X2=None):
        """Covariance matrix between X and X2 (X2=None means X2=X)."""
        r = self._scaled_dist(X, X2)
        return self.variance * (1 + np.sqrt(5.) * r + 5. / 3 * r ** 2) * np.exp(-np.sqrt(5.) * r)

    def dK_dr(self, X, X2):
        """Derivative of K with respect to the scaled distance r."""
        r = self._scaled_dist(X, X2)
        return self.variance * (10. / 3 * r - 5. * r - 5. * np.sqrt(5.) / 3 * r ** 2) * np.exp(-np.sqrt(5.) * r)

    def Gram_matrix(self, F, F1, F2, F3, lower, upper):
        """
        Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1.

        :param F: vector of functions
        :type F: np.array
        :param F1: vector of derivatives of F
        :type F1: np.array
        :param F2: vector of second derivatives of F
        :type F2: np.array
        :param F3: vector of third derivatives of F
        :type F3: np.array
        :param lower,upper: boundaries of the input domain
        :type lower,upper: floats
        """
        assert self.input_dim == 1
        def L(x, i):
            # Differential operator associated with the Matern 5/2 RKHS.
            return(5 * np.sqrt(5) / self.lengthscale ** 3 * F[i](x) + 15. / self.lengthscale ** 2 * F1[i](x) + 3 * np.sqrt(5) / self.lengthscale * F2[i](x) + F3[i](x))
        n = F.shape[0]
        G = np.zeros((n, n))
        for i in range(n):
            for j in range(i, n):
                # G is symmetric, so only the upper triangle is integrated.
                G[i, j] = G[j, i] = integrate.quad(lambda x: L(x, i) * L(x, j), lower, upper)[0]
        G_coef = 3. * self.lengthscale ** 5 / (400 * np.sqrt(5))
        Flower = np.array([f(lower) for f in F])[:, None]
        F1lower = np.array([f(lower) for f in F1])[:, None]
        F2lower = np.array([f(lower) for f in F2])[:, None]
        orig = 9. / 8 * np.dot(Flower, Flower.T) + 9. * self.lengthscale ** 4 / 200 * np.dot(F2lower, F2lower.T)
        orig2 = 3. / 5 * self.lengthscale ** 2 * (np.dot(F1lower, F1lower.T) + 1. / 8 * np.dot(Flower, F2lower.T) + 1. / 8 * np.dot(F2lower, Flower.T))
        return(1. / self.variance * (G_coef * G + orig + orig2))
class ExpQuad(Stationary):
    """
    Exponentiated-quadratic (squared-exponential / RBF) kernel,
    k(r) = variance * exp(-r^2 / 2), on the lengthscale-scaled distance r.
    """
    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='ExpQuad'):
        super(ExpQuad, self).__init__(input_dim, variance, lengthscale, ARD, name)

    def K(self, X, X2=None):
        """Covariance matrix between X and X2 (X2=None means X2=X)."""
        r = self._scaled_dist(X, X2)
        return self.variance * np.exp(-0.5 * r**2)

    def dK_dr(self, X, X2):
        """Derivative of K with respect to the scaled distance r: -r * K."""
        r = self._scaled_dist(X, X2)
        return -r * self.K(X, X2)

View file

@ -12,6 +12,7 @@ import decorators
import classification import classification
import subarray_and_sorting import subarray_and_sorting
import caching import caching
import diag
try: try:
import sympy import sympy

View file

@ -11,14 +11,14 @@ import numpy as np
def view(A, offset=0): def view(A, offset=0):
""" """
Get a view on the diagonal elements of a 2D array. Get a view on the diagonal elements of a 2D array.
This is actually a view (!) on the diagonal of the array, so you can This is actually a view (!) on the diagonal of the array, so you can
in-place adjust the view. in-place adjust the view.
:param :class:`ndarray` A: 2 dimensional numpy array :param :class:`ndarray` A: 2 dimensional numpy array
:param int offset: view offset to give back (negative entries allowed) :param int offset: view offset to give back (negative entries allowed)
:rtype: :class:`ndarray` view of diag(A) :rtype: :class:`ndarray` view of diag(A)
>>> import numpy as np >>> import numpy as np
>>> X = np.arange(9).reshape(3,3) >>> X = np.arange(9).reshape(3,3)
>>> view(X) >>> view(X)
@ -36,7 +36,7 @@ def view(A, offset=0):
""" """
from numpy.lib.stride_tricks import as_strided from numpy.lib.stride_tricks import as_strided
assert A.ndim == 2, "only implemented for 2 dimensions" assert A.ndim == 2, "only implemented for 2 dimensions"
assert A.shape[0] == A.shape[1], "attempting to get the view of non-square matrix?!" assert A.shape[0] == A.shape[1], "attempting to get the view of non-square matrix?!"
if offset > 0: if offset > 0:
return as_strided(A[0, offset:], shape=(A.shape[0] - offset, ), strides=((A.shape[0]+1)*A.itemsize, )) return as_strided(A[0, offset:], shape=(A.shape[0] - offset, ), strides=((A.shape[0]+1)*A.itemsize, ))
elif offset < 0: elif offset < 0:
@ -44,6 +44,12 @@ def view(A, offset=0):
else: else:
return as_strided(A, shape=(A.shape[0], ), strides=((A.shape[0]+1)*A.itemsize, )) return as_strided(A, shape=(A.shape[0], ), strides=((A.shape[0]+1)*A.itemsize, ))
def offdiag_view(A, offset=0):
    """
    Return a strided (n-1, m) view of the 2D array A containing every
    off-diagonal element; writing into the view mutates A in place.

    Row i of the view covers the flat elements strictly between diagonal
    entries i and i+1 (intended for square matrices, where this is exactly
    the set of off-diagonal elements).

    :param ndarray A: 2 dimensional numpy array
    :param int offset: shift of the starting position within the flat array
    :rtype: :class:`ndarray` view aliasing A
    """
    from numpy.lib.stride_tricks import as_strided
    assert A.ndim == 2, "only implemented for 2 dimensions"
    # Flatten A into a 1-D alias, then re-stride so each row starts one
    # element past a diagonal entry (row stride = full row + one element).
    flat = as_strided(A, shape=(A.size,), strides=(A.itemsize,))
    rows = A.shape[0] - 1
    return as_strided(flat[(1 + offset):],
                      shape=(rows, A.shape[1]),
                      strides=(A.strides[0] + A.itemsize, A.strides[1]))
def _diag_ufunc(A,b,offset,func): def _diag_ufunc(A,b,offset,func):
dA = view(A, offset); func(dA,b,dA) dA = view(A, offset); func(dA,b,dA)
return A return A
@ -51,11 +57,11 @@ def _diag_ufunc(A,b,offset,func):
def times(A, b, offset=0): def times(A, b, offset=0):
""" """
Times the view of A with b in place (!). Times the view of A with b in place (!).
Returns modified A Returns modified A
Broadcasting is allowed, thus b can be scalar. Broadcasting is allowed, thus b can be scalar.
if offset is not zero, make sure b is of right shape! if offset is not zero, make sure b is of right shape!
:param ndarray A: 2 dimensional array :param ndarray A: 2 dimensional array
:param ndarray-like b: either one dimensional or scalar :param ndarray-like b: either one dimensional or scalar
:param int offset: same as in view. :param int offset: same as in view.
@ -67,11 +73,11 @@ multiply = times
def divide(A, b, offset=0): def divide(A, b, offset=0):
""" """
Divide the view of A by b in place (!). Divide the view of A by b in place (!).
Returns modified A Returns modified A
Broadcasting is allowed, thus b can be scalar. Broadcasting is allowed, thus b can be scalar.
if offset is not zero, make sure b is of right shape! if offset is not zero, make sure b is of right shape!
:param ndarray A: 2 dimensional array :param ndarray A: 2 dimensional array
:param ndarray-like b: either one dimensional or scalar :param ndarray-like b: either one dimensional or scalar
:param int offset: same as in view. :param int offset: same as in view.
@ -84,9 +90,9 @@ def add(A, b, offset=0):
Add b to the view of A in place (!). Add b to the view of A in place (!).
Returns modified A. Returns modified A.
Broadcasting is allowed, thus b can be scalar. Broadcasting is allowed, thus b can be scalar.
if offset is not zero, make sure b is of right shape! if offset is not zero, make sure b is of right shape!
:param ndarray A: 2 dimensional array :param ndarray A: 2 dimensional array
:param ndarray-like b: either one dimensional or scalar :param ndarray-like b: either one dimensional or scalar
:param int offset: same as in view. :param int offset: same as in view.
@ -99,16 +105,16 @@ def subtract(A, b, offset=0):
Subtract b from the view of A in place (!). Subtract b from the view of A in place (!).
Returns modified A. Returns modified A.
Broadcasting is allowed, thus b can be scalar. Broadcasting is allowed, thus b can be scalar.
if offset is not zero, make sure b is of right shape! if offset is not zero, make sure b is of right shape!
:param ndarray A: 2 dimensional array :param ndarray A: 2 dimensional array
:param ndarray-like b: either one dimensional or scalar :param ndarray-like b: either one dimensional or scalar
:param int offset: same as in view. :param int offset: same as in view.
:rtype: view of A, which is adjusted inplace :rtype: view of A, which is adjusted inplace
""" """
return _diag_ufunc(A, b, offset, np.subtract) return _diag_ufunc(A, b, offset, np.subtract)
if __name__ == '__main__': if __name__ == '__main__':
import doctest import doctest
doctest.testmod() doctest.testmod()