added ARD flag to exponential

This commit is contained in:
Nicolas 2013-01-18 15:14:23 +00:00
parent 17ec4d6275
commit 11d088cf90
3 changed files with 39 additions and 39 deletions

View file

@@ -2,5 +2,5 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt)
from constructors import rbf, Matern32, Matern52, exponential, linear, white, bias, finite_dimensional, rbf_ARD, spline, Brownian, linear_ARD, rbf_sympy, sympykern from constructors import rbf, Matern32, Matern52, exponential, linear, white, bias, finite_dimensional, spline, Brownian, linear_ARD, rbf_sympy, sympykern
from kern import kern from kern import kern

View file

@@ -36,20 +36,6 @@ def rbf(D,variance=1., lengthscale=None,ARD=False):
part = rbfpart(D,variance,lengthscale,ARD) part = rbfpart(D,variance,lengthscale,ARD)
return kern(D, [part]) return kern(D, [part])
def rbf_ARD(D,variance=1., lengthscales=None):
"""
Construct an RBF kernel with Automatic Relevance Determination (ARD)
:param D: dimensionality of the kernel, obligatory
:type D: int
:param variance: the variance of the kernel
:type variance: float
:param lengthscales: the lengthscales of the kernel
:type lengthscales: None|np.ndarray
"""
part = rbf_ARD_part(D,variance,lengthscales)
return kern(D, [part])
def linear(D,lengthscales=None): def linear(D,lengthscales=None):
""" """
Construct a linear kernel. Construct a linear kernel.
@@ -86,7 +72,7 @@ def white(D,variance=1.):
part = whitepart(D,variance) part = whitepart(D,variance)
return kern(D, [part]) return kern(D, [part])
def exponential(D,variance=1., lengthscales=None): def exponential(D,variance=1., lengthscale=None, ARD=False):
""" """
Construct a exponential kernel. Construct a exponential kernel.
@@ -96,10 +82,10 @@ def exponential(D,variance=1., lengthscales=None):
variance (float) variance (float)
lengthscales (np.ndarray) lengthscales (np.ndarray)
""" """
part = exponentialpart(D,variance, lengthscales) part = exponentialpart(D,variance, lengthscale, ARD)
return kern(D, [part]) return kern(D, [part])
def Matern32(D,variance=1., lengthscales=None): def Matern32(D,variance=1., lengthscale=None, ARD=False):
""" """
Construct a Matern 3/2 kernel. Construct a Matern 3/2 kernel.
@@ -109,7 +95,7 @@ def Matern32(D,variance=1., lengthscales=None):
variance (float) variance (float)
lengthscales (np.ndarray) lengthscales (np.ndarray)
""" """
part = Matern32part(D,variance, lengthscales) part = Matern32part(D,variance, lengthscale, ARD)
return kern(D, [part]) return kern(D, [part])
def Matern52(D,variance=1., lengthscales=None): def Matern52(D,variance=1., lengthscales=None):

View file

@@ -24,37 +24,46 @@ class exponential(kernpart):
:rtype: kernel object :rtype: kernel object
""" """
def __init__(self,D,variance=1.,lengthscales=None): def __init__(self,D,variance=1.,lengthscale=None,ARD=False):
self.D = D self.D = D
if lengthscales is not None: self.ARD = ARD
assert lengthscales.shape==(self.D,) if ARD == False:
self.Nparam = 2
self.name = 'exp'
if lengthscale is not None:
assert lengthscale.shape == (1,)
else:
lengthscale = np.ones(1)
else: else:
lengthscales = np.ones(self.D) self.Nparam = self.D + 1
self.Nparam = self.D + 1 self.name = 'exp_ARD'
self.name = 'exp' if lengthscale is not None:
self._set_params(np.hstack((variance,lengthscales))) assert lengthscale.shape == (self.D,)
else:
lengthscale = np.ones(self.D)
self._set_params(np.hstack((variance,lengthscale)))
def _get_params(self): def _get_params(self):
"""return the value of the parameters.""" """return the value of the parameters."""
return np.hstack((self.variance,self.lengthscales)) return np.hstack((self.variance,self.lengthscale))
def _set_params(self,x): def _set_params(self,x):
"""set the value of the parameters.""" """set the value of the parameters."""
assert x.size==(self.D+1) assert x.size==(self.D+1)
self.variance = x[0] self.variance = x[0]
self.lengthscales = x[1:] self.lengthscale = x[1:]
def _get_param_names(self): def _get_param_names(self):
"""return parameter names.""" """return parameter names."""
if self.D==1: if self.Nparam==2:
return ['variance','lengthscale'] return ['variance','lengthscale']
else: else:
return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscales.size)] return ['variance']+['lengthscale_%i'%i for i in range(self.lengthscale.size)]
def K(self,X,X2,target): def K(self,X,X2,target):
"""Compute the covariance matrix between X and X2.""" """Compute the covariance matrix between X and X2."""
if X2 is None: X2 = X if X2 is None: X2 = X
dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscales),-1)) dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1))
np.add(self.variance*np.exp(-dist), target,target) np.add(self.variance*np.exp(-dist), target,target)
def Kdiag(self,X,target): def Kdiag(self,X,target):
@@ -64,13 +73,18 @@ class exponential(kernpart):
def dK_dtheta(self,partial,X,X2,target): def dK_dtheta(self,partial,X,X2,target):
"""derivative of the covariance matrix with respect to the parameters.""" """derivative of the covariance matrix with respect to the parameters."""
if X2 is None: X2 = X if X2 is None: X2 = X
dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscales),-1)) dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1))
invdist = 1./np.where(dist!=0.,dist,np.inf) invdist = 1./np.where(dist!=0.,dist,np.inf)
dist2M = np.square(X[:,None,:]-X2[None,:,:])/self.lengthscales**3 dist2M = np.square(X[:,None,:]-X2[None,:,:])/self.lengthscale**3
dvar = np.exp(-dist) dvar = np.exp(-dist)
dl = self.variance*dvar[:,:,None]*dist2M*invdist[:,:,None]
target[0] += np.sum(dvar*partial) target[0] += np.sum(dvar*partial)
target[1:] += (dl*partial[:,:,None]).sum(0).sum(0) if self.ARD == True:
dl = self.variance*dvar[:,:,None]*dist2M*invdist[:,:,None]
target[1:] += (dl*partial[:,:,None]).sum(0).sum(0)
else:
dl = self.variance*dvar*dist2M.sum(-1)*invdist
target[1] += np.sum(dl*partial)
#foo
def dKdiag_dtheta(self,partial,X,target): def dKdiag_dtheta(self,partial,X,target):
"""derivative of the diagonal of the covariance matrix with respect to the parameters.""" """derivative of the diagonal of the covariance matrix with respect to the parameters."""
@@ -80,8 +94,8 @@ class exponential(kernpart):
def dK_dX(self,partial,X,X2,target): def dK_dX(self,partial,X,X2,target):
"""derivative of the covariance matrix with respect to X.""" """derivative of the covariance matrix with respect to X."""
if X2 is None: X2 = X if X2 is None: X2 = X
dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscales),-1))[:,:,None] dist = np.sqrt(np.sum(np.square((X[:,None,:]-X2[None,:,:])/self.lengthscale),-1))[:,:,None]
ddist_dX = (X[:,None,:]-X2[None,:,:])/self.lengthscales**2/np.where(dist!=0.,dist,np.inf) ddist_dX = (X[:,None,:]-X2[None,:,:])/self.lengthscale**2/np.where(dist!=0.,dist,np.inf)
dK_dX = - np.transpose(self.variance*np.exp(-dist)*ddist_dX,(1,0,2)) dK_dX = - np.transpose(self.variance*np.exp(-dist)*ddist_dX,(1,0,2))
target += np.sum(dK_dX*partial.T[:,:,None],0) target += np.sum(dK_dX*partial.T[:,:,None],0)
@@ -101,14 +115,14 @@ class exponential(kernpart):
""" """
assert self.D == 1 assert self.D == 1
def L(x,i): def L(x,i):
return(1./self.lengthscales*F[i](x) + F1[i](x)) return(1./self.lengthscale*F[i](x) + F1[i](x))
n = F.shape[0] n = F.shape[0]
G = np.zeros((n,n)) G = np.zeros((n,n))
for i in range(n): for i in range(n):
for j in range(i,n): for j in range(i,n):
G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0] G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0]
Flower = np.array([f(lower) for f in F])[:,None] Flower = np.array([f(lower) for f in F])[:,None]
return(self.lengthscales/2./self.variance * G + 1./self.variance * np.dot(Flower,Flower.T)) return(self.lengthscale/2./self.variance * G + 1./self.variance * np.dot(Flower,Flower.T))