Merge branch 'devel' of github.com:SheffieldML/GPy into devel

Max Zwiessele 2014-10-16 12:43:19 +01:00
commit 3358d06e42
17 changed files with 184 additions and 522 deletions


@@ -1,30 +0,0 @@
try:
    import sympy as sym
    sympy_available = True
except ImportError:
    sympy_available = False

import numpy as np
from symbolic import Symbolic

class Eq(Symbolic):
    """
    The exponentiated quadratic covariance as a symbolic function.
    """
    def __init__(self, input_dim, output_dim=1, variance=1.0, lengthscale=1.0, name='Eq'):
        parameters = {'variance': variance, 'lengthscale': lengthscale}
        x = sym.symbols('x_:' + str(input_dim))
        z = sym.symbols('z_:' + str(input_dim))
        variance = sym.var('variance', positive=True)
        lengthscale = sym.var('lengthscale', positive=True)
        dist_string = ' + '.join(['(x_%i - z_%i)**2' % (i, i) for i in range(input_dim)])
        from sympy.parsing.sympy_parser import parse_expr
        dist = parse_expr(dist_string)
        # this is the covariance function
        f = variance*sym.exp(-dist/(2*lengthscale**2))
        # extra input dim is to signify the output dimension.
        super(Eq, self).__init__(input_dim=input_dim, k=f, output_dim=output_dim, parameters=parameters, name=name)
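
For reference, a minimal standalone sketch of the covariance the deleted Eq class constructs: the exponentiated quadratic k(x, z) = variance * exp(-||x - z||^2 / (2 * lengthscale^2)) built as a sympy expression. The lambdify call and the numeric evaluation below are illustrative additions, not part of the original class.

import sympy as sym
from sympy.parsing.sympy_parser import parse_expr

input_dim = 2
x = sym.symbols('x_:' + str(input_dim))
z = sym.symbols('z_:' + str(input_dim))
variance = sym.Symbol('variance', positive=True)
lengthscale = sym.Symbol('lengthscale', positive=True)

# squared distance and covariance, mirroring the deleted class above
dist = parse_expr(' + '.join('(x_%i - z_%i)**2' % (i, i) for i in range(input_dim)))
k = variance * sym.exp(-dist / (2 * lengthscale**2))

# evaluate k([0, 0], [1, 0.5]) with variance=1, lengthscale=2 (illustrative values)
k_fn = sym.lambdify(x + z + (variance, lengthscale), k, 'numpy')
print(k_fn(0.0, 0.0, 1.0, 0.5, 1.0, 2.0))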


@@ -1,30 +0,0 @@
try:
    import sympy as sym
    sympy_available = True
except ImportError:
    sympy_available = False

import numpy as np
from symbolic import Symbolic

class Heat_eqinit(Symbolic):
    """
    A symbolic covariance based on laying down an initial condition of the heat
    equation with an exponentiated quadratic covariance. The covariance then has
    multiple outputs, which are interpreted as observations of the diffused
    process with different diffusion coefficients (or at different times).
    """
    def __init__(self, input_dim, output_dim=1, param=None, name='Heat_eqinit'):
        x = sym.symbols('x_:' + str(input_dim))
        z = sym.symbols('z_:' + str(input_dim))
        scale = sym.var('scale_i scale_j', positive=True)
        lengthscale = sym.var('lengthscale_i lengthscale_j', positive=True)
        shared_lengthscale = sym.var('shared_lengthscale', positive=True)
        dist_string = ' + '.join(['(x_%i - z_%i)**2' % (i, i) for i in range(input_dim)])
        from sympy.parsing.sympy_parser import parse_expr
        dist = parse_expr(dist_string)
        # this is the covariance function
        f = scale_i*scale_j*sym.exp(-dist/(2*(shared_lengthscale**2 + lengthscale_i*lengthscale_j)))
        # extra input dim is to signify the output dimension.
        super(Heat_eqinit, self).__init__(input_dim=input_dim+1, k=f, output_dim=output_dim, name=name)
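
A hedged sketch of what the deleted Heat_eqinit covariance encodes: each output carries its own scale and lengthscale plus a shared lengthscale, so the cross-covariance between two outputs follows by substituting that pair's parameters into the symbolic kernel. The numeric values below are made up for illustration and are not taken from the commit.

import sympy as sym
from sympy.parsing.sympy_parser import parse_expr

input_dim = 1
x = sym.symbols('x_:' + str(input_dim))
z = sym.symbols('z_:' + str(input_dim))
scale_i, scale_j = sym.symbols('scale_i scale_j', positive=True)
lengthscale_i, lengthscale_j = sym.symbols('lengthscale_i lengthscale_j', positive=True)
shared_lengthscale = sym.Symbol('shared_lengthscale', positive=True)

# same kernel expression as in the class above
dist = parse_expr(' + '.join('(x_%i - z_%i)**2' % (i, i) for i in range(input_dim)))
k = scale_i*scale_j*sym.exp(-dist/(2*(shared_lengthscale**2 + lengthscale_i*lengthscale_j)))

# cross-covariance block between output i (lengthscale 0.5) and output j (lengthscale 2.0)
k_ij = k.subs({scale_i: 1.0, scale_j: 1.0,
               lengthscale_i: 0.5, lengthscale_j: 2.0,
               shared_lengthscale: 1.0})
print(sym.simplify(k_ij))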


@@ -10,6 +10,7 @@ from ... import util
import numpy as np
from scipy import integrate
from ...util.caching import Cache_this
from ...util.config import config # for assessing whether to use weave

class Stationary(Kern):
    """
@@ -146,7 +147,17 @@ class Stationary(Kern):
            #self.lengthscale.gradient = -((dL_dr*rinv)[:,:,None]*x_xl3).sum(0).sum(0)/self.lengthscale**3
            tmp = dL_dr*self._inv_dist(X, X2)
            if X2 is None: X2 = X
            self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)])
            if config.getboolean('weave', 'working'):
                try:
                    self.lengthscale.gradient = self.weave_lengthscale_grads(tmp, X, X2)
                except:
                    print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
                    config.set('weave', 'working', 'False')
                    self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)])
            else:
                self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)])
        else:
            r = self._scaled_dist(X, X2)
            self.lengthscale.gradient = -np.sum(dL_dr*r)/self.lengthscale
@@ -161,10 +172,43 @@ class Stationary(Kern):
        dist = self._scaled_dist(X, X2).copy()
        return 1./np.where(dist != 0., dist, np.inf)

    def weave_lengthscale_grads(self, tmp, X, X2):
        N, M = tmp.shape
        Q = X.shape[1]
        if hasattr(X, 'values'): X = X.values
        if hasattr(X2, 'values'): X2 = X2.values
        grads = np.zeros(self.input_dim)
        code = """
        double gradq;
        for(int q=0; q<Q; q++){
            gradq = 0;
            for(int n=0; n<N; n++){
                for(int m=0; m<M; m++){
                    gradq += tmp(n,m)*(X(n,q)-X2(m,q))*(X(n,q)-X2(m,q));
                }
            }
            grads[q] = gradq;
        }
        """
        from scipy import weave
        weave.inline(code, ['tmp', 'X', 'X2', 'grads', 'N', 'M', 'Q'], type_converters=weave.converters.blitz, support_code="#include <math.h>")
        return -grads/self.lengthscale**3

    def gradients_X(self, dL_dK, X, X2=None):
        """
        Given the derivative of the objective wrt K (dL_dK), compute the derivative wrt X
        """
        if config.getboolean('weave', 'working'):
            try:
                return self.gradients_X_weave(dL_dK, X, X2)
            except:
                print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
                config.set('weave', 'working', 'False')
                return self.gradients_X_(dL_dK, X, X2)
        else:
            return self.gradients_X_(dL_dK, X, X2)

    def gradients_X_(self, dL_dK, X, X2=None):
        invdist = self._inv_dist(X, X2)
        dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
        tmp = invdist*dL_dr
@@ -184,6 +228,34 @@ class Stationary(Kern):
        return ret

    def gradients_X_weave(self, dL_dK, X, X2=None):
        invdist = self._inv_dist(X, X2)
        dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
        tmp = invdist*dL_dr
        if X2 is None:
            tmp = tmp + tmp.T
            X2 = X
        ret = np.zeros(X.shape)
        code = """
        int n,q,d;
        double retnd;
        for(n=0;n<N;n++){
            for(d=0;d<D;d++){
                retnd = 0;
                for(q=0;q<Q;q++){
                    retnd += tmp(n,q)*(X(n,d)-X2(q,d));
                }
                ret(n,d) = retnd;
            }
        }
        """
        from scipy import weave
        N, D = X.shape
        Q = tmp.shape[1]
        weave.inline(code, ['ret', 'N', 'D', 'Q', 'tmp', 'X', 'X2'], type_converters=weave.converters.blitz)
        return ret/self.lengthscale**2

    def gradients_X_diag(self, dL_dKdiag, X):
        return np.zeros(X.shape)
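
As a sanity check on the weave additions above, here is a small self-contained sketch (not part of the commit) verifying that the explicit loops in weave_lengthscale_grads compute the same ARD lengthscale gradient as the pure-numpy einsum fallback used when weave is disabled. Shapes and lengthscale values are placeholders.

import numpy as np

rng = np.random.RandomState(0)
N, M, Q = 5, 4, 3
tmp = rng.randn(N, M)                   # stands in for dL_dr * inverse distance, as in the diff
X, X2 = rng.randn(N, Q), rng.randn(M, Q)
lengthscale = np.array([0.5, 1.0, 2.0])

# numpy fallback, copied from the diff above
grads_np = np.array([np.einsum('ij,ij,...', tmp,
                               np.square(X[:, q:q+1] - X2[:, q:q+1].T),
                               -1./lengthscale[q]**3) for q in range(Q)])

# explicit triple loop mirroring the C code passed to weave.inline
grads_loop = np.zeros(Q)
for q in range(Q):
    for n in range(N):
        for m in range(M):
            grads_loop[q] += tmp[n, m]*(X[n, q] - X2[m, q])**2
grads_loop *= -1./lengthscale**3

assert np.allclose(grads_np, grads_loop)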