Merge branch 'devel' of github.com:SheffieldML/GPy into devel

Max Zwiessele 2014-10-16 14:18:02 +01:00
commit 4e8fbebe27
3 changed files with 18 additions and 6 deletions

Changed file 1 of 3

@@ -10,8 +10,8 @@ from parameterization.variational import VariationalPosterior
 import logging
 from GPy.inference.latent_function_inference.posterior import Posterior
-from GPy.inference.optimization.stochastics import SparseGPStochastics,\ #no stochastics.py file added!
-SparseGPMissing
+from GPy.inference.optimization.stochastics import SparseGPStochastics,\
+#SparseGPMissing
 logger = logging.getLogger("sparse gp")

 class SparseGP(GP):

Changed file 2 of 3

@@ -8,9 +8,9 @@ from ...core.parameterization.transformations import Logexp
 from ...util.linalg import tdot
 from ... import util
 import numpy as np
-from scipy import integrate
+from scipy import integrate, weave
+from ...util.caching import Cache_this
 from ...util.config import config # for assessing whether to use weave
-from ...util.caching import Cache_this

 class Stationary(Kern):
 """
@@ -132,10 +132,22 @@ class Stationary(Kern):
 return ret

 def update_gradients_diag(self, dL_dKdiag, X):
+"""
+Given the derivative of the objective with respect to the diagonal of
+the covariance matrix, compute the derivative wrt the parameters of
+this kernel and store in the <parameter>.gradient field.
+
+See also update_gradients_full
+"""
 self.variance.gradient = np.sum(dL_dKdiag)
 self.lengthscale.gradient = 0.

 def update_gradients_full(self, dL_dK, X, X2=None):
+"""
+Given the derivative of the objective wrt the covariance matrix
+(dL_dK), compute the gradient wrt the parameters of this kernel,
+and store in the parameters object as e.g. self.variance.gradient
+"""
 self.variance.gradient = np.einsum('ij,ij,i', self.K(X, X2), dL_dK, 1./self.variance)

 #now the lengthscale gradient(s)
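For readers following the diff, the new einsum line computes the variance gradient of a stationary kernel. A rough plain-NumPy restatement, given purely as an illustrative sketch (K, dL_dK and variance stand in for self.K(X, X2), the incoming gradient and self.variance; this is not code from the commit):

    import numpy as np

    def variance_gradient(K, dL_dK, variance):
        # For a stationary kernel K = variance * r(X, X2), so dK_ij/dvariance = K_ij / variance,
        # and dL/dvariance = sum_ij dL_dK_ij * K_ij / variance -- the quantity the einsum builds.
        return np.sum(dL_dK * K) / variance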
@@ -173,6 +185,7 @@ class Stationary(Kern):
 return 1./np.where(dist != 0., dist, np.inf)

 def weave_lengthscale_grads(self, tmp, X, X2):
+"""Use scipy.weave to compute derivatives wrt the lengthscales"""
 N,M = tmp.shape
 Q = X.shape[1]
 if hasattr(X, 'values'):X = X.values
@@ -190,7 +203,6 @@ class Stationary(Kern):
 grads[q] = gradq;
 }
 """
-from scipy import weave
 weave.inline(code, ['tmp', 'X', 'X2', 'grads', 'N', 'M', 'Q'], type_converters=weave.converters.blitz, support_code="#include <math.h>")
 return -grads/self.lengthscale**3
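The inlined C code accumulates, for each input dimension q, the sum over all pairs n, m of tmp[n, m] * (X[n, q] - X2[m, q])**2, and the result is then scaled by -1/lengthscale**3. A vectorised NumPy sketch of the same computation, shown only to clarify what the weave path does (function and argument names are illustrative, not GPy's API):

    import numpy as np

    def lengthscale_grads_numpy(tmp, X, X2, lengthscale):
        # grads[q] = sum_{n,m} tmp[n, m] * (X[n, q] - X2[m, q])**2
        diff_sq = (X[:, None, :] - X2[None, :, :]) ** 2   # shape (N, M, Q)
        grads = np.einsum('nm,nmq->q', tmp, diff_sq)      # shape (Q,)
        return -grads / lengthscale ** 3                  # same scaling as the weave version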

Changed file 3 of 3

@@ -4,7 +4,7 @@
 import sympy as sym
 from sympy.utilities.lambdify import lambdify
-from GPy.util.symbolic import gammaln # does not exist! JH
+from GPy.util.symbolic import gammaln
 import numpy as np
 import link_functions
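The gammaln import sits next to lambdify presumably so that symbolic likelihood expressions involving the log-gamma function can be compiled into fast NumPy callables. A hedged illustration of that lambdify pattern (not code from this commit; scipy.special.gammaln is used here as a stand-in for GPy.util.symbolic.gammaln):

    import numpy as np
    import sympy as sym
    from sympy.utilities.lambdify import lambdify
    from scipy.special import gammaln  # stand-in numerical log-gamma

    x = sym.Symbol('x')
    expr = sym.loggamma(x + 1)  # symbolic log Gamma(x + 1) = log(x!)
    # Map sympy's loggamma onto the numerical gammaln when generating the callable.
    f = lambdify(x, expr, modules=[{'loggamma': gammaln}, 'numpy'])
    print(f(np.array([1.0, 2.0, 5.0])))  # ~[0.0, 0.693, 4.787]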