Merge branch 'devel' of https://github.com/SheffieldML/GPy into devel

Alan Saul 2016-03-30 19:42:46 +01:00
commit b1ac51ce49
2 changed files with 22 additions and 14 deletions


@@ -155,11 +155,16 @@ class StdPeriodic(Kern):
         self.period.gradient = 0
         self.lengthscale.gradient = 0
 
-    # def gradients_X(self, dL_dK, X, X2=None):
-    #     """derivative of the covariance matrix with respect to X."""
-    #     raise NotImplemented("Periodic kernel: dK_dX not implemented")
-    # def gradients_X_diag(self, dL_dKdiag, X):
-    #     raise NotImplemented("Periodic kernel: dKdiag_dX not implemented")
+    def gradients_X(self, dL_dK, X, X2=None):
+        K = self.K(X, X2)
+        if X2 is None:
+            dL_dK = dL_dK+dL_dK.T
+            X2 = X
+        dX = -np.pi*((dL_dK*K)[:,:,None]*np.sin(2*np.pi/self.period*(X[:,None,:] - X2[None,:,:]))/(2.*np.square(self.lengthscale)*self.period)).sum(1)
+        return dX
+
+    def gradients_X_diag(self, dL_dKdiag, X):
+        return np.zeros(X.shape)
 
     def input_sensitivity(self, summarize=True):
         return self.variance*np.ones(self.input_dim)/self.lengthscale**2
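Note on the new methods: gradients_X follows the GPy convention that, when X2 is None, it returns the derivative of sum(dL_dK * K(X, X)) with respect to X, which is why dL_dK is symmetrized before use. The dX line is the chain rule through the squared sine: per input dimension, dk/dx reduces to -pi*K*sin(2*pi*(x - x')/T)/(2*l**2*T). gradients_X_diag can simply return zeros because the diagonal K(x, x) of the standard periodic kernel is the constant variance, independent of X. Below is a minimal finite-difference sanity check; this is a sketch of my own, not part of the commit, and the sizes, seed, and tolerance are arbitrary choices.

import numpy as np
import GPy

np.random.seed(0)
X = np.random.randn(5, 2)
dL_dK = np.random.randn(5, 5)            # deliberately non-symmetric
kern = GPy.kern.StdPeriodic(input_dim=2)

analytic = kern.gradients_X(dL_dK, X)    # X2=None branch symmetrizes dL_dK

# Central finite differences of f(X) = sum(dL_dK * K(X, X)).
eps = 1e-6
numeric = np.zeros_like(X)
for i in range(X.shape[0]):
    for j in range(X.shape[1]):
        Xp, Xm = X.copy(), X.copy()
        Xp[i, j] += eps
        Xm[i, j] -= eps
        numeric[i, j] = np.sum(dL_dK * (kern.K(Xp) - kern.K(Xm))) / (2.*eps)

assert np.allclose(analytic, numeric, atol=1e-5)

# The diagonal K(x, x) equals the variance for any x, so its
# gradient with respect to X is identically zero.
assert np.allclose(kern.gradients_X_diag(np.ones(5), X), 0.)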


@@ -489,18 +489,21 @@ class RatQuad(Stationary):
         self.link_parameters(self.power)
 
     def K_of_r(self, r):
-        r2 = np.power(r, 2.)
-        return self.variance*np.power(1. + r2/2., -self.power)
+        r2 = np.square(r)
+        # return self.variance*np.power(1. + r2/2., -self.power)
+        return self.variance*np.exp(-self.power*np.log1p(r2/2.))
 
     def dK_dr(self, r):
-        r2 = np.power(r, 2.)
-        return -self.variance*self.power*r*np.power(1. + r2/2., - self.power - 1.)
+        r2 = np.square(r)
+        # return -self.variance*self.power*r*np.power(1. + r2/2., - self.power - 1.)
+        return -self.variance*self.power*r*np.exp(-(self.power+1)*np.log1p(r2/2.))
 
     def update_gradients_full(self, dL_dK, X, X2=None):
         super(RatQuad, self).update_gradients_full(dL_dK, X, X2)
         r = self._scaled_dist(X, X2)
-        r2 = np.power(r, 2.)
-        dK_dpow = -self.variance * np.power(2., self.power) * np.power(r2 + 2., -self.power) * np.log(0.5*(r2+2.))
+        r2 = np.square(r)
+        # dK_dpow = -self.variance * np.power(2., self.power) * np.power(r2 + 2., -self.power) * np.log(0.5*(r2+2.))
+        dK_dpow = -self.variance * np.exp(self.power*(np.log(2.)-np.log1p(r2+1)))*np.log1p(r2/2.)
 
         grad = np.sum(dL_dK*dK_dpow)
         self.power.gradient = grad
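Note: both rewritten return values are algebraically the same rational quadratic form variance*(1 + r**2/2)**(-power); the new code just evaluates the power through exp and np.log1p instead of forming 1 + r2/2 directly. A quick equivalence check of my own (test values arbitrary, not part of the commit), confirming the old and new forms agree to machine precision over a wide range of distances:

import numpy as np

variance, power = 1.3, 2.5            # arbitrary test values
r = np.logspace(-8, 2, 11)            # scaled distances across several decades
r2 = np.square(r)

old_K = variance*np.power(1. + r2/2., -power)
new_K = variance*np.exp(-power*np.log1p(r2/2.))
assert np.allclose(old_K, new_K, rtol=1e-12)

old_dK = -variance*power*r*np.power(1. + r2/2., -power - 1.)
new_dK = -variance*power*r*np.exp(-(power + 1)*np.log1p(r2/2.))
assert np.allclose(old_dK, new_dK, rtol=1e-12)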
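The gradient with respect to power is where np.log1p actually changes behaviour: in the old expression, 0.5*(r2+2.) is formed first and rounds to exactly 1.0 once r2 drops below machine epsilon (about 2.2e-16), so np.log returns 0.0 and the gradient contribution vanishes for near-coincident inputs; np.log1p(r2/2.) keeps the small term. A demonstration with hand-picked values (my sketch, not from the commit):

import numpy as np

variance, power = 1.0, 2.5   # arbitrary test values
r2 = 1e-18                   # squared scaled distance far below machine epsilon

# Old form: 0.5*(r2 + 2.) rounds to exactly 1.0, so np.log returns 0.0
# and the whole gradient entry collapses to zero.
old = -variance * np.power(2., power) * np.power(r2 + 2., -power) * np.log(0.5*(r2 + 2.))

# New form: np.log1p(r2/2.) evaluates log(1 + r2/2) without ever forming
# 1 + r2/2, so the tiny but nonzero gradient survives.
new = -variance * np.exp(power*(np.log(2.) - np.log1p(r2 + 1)))*np.log1p(r2/2.)

print(old)   # -0.0
print(new)   # about -5e-19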