Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-04 17:22:39 +02:00
Merge branch 'devel' of https://github.com/SheffieldML/GPy into devel
Commit b1ac51ce49
2 changed files with 22 additions and 14 deletions
@@ -155,11 +155,16 @@ class StdPeriodic(Kern):
        self.period.gradient = 0
        self.lengthscale.gradient = 0

-#    def gradients_X(self, dL_dK, X, X2=None):
-#        """derivative of the covariance matrix with respect to X."""
-#
-#        raise NotImplemented("Periodic kernel: dK_dX not implemented")
-#
-#    def gradients_X_diag(self, dL_dKdiag, X):
-#
-#        raise NotImplemented("Periodic kernel: dKdiag_dX not implemented")
+    def gradients_X(self, dL_dK, X, X2=None):
+        K = self.K(X, X2)
+        if X2 is None:
+            dL_dK = dL_dK+dL_dK.T
+            X2 = X
+        dX = -np.pi*((dL_dK*K)[:,:,None]*np.sin(2*np.pi/self.period*(X[:,None,:] - X2[None,:,:]))/(2.*np.square(self.lengthscale)*self.period)).sum(1)
+        return dX
+
+    def gradients_X_diag(self, dL_dKdiag, X):
+        return np.zeros(X.shape)
+
+    def input_sensitivity(self, summarize=True):
+        return self.variance*np.ones(self.input_dim)/self.lengthscale**2
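The gradients_X added for StdPeriodic returns the gradient of the objective with respect to the inputs, i.e. d(sum(dL_dK * K(X, X2)))/dX, symmetrising dL_dK when X2 is None so that both appearances of X are accounted for. A quick way to exercise it is a central finite difference of sum(dL_dK * K(X)); the snippet below is a hypothetical sanity check, not part of the commit, with arbitrary sizes and hyperparameter values.

# Hypothetical sanity check, not part of the commit: compare the new analytic
# gradients_X of StdPeriodic against a central finite difference of
# sum(dL_dK * K(X)).  Kernel hyperparameters and sizes are arbitrary.
import numpy as np
import GPy

np.random.seed(0)
X = np.random.randn(5, 2)
dL_dK = np.random.randn(5, 5)
k = GPy.kern.StdPeriodic(input_dim=2, variance=1.2, period=2.0, lengthscale=0.7)

analytic = k.gradients_X(dL_dK, X)   # the newly added method

eps = 1e-6
numeric = np.zeros_like(X)
for i in range(X.shape[0]):
    for j in range(X.shape[1]):
        Xp, Xm = X.copy(), X.copy()
        Xp[i, j] += eps
        Xm[i, j] -= eps
        numeric[i, j] = (np.sum(dL_dK * k.K(Xp)) - np.sum(dL_dK * k.K(Xm))) / (2. * eps)

print(np.allclose(analytic, numeric, atol=1e-4))   # should print True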
@@ -489,18 +489,21 @@ class RatQuad(Stationary):
        self.link_parameters(self.power)

    def K_of_r(self, r):
-        r2 = np.power(r, 2.)
-        return self.variance*np.power(1. + r2/2., -self.power)
+        r2 = np.square(r)
+        # return self.variance*np.power(1. + r2/2., -self.power)
+        return self.variance*np.exp(-self.power*np.log1p(r2/2.))

    def dK_dr(self, r):
-        r2 = np.power(r, 2.)
-        return -self.variance*self.power*r*np.power(1. + r2/2., - self.power - 1.)
+        r2 = np.square(r)
+        # return -self.variance*self.power*r*np.power(1. + r2/2., - self.power - 1.)
+        return -self.variance*self.power*r*np.exp(-(self.power+1)*np.log1p(r2/2.))

    def update_gradients_full(self, dL_dK, X, X2=None):
        super(RatQuad, self).update_gradients_full(dL_dK, X, X2)
        r = self._scaled_dist(X, X2)
-        r2 = np.power(r, 2.)
-        dK_dpow = -self.variance * np.power(2., self.power) * np.power(r2 + 2., -self.power) * np.log(0.5*(r2+2.))
+        r2 = np.square(r)
+        # dK_dpow = -self.variance * np.power(2., self.power) * np.power(r2 + 2., -self.power) * np.log(0.5*(r2+2.))
+        dK_dpow = -self.variance * np.exp(self.power*(np.log(2.)-np.log1p(r2+1)))*np.log1p(r2/2.)
        grad = np.sum(dL_dK*dK_dpow)
        self.power.gradient = grad
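The K_of_r and dK_dr changes replace (1. + r2/2.)**(-power) with exp(-power*log1p(r2/2.)). The two forms are algebraically identical, since x**(-p) = exp(-p*log(x)) and log1p(r2/2.) = log(1. + r2/2.); routing the computation through log1p/exp is presumably intended to be more robust for large power. A minimal equivalence check, with arbitrary values:

# Hypothetical illustration, not part of the commit: the old power form and the
# new exp/log1p form of the RatQuad covariance and its r-derivative coincide.
import numpy as np

variance, power = 1.5, 3.0
r = np.linspace(0., 5., 11)
r2 = np.square(r)

K_old  = variance * np.power(1. + r2/2., -power)
K_new  = variance * np.exp(-power * np.log1p(r2/2.))

dK_old = -variance * power * r * np.power(1. + r2/2., -power - 1.)
dK_new = -variance * power * r * np.exp(-(power + 1) * np.log1p(r2/2.))

print(np.allclose(K_old, K_new), np.allclose(dK_old, dK_new))   # True True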
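Likewise, the rewritten dK_dpow in update_gradients_full is the same quantity as before, namely dK/d(power) for K = variance*(1 + r2/2)**(-power), using 2**p * (r2 + 2)**(-p) = exp(p*(log(2) - log(r2 + 2))) with log(r2 + 2) = log1p(r2 + 1), and log(0.5*(r2 + 2)) = log1p(r2/2). A hypothetical cross-check against both the old expression and a finite difference in power:

# Hypothetical check, not part of the commit: the rewritten dK_dpow equals the
# old expression, and both equal dK/d(power) for K = variance*(1 + r^2/2)**(-power).
import numpy as np

variance, power = 1.5, 3.0
r2 = np.square(np.linspace(0., 5., 11))

old = -variance * np.power(2., power) * np.power(r2 + 2., -power) * np.log(0.5*(r2 + 2.))
new = -variance * np.exp(power*(np.log(2.) - np.log1p(r2 + 1))) * np.log1p(r2/2.)

eps = 1e-6
K = lambda p: variance * np.exp(-p * np.log1p(r2/2.))
fd = (K(power + eps) - K(power - eps)) / (2. * eps)

print(np.allclose(old, new), np.allclose(new, fd))   # True True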