Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-09 12:02:38 +02:00)
Commit 78d1abfc22 (parent 5f92ff6785)

GP_regression demo working with new style gradients for rbf, linear, white, bias

5 changed files with 18 additions and 25 deletions
@@ -9,10 +9,10 @@ import hashlib
 class bias(kernpart):
     def __init__(self,D,variance=1.):
         """
-        Arguments
-        ----------
-        D: int - the number of input dimensions
-        variance: float
+        :param D: the number of input dimensions
+        :type D: int
+        :param variance: the variance of the kernel
+        :type variance: float
         """
         self.D = D
         self.Nparam = 1
@@ -30,19 +30,16 @@ class bias(kernpart):
         return ['variance']

     def K(self,X,X2,target):
         if X2 is None: X2 = X
-        np.add(self.variance, target,target)
+        target += self.variance

     def Kdiag(self,X,target):
-        np.add(target,self.variance,target)
+        target += self.variance

-    def dK_dtheta(self,X,X2,target):
-        """Return shape is NxMx(Ntheta)"""
-        if X2 is None: X2 = X
-        np.add(target[:,:,0],1., target[:,:,0])
+    def dK_dtheta(self,partial,X,X2,target):
+        target += partial.sum()

-    def dKdiag_dtheta(self,X,target):
-        np.add(target[:,0],1.,target[:,0])
+    def dKdiag_dtheta(self,partial,X,target):
+        target += partial.sum()

     def dK_dX(self, X, X2, target):
         pass
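The hunk above is the essence of the new-style gradients: instead of filling an N x M x Ntheta tensor with dK/dtheta and leaving the chain rule to the caller, each kernel now receives `partial` (the derivative of the objective with respect to K) and accumulates the contracted gradient directly into `target`. For the bias kernel dK_ij/dvariance = 1 for every pair of inputs, so the contraction collapses to `partial.sum()`. A minimal sketch of that reasoning, with an illustrative `bias_K` helper that is not GPy code:

    import numpy as np

    # Illustrative helper, not GPy code: the bias kernel is constant,
    # K_ij = variance for every input pair.
    def bias_K(variance, N, M):
        return np.full((N, M), variance)

    N, M = 4, 3
    variance = 0.7
    rng = np.random.default_rng(0)
    partial = rng.normal(size=(N, M))   # dL/dK, supplied by the model

    # Chain rule: dL/dvariance = sum_ij partial_ij * dK_ij/dvariance,
    # and dK_ij/dvariance = 1, so the whole contraction is partial.sum().
    grad = partial.sum()

    # Finite-difference check of the same quantity.
    eps = 1e-6
    L = lambda v: np.sum(partial * bias_K(v, N, M))
    assert np.allclose(grad, (L(variance + eps) - L(variance - eps)) / (2 * eps))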
@@ -37,20 +37,16 @@ class linear(kernpart):
     def Kdiag(self,X,target):
         np.add(target,np.sum(self.variance*np.square(X),-1),target)

-    def dK_dtheta(self,X,X2,target):
+    def dK_dtheta(self,partial,X,X2,target):
         """
         Computes the derivatives wrt theta
         Return shape is NxMx(Ntheta)

         """
-        if X2 is None: X2 = X
-        product = np.dot(X, X2.T)[:,:, None]#X[:,None,:]*X2[None,:,:]
-        target += product
+        product = np.dot(X, X2.T)
+        target += np.sum(product*partial)

-    def dK_dX(self,X,X2,target):
-        if X2 is None: X2 = X
-        np.add(target,X2[:,None,:]*self.variance,target)
+    def dK_dX(self,partial,X,X2,target):
+        target += self.variance * np.sum(partial[:,None,:]*X2.T[None,:,:],-1)

     # def psi0(self,Z,mu,S,target):
     #     expected = np.square(mu) + S
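The same convention for the linear kernel, where K = variance * X X2^T (taking a single, non-ARD variance): dL/dvariance = sum_nm partial_nm (X X2^T)_nm, and dL/dX = variance * partial X2, which is what the broadcast-and-sum in the new dK_dX computes. (The "Return shape is NxMx(Ntheta)" line in the docstring is now stale; the new-style method accumulates a scalar.) A self-contained sketch, not GPy code:

    import numpy as np

    rng = np.random.default_rng(1)
    N, M, Q = 5, 4, 3
    X = rng.normal(size=(N, Q))
    X2 = rng.normal(size=(M, Q))
    variance = 1.3
    partial = rng.normal(size=(N, M))   # dL/dK

    # dK_dtheta: dL/dvariance = sum_nm partial_nm * (X @ X2.T)_nm
    grad_var = np.sum(np.dot(X, X2.T) * partial)

    # dK_dX: dL/dX_nq = variance * sum_m partial_nm * X2_mq,
    # i.e. the broadcast-and-sum above equals variance * partial @ X2.
    grad_X = variance * np.sum(partial[:, None, :] * X2.T[None, :, :], -1)
    assert np.allclose(grad_X, variance * partial.dot(X2))

    # Finite-difference spot check of dL/dvariance.
    L = lambda v: np.sum(partial * (v * np.dot(X, X2.T)))
    eps = 1e-6
    assert np.allclose(grad_var, (L(variance + eps) - L(variance - eps)) / (2 * eps))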
@@ -63,7 +63,7 @@ class rbf(kernpart):
         self._K_computations(X,X2)
         _K_dist = X[:,None,:]-X2[None,:,:]
         dK_dX = np.transpose(-self.variance*self._K_dvar[:,:,np.newaxis]*_K_dist/self.lengthscale2,(1,0,2))
-        target += np.sum(dK_dX*partial[:,:,None],1)
+        target += np.sum(dK_dX*partial[:,:,None],0)

     def dKdiag_dX(self,X,target):
         pass
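The one-character change above picks which axis of the transposed derivative tensor is contracted against `partial`; summing over the wrong axis silently produces a gradient for the wrong set of inputs. A self-contained sketch of the chain rule being implemented (illustrative shapes and names, not GPy's internal layout), assuming K_nm = variance * exp(-0.5 ||x_n - x2_m||^2 / lengthscale2):

    import numpy as np

    rng = np.random.default_rng(2)
    N, M, Q = 5, 4, 3
    X = rng.normal(size=(N, Q))
    X2 = rng.normal(size=(M, Q))
    variance, lengthscale2 = 0.8, 1.5
    partial = rng.normal(size=(N, M))   # dL/dK

    def K(Xv):
        d2 = np.sum((Xv[:, None, :] - X2[None, :, :]) ** 2, -1)
        return variance * np.exp(-0.5 * d2 / lengthscale2)

    # dL/dX_nq = sum_m partial_nm * K_nm * (x2_mq - x_nq) / lengthscale2;
    # the sum must run over the X2 axis (axis 1 here) -- contracting the
    # wrong axis is exactly the kind of bug the hunk above fixes.
    grad_X = np.sum((partial * K(X))[:, :, None]
                    * (X2[None, :, :] - X[:, None, :]), 1) / lengthscale2

    # Finite-difference spot check of one entry.
    eps = 1e-6
    E = np.zeros_like(X); E[0, 0] = eps
    fd = (np.sum(partial * K(X + E)) - np.sum(partial * K(X - E))) / (2 * eps)
    assert np.allclose(grad_X[0, 0], fd)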
@@ -15,7 +15,7 @@ class white(kernpart):
     """
     def __init__(self,D,variance=1.):
         self.D = D
-        self.Nparam = 1.
+        self.Nparam = 1
         self.name = 'white'
         self.set_param(np.array([variance]).flatten())
@@ -29,7 +29,7 @@ class GP_regression(model):

     def __init__(self,X,Y,kernel=None,normalize_X=False,normalize_Y=False, Xslices=None):
         if kernel is None:
-            kernel = kern.rbf(X.shape[1]) + kern.white(X.shape[1])
+            kernel = kern.rbf(X.shape[1]) + kern.white(X.shape[1]) + kern.bias(X.shape[1])

         # parse arguments
         self.Xslices = Xslices
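With bias added to the default, a regression demo needs no explicit kernel. A hedged usage sketch; GPy.models.GP_regression as the import path is an assumption about the package layout at this commit:

    import numpy as np
    import GPy   # assumed import path for this era of the code

    X = np.random.randn(20, 1)
    Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
    # kernel=None now defaults to rbf + white + bias
    m = GPy.models.GP_regression(X, Y)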