Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-05 01:32:40 +02:00.
maint: Remove tabs (and some trailing spaces)
commit 0a9b1cc10d
parent 490c4c73f5
3 changed files with 85 additions and 84 deletions
@@ -17,7 +17,7 @@ class Prior(object):
         if not cls._instance or cls._instance.__class__ is not cls:
             newfunc = super(Prior, cls).__new__
             if newfunc is object.__new__:
-                cls._instance = newfunc(cls)
+                cls._instance = newfunc(cls)
             else:
                 cls._instance = newfunc(cls, *args, **kwargs)
         return cls._instance
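Note: the line touched above sits in Prior's instance cache; `__new__` returns one shared object per Prior subclass. A minimal standalone sketch of the same pattern (the class name is illustrative, not from GPy):

    class Singleton(object):
        _instance = None

        def __new__(cls, *args, **kwargs):
            # Reuse the cached instance unless a different subclass asks.
            if not cls._instance or cls._instance.__class__ is not cls:
                cls._instance = super(Singleton, cls).__new__(cls)
            return cls._instance

    a, b = Singleton(), Singleton()
    assert a is b  # both names point at the single cached instance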
@@ -58,9 +58,9 @@ class Gaussian(Prior):
                     return instance()
         newfunc = super(Prior, cls).__new__
         if newfunc is object.__new__:
-            o = newfunc(cls)
+            o = newfunc(cls)
         else:
-            o = newfunc(cls, mu, sigma)
+            o = newfunc(cls, mu, sigma)
         cls._instances.append(weakref.ref(o))
         return cls._instances[-1]()
 
@@ -102,9 +102,9 @@ class Uniform(Prior):
                     return instance()
         newfunc = super(Prior, cls).__new__
         if newfunc is object.__new__:
-            o = newfunc(cls)
+            o = newfunc(cls)
         else:
-            o = newfunc(cls, lower, upper)
+            o = newfunc(cls, lower, upper)
         cls._instances.append(weakref.ref(o))
         return cls._instances[-1]()
 
@@ -282,7 +282,7 @@ class Gamma(Prior):
                     return instance()
         newfunc = super(Prior, cls).__new__
         if newfunc is object.__new__:
-            o = newfunc(cls)
+            o = newfunc(cls)
         else:
             o = newfunc(cls, a, b)
         cls._instances.append(weakref.ref(o))
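The Gaussian, Uniform and Gamma hunks above all touch the same weakref-based cache: priors constructed with equal parameters share one instance, and dead references are pruned on the fly. A minimal sketch of the pattern, assuming only the standard library (class and attribute names are illustrative):

    import weakref

    class CachedPrior(object):
        _instances = []

        def __new__(cls, mu=0, sigma=1):
            # Prune dead references, then reuse a live instance with equal parameters.
            cls._instances[:] = [r for r in cls._instances if r() is not None]
            for r in cls._instances:
                if r().mu == mu and r().sigma == sigma:
                    return r()
            o = super(CachedPrior, cls).__new__(cls)
            o.mu, o.sigma = mu, sigma
            cls._instances.append(weakref.ref(o))
            return cls._instances[-1]()

    assert CachedPrior(0, 1) is CachedPrior(0, 1)
    assert CachedPrior(0, 1) is not CachedPrior(0, 2)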
@@ -542,8 +542,8 @@ class DGPLVM(Prior):
 
     """
    domain = _REAL
 
-    def __new__(cls, sigma2, lbl, x_shape):
+    def __new__(cls, sigma2, lbl, x_shape):
         return super(Prior, cls).__new__(cls, sigma2, lbl, x_shape)
 
     def __init__(self, sigma2, lbl, x_shape):
@@ -909,13 +909,13 @@ class DGPLVM_Lamda(Prior, Parameterized):
     # This function calculates log of our prior
     def lnpdf(self, x):
         x = x.reshape(self.x_shape)
 
-        #!!!!!!!!!!!!!!!!!!!!!!!!!!!
-        #self.lamda.values[:] = self.lamda.values/self.lamda.values.sum()
-
+        #!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        #self.lamda.values[:] = self.lamda.values/self.lamda.values.sum()
+
         xprime = x.dot(np.diagflat(self.lamda))
         x = xprime
-        # print x
+        # print x
         cls = self.compute_cls(x)
         M_0 = np.mean(x, axis=0)
         M_i = self.compute_Mi(cls)
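For context on the surrounding lines: `x.dot(np.diagflat(self.lamda))` rescales each latent dimension (column) of `x` by one entry of `lamda`. A quick sketch of that identity with made-up values:

    import numpy as np

    x = np.arange(6.0).reshape(3, 2)  # toy latent points, shape (3, 2)
    lamda = np.array([0.5, 2.0])      # one weight per latent dimension

    # Right-multiplying by a diagonal matrix == elementwise column scaling.
    assert np.allclose(x.dot(np.diagflat(lamda)), x * lamda)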
@@ -932,7 +932,7 @@ class DGPLVM_Lamda(Prior, Parameterized):
         x = x.reshape(self.x_shape)
         xprime = x.dot(np.diagflat(self.lamda))
         x = xprime
-        # print x
+        # print x
         cls = self.compute_cls(x)
         M_0 = np.mean(x, axis=0)
         M_i = self.compute_Mi(cls)
@@ -964,14 +964,14 @@ class DGPLVM_Lamda(Prior, Parameterized):
 
         # Because of the GPy we need to transpose our matrix so that it gets the same shape as out matrix (denominator layout!!!)
         DPxprim_Dx = DPxprim_Dx.T
 
 
         DPxprim_Dlamda = DPx_Dx.dot(x)
 
-        # Because of the GPy we need to transpose our matrix so that it gets the same shape as out matrix (denominator layout!!!)
+        # Because of the GPy we need to transpose our matrix so that it gets the same shape as out matrix (denominator layout!!!)
         DPxprim_Dlamda = DPxprim_Dlamda.T
 
         self.lamda.gradient = np.diag(DPxprim_Dlamda)
-        # print DPxprim_Dx
+        # print DPxprim_Dx
         return DPxprim_Dx
 
@@ -1046,7 +1046,7 @@ class DGPLVM_T(Prior):
         M_i = np.zeros((self.classnum, self.dim))
         for i in cls:
             # Mean of each class
-            # class_i = np.multiply(cls[i],vec)
+            # class_i = np.multiply(cls[i],vec)
             class_i = cls[i]
             M_i[i] = np.mean(class_i, axis=0)
         return M_i
@@ -1155,7 +1155,7 @@ class DGPLVM_T(Prior):
         x = x.reshape(self.x_shape)
         xprim = x.dot(self.vec)
         x = xprim
-        # print x
+        # print x
         cls = self.compute_cls(x)
         M_0 = np.mean(x, axis=0)
         M_i = self.compute_Mi(cls)
@@ -1163,7 +1163,7 @@ class DGPLVM_T(Prior):
         Sw = self.compute_Sw(cls, M_i)
         # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
         #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
-        #print 'SB_inv: ', Sb_inv_N
+        #print 'SB_inv: ', Sb_inv_N
         #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
         Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
         return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
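The live line here, `Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]`, adds jitter to the between-class scatter before inverting it (GPy's `pdinv` returns the inverse as its first element). A pure-numpy sketch of why the jitter matters, with a made-up matrix:

    import numpy as np

    # Rank-deficient scatter matrix: np.linalg.inv(Sb) would raise LinAlgError.
    Sb = np.array([[1.0, 1.0],
                   [1.0, 1.0]])

    # Adding 0.1 * I shifts every eigenvalue up by 0.1, making Sb invertible.
    Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * 0.1)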
@@ -1172,8 +1172,8 @@ class DGPLVM_T(Prior):
     def lnpdf_grad(self, x):
         x = x.reshape(self.x_shape)
         xprim = x.dot(self.vec)
-        x = xprim
-        # print x
+        x = xprim
+        # print x
         cls = self.compute_cls(x)
         M_0 = np.mean(x, axis=0)
         M_i = self.compute_Mi(cls)
@@ -1188,7 +1188,7 @@ class DGPLVM_T(Prior):
         # Calculating inverse of Sb and its transpose and minus
         # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
         #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
-        #print 'SB_inv: ',Sb_inv_N
+        #print 'SB_inv: ',Sb_inv_N
         #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
         Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
         Sb_inv_N_trans = np.transpose(Sb_inv_N)
@@ -1375,4 +1375,5 @@ class StudentT(Prior):
     def rvs(self, n):
         from scipy.stats import t
         ret = t.rvs(self.nu, loc=self.mu, scale=self.sigma, size=n)
-        return ret
+        return ret
+
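The StudentT hunk only re-indents `return ret`, but the method is a thin wrapper around `scipy.stats.t`. A usage sketch with illustrative parameter values:

    from scipy.stats import t

    nu, mu, sigma = 4.0, 0.0, 1.0
    samples = t.rvs(nu, loc=mu, scale=sigma, size=1000)  # the call rvs() makes
    print(samples.mean(), samples.std())  # ~mu and ~sigma*sqrt(nu/(nu-2))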
@@ -20,7 +20,7 @@ class ODE_t(Kern):
         self.link_parameters(self.a, self.c, self.variance_Yt, self.lengthscale_Yt,self.ubias)
 
     def K(self, X, X2=None):
-        """Compute the covariance matrix between X and X2."""
+        """Compute the covariance matrix between X and X2."""
         X,slices = X[:,:-1],index_to_slices(X[:,-1])
         if X2 is None:
             X2,slices2 = X,slices
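For context on `X[:,:-1], index_to_slices(X[:,-1])`: multi-output kernels in GPy store the output index in the last column of X, and `index_to_slices` turns that column into per-output row slices. A rough standalone equivalent, assuming the index column comes in contiguous blocks (the function name is mine, not GPy's):

    import numpy as np

    def index_to_slices_sketch(index):
        # Block boundaries where the contiguous index column changes value.
        index = np.asarray(index).astype(int)
        bounds = np.flatnonzero(np.diff(index)) + 1
        edges = np.concatenate(([0], bounds, [len(index)]))
        return [[slice(a, b)] for a, b in zip(edges[:-1], edges[1:])]

    X = np.array([[0.1, 0], [0.2, 0], [0.3, 1], [0.4, 1]])
    print(index_to_slices_sketch(X[:, -1]))  # [[slice(0, 2)], [slice(2, 4)]]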
@@ -31,9 +31,9 @@ class ODE_t(Kern):
 
         tdist = (X[:,0][:,None] - X2[:,0][None,:])**2
         ttdist = (X[:,0][:,None] - X2[:,0][None,:])
-
+
         vyt = self.variance_Yt
-
+
         lyt=1/(2*self.lengthscale_Yt)
 
         a = -self.a
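Context: `tdist` and `ttdist` build pairwise (squared) time differences by broadcasting a column vector against a row vector. A minimal sketch with made-up inputs:

    import numpy as np

    t1 = np.array([0.0, 1.0, 2.0])
    t2 = np.array([0.0, 0.5])

    # (n1, 1) minus (1, n2) broadcasts to an (n1, n2) difference matrix.
    ttdist = t1[:, None] - t2[None, :]
    tdist = ttdist ** 2
    print(tdist.shape)  # (3, 2)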
@@ -69,10 +69,10 @@ class ODE_t(Kern):
         lyt = 1./(2*self.lengthscale_Yt)
 
         a = -self.a
-        c = self.c
-
+        c = self.c
+
         k1 = (2*lyt )*vyt
 
 
         Kdiag = np.zeros(X.shape[0])
         slices = index_to_slices(X[:,-1])
@@ -106,7 +106,7 @@ class ODE_t(Kern):
         tdist = (X[:,0][:,None] - X2[:,0][None,:])**2
         ttdist = (X[:,0][:,None] - X2[:,0][None,:])
         #rdist = [tdist,xdist]
-
+
         rd=tdist.shape[0]
 
         dka = np.zeros([rd,rd])
@@ -146,7 +146,7 @@ class ODE_t(Kern):
                 elif i==1 and j==1:
                     dkYdvart[ss1,ss2] = (k1(tdist[ss1,ss2]) + 1. )* kyy(tdist[ss1,ss2])
                     dkYdlent[ss1,ss2] = vyt*dkyydlyt(tdist[ss1,ss2])*( k1(tdist[ss1,ss2]) + 1. ) +\
-                        vyt*kyy(tdist[ss1,ss2])*dk1dlyt(tdist[ss1,ss2])
+                        vyt*kyy(tdist[ss1,ss2])*dk1dlyt(tdist[ss1,ss2])
                     dkdubias[ss1,ss2] = 1
                 else:
                     dkYdvart[ss1,ss2] = (-k4(ttdist[ss1,ss2])+1)*kyy(tdist[ss1,ss2])
@@ -156,10 +156,10 @@ class ODE_t(Kern):
                     dkdubias[ss1,ss2] = 0
                     #dkYdlent[ss1,ss2] = vyt*dkyydlyt(tdist[ss1,ss2])* (-2*lyt*(ttdist[ss1,ss2])+1.)+\
                     #vyt*kyy(tdist[ss1,ss2])*(-2)*(ttdist[ss1,ss2])
 
 
         self.variance_Yt.gradient = np.sum(dkYdvart * dL_dK)
 
         self.lengthscale_Yt.gradient = np.sum(dkYdlent*(-0.5*self.lengthscale_Yt**(-2)) * dL_dK)
 
-        self.ubias.gradient = np.sum(dkdubias * dL_dK)
+        self.ubias.gradient = np.sum(dkdubias * dL_dK)
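Context for the gradient lines above: each hyperparameter gradient is the chain rule dL/dtheta = sum_ij (dL/dK_ij)(dK_ij/dtheta), collapsed by `np.sum`. A small sketch for an RBF-style variance parameter (the arrays are made up):

    import numpy as np

    rng = np.random.default_rng(0)
    dL_dK = rng.normal(size=(5, 5))      # upstream gradient from the likelihood
    r = rng.uniform(size=(5, 5))         # scaled distances
    dK_dvariance = np.exp(-0.5 * r**2)   # for k = variance * exp(-r**2 / 2)

    # Chain rule summed over every matrix entry: the np.sum(... * dL_dK) pattern.
    variance_gradient = np.sum(dK_dvariance * dL_dK)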
@@ -10,67 +10,67 @@ from paramz.caching import Cache_this
 
 class GridKern(Stationary):
 
-    def __init__(self, input_dim, variance, lengthscale, ARD, active_dims, name, originalDimensions, useGPU=False):
-        super(GridKern, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
-        self.originalDimensions = originalDimensions
+    def __init__(self, input_dim, variance, lengthscale, ARD, active_dims, name, originalDimensions, useGPU=False):
+        super(GridKern, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
+        self.originalDimensions = originalDimensions
 
-    @Cache_this(limit=3, ignore_args=())
-    def dKd_dVar(self, X, X2=None):
-        """
-        Derivative of Kernel function wrt variance applied on inputs X and X2.
-        In the stationary case there is an inner function depending on the
-        distances from X to X2, called r.
+    @Cache_this(limit=3, ignore_args=())
+    def dKd_dVar(self, X, X2=None):
+        """
+        Derivative of Kernel function wrt variance applied on inputs X and X2.
+        In the stationary case there is an inner function depending on the
+        distances from X to X2, called r.
 
-        dKd_dVar(X, X2) = dKdVar_of_r((X-X2)**2)
-        """
-        r = self._scaled_dist(X, X2)
-        return self.dKdVar_of_r(r)
+        dKd_dVar(X, X2) = dKdVar_of_r((X-X2)**2)
+        """
+        r = self._scaled_dist(X, X2)
+        return self.dKdVar_of_r(r)
 
-    @Cache_this(limit=3, ignore_args=())
-    def dKd_dLen(self, X, dimension, lengthscale, X2=None):
-        """
-        Derivate of Kernel function wrt lengthscale applied on inputs X and X2.
-        In the stationary case there is an inner function depending on the
-        distances from X to X2, called r.
+    @Cache_this(limit=3, ignore_args=())
+    def dKd_dLen(self, X, dimension, lengthscale, X2=None):
+        """
+        Derivate of Kernel function wrt lengthscale applied on inputs X and X2.
+        In the stationary case there is an inner function depending on the
+        distances from X to X2, called r.
 
-        dKd_dLen(X, X2) = dKdLen_of_r((X-X2)**2)
-        """
-        r = self._scaled_dist(X, X2)
-        return self.dKdLen_of_r(r, dimension, lengthscale)
+        dKd_dLen(X, X2) = dKdLen_of_r((X-X2)**2)
+        """
+        r = self._scaled_dist(X, X2)
+        return self.dKdLen_of_r(r, dimension, lengthscale)
 
 class GridRBF(GridKern):
-    """
-    Similar to regular RBF but supplemented with methods required for Gaussian grid regression
-    Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel:
+    """
+    Similar to regular RBF but supplemented with methods required for Gaussian grid regression
+    Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel:
 
-    .. math::
+    .. math::
 
-       k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg)
+       k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg)
 
-    """
-    _support_GPU = True
-    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='gridRBF', originalDimensions=1, useGPU=False):
-        super(GridRBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, originalDimensions, useGPU=useGPU)
+    """
+    _support_GPU = True
+    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='gridRBF', originalDimensions=1, useGPU=False):
+        super(GridRBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, originalDimensions, useGPU=useGPU)
 
-    def K_of_r(self, r):
-        return (self.variance**(float(1)/self.originalDimensions)) * np.exp(-0.5 * r**2)
+    def K_of_r(self, r):
+        return (self.variance**(float(1)/self.originalDimensions)) * np.exp(-0.5 * r**2)
 
-    def dKdVar_of_r(self, r):
-        """
-        Compute derivative of kernel wrt variance
-        """
-        return np.exp(-0.5 * r**2)
+    def dKdVar_of_r(self, r):
+        """
+        Compute derivative of kernel wrt variance
+        """
+        return np.exp(-0.5 * r**2)
 
-    def dKdLen_of_r(self, r, dimCheck, lengthscale):
-        """
-        Compute derivative of kernel for dimension wrt lengthscale
-        Computation of derivative changes when lengthscale corresponds to
-        the dimension of the kernel whose derivate is being computed.
-        """
-        if (dimCheck == True):
-            return (self.variance**(float(1)/self.originalDimensions)) * np.exp(-0.5 * r**2) * (r**2) / (lengthscale**(float(1)/self.originalDimensions))
-        else:
-            return (self.variance**(float(1)/self.originalDimensions)) * np.exp(-0.5 * r**2) / (lengthscale**(float(1)/self.originalDimensions))
+    def dKdLen_of_r(self, r, dimCheck, lengthscale):
+        """
+        Compute derivative of kernel for dimension wrt lengthscale
+        Computation of derivative changes when lengthscale corresponds to
+        the dimension of the kernel whose derivate is being computed.
+        """
+        if (dimCheck == True):
+            return (self.variance**(float(1)/self.originalDimensions)) * np.exp(-0.5 * r**2) * (r**2) / (lengthscale**(float(1)/self.originalDimensions))
+        else:
+            return (self.variance**(float(1)/self.originalDimensions)) * np.exp(-0.5 * r**2) / (lengthscale**(float(1)/self.originalDimensions))
 
-    def dK_dr(self, r):
-        return -r*self.K_of_r(r)
+    def dK_dr(self, r):
+        return -r*self.K_of_r(r)
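The repeated `self.variance**(float(1)/self.originalDimensions)` in this file implements the grid (Kronecker) factorization: the full D-dimensional RBF is a product of one 1-D kernel per dimension, so each factor carries the D-th root of the variance. A numerical sketch of that identity for a single pair of 2-D grid points (the values are made up):

    import numpy as np

    variance, D = 3.0, 2
    r = np.array([0.7, 1.2])  # per-dimension scaled distances for one pair

    # One 1-D factor per dimension, each carrying variance**(1/D)...
    factors = (variance ** (1.0 / D)) * np.exp(-0.5 * r**2)

    # ...whose product recovers the full D-dimensional RBF value.
    full = variance * np.exp(-0.5 * np.sum(r**2))
    assert np.isclose(np.prod(factors), full)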