mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-15 06:52:39 +02:00)

commit 2e5e8ac026 (parent 2a39440619)
REFACTORING: model names, lowercase, classes uppercase

50 changed files with 436 additions and 3307 deletions

@@ -13,14 +13,14 @@ class Brownian(kernpart):
     """
     Brownian Motion kernel.
 
-    :param D: the number of input dimensions
-    :type D: int
+    :param input_dim: the number of input dimensions
+    :type input_dim: int
     :param variance:
     :type variance: float
     """
-    def __init__(self,D,variance=1.):
-        self.D = D
-        assert self.D==1, "Brownian motion in 1D only"
+    def __init__(self,input_dim,variance=1.):
+        self.input_dim = input_dim
+        assert self.input_dim==1, "Brownian motion in 1D only"
         self.Nparam = 1.
         self.name = 'Brownian'
         self._set_params(np.array([variance]).flatten())
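Note: for orientation, the covariance this part implements is the standard Brownian-motion kernel k(x, x') = variance * min(x, x') on a one-dimensional, non-negative input, which is why the constructor asserts input_dim == 1. A minimal NumPy sketch of that formula (an illustration only, not the kernpart code from this hunk):

    import numpy as np

    def brownian_cov(X, X2=None, variance=1.0):
        # Standard Brownian motion covariance for 1-D, non-negative inputs:
        # k(x, x') = variance * min(x, x').
        X2 = X if X2 is None else X2
        return variance * np.fmin(X.reshape(-1, 1), X2.reshape(1, -1))

    X = np.linspace(0.0, 2.0, 5)
    K = brownian_cov(X)          # (5, 5) covariance matrix
    print(np.allclose(K, K.T))   # symmetric by construction
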
@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 
-from constructors import rbf, Matern32, Matern52, exponential, linear, white, bias, finite_dimensional, spline, Brownian, periodic_exponential, periodic_Matern32, periodic_Matern52, prod, symmetric, coregionalise, rational_quadratic, fixed, rbfcos, independent_outputs
+from constructors import rbf, Matern32, Matern52, exponential, linear, white, bias, finite_dimensional, spline, Brownian, periodic_exponential, periodic_Matern32, periodic_Matern52, prod, symmetric, Coregionalise, rational_quadratic, fixed, rbfcos, independent_outputs
 try:
     from constructors import rbf_sympy, sympykern # these depend on sympy
 except:

@@ -7,14 +7,14 @@ import numpy as np
 import hashlib
 
 class bias(kernpart):
-    def __init__(self,D,variance=1.):
+    def __init__(self,input_dim,variance=1.):
         """
-        :param D: the number of input dimensions
-        :type D: int
+        :param input_dim: the number of input dimensions
+        :type input_dim: int
         :param variance: the variance of the kernel
         :type variance: float
         """
-        self.D = D
+        self.input_dim = input_dim
         self.Nparam = 1
         self.name = 'bias'
         self._set_params(np.array([variance]).flatten())

@@ -21,7 +21,7 @@ from periodic_Matern32 import periodic_Matern32 as periodic_Matern32part
 from periodic_Matern52 import periodic_Matern52 as periodic_Matern52part
 from prod import prod as prodpart
 from symmetric import symmetric as symmetric_part
-from coregionalise import coregionalise as coregionalise_part
+from coregionalise import Coregionalise as coregionalise_part
 from rational_quadratic import rational_quadratic as rational_quadraticpart
 from rbfcos import rbfcos as rbfcospart
 from independent_outputs import independent_outputs as independent_output_part
@@ -33,8 +33,8 @@ def rbf(D,variance=1., lengthscale=None,ARD=False):
     """
     Construct an RBF kernel
 
-    :param D: dimensionality of the kernel, obligatory
-    :type D: int
+    :param input_dim: dimensionality of the kernel, obligatory
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     :param lengthscale: the lengthscale of the kernel
@@ -51,7 +51,7 @@ def linear(D,variances=None,ARD=False):
 
     Arguments
     ---------
-    D (int), obligatory
+    input_dimD (int), obligatory
     variances (np.ndarray)
     ARD (boolean)
     """

@@ -64,7 +64,7 @@ def white(D,variance=1.):
 
     Arguments
     ---------
-    D (int), obligatory
+    input_dimD (int), obligatory
     variance (float)
     """
     part = whitepart(D,variance)
@@ -74,8 +74,8 @@ def exponential(D,variance=1., lengthscale=None, ARD=False):
     """
     Construct an exponential kernel
 
-    :param D: dimensionality of the kernel, obligatory
-    :type D: int
+    :param input_dim: dimensionality of the kernel, obligatory
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     :param lengthscale: the lengthscale of the kernel

@@ -90,8 +90,8 @@ def Matern32(D,variance=1., lengthscale=None, ARD=False):
     """
     Construct a Matern 3/2 kernel.
 
-    :param D: dimensionality of the kernel, obligatory
-    :type D: int
+    :param input_dim: dimensionality of the kernel, obligatory
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     :param lengthscale: the lengthscale of the kernel

@@ -106,8 +106,8 @@ def Matern52(D,variance=1., lengthscale=None, ARD=False):
     """
     Construct a Matern 5/2 kernel.
 
-    :param D: dimensionality of the kernel, obligatory
-    :type D: int
+    :param input_dim: dimensionality of the kernel, obligatory
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     :param lengthscale: the lengthscale of the kernel
@@ -124,7 +124,7 @@ def bias(D,variance=1.):
 
     Arguments
     ---------
-    D (int), obligatory
+    input_dim (int), obligatory
     variance (float)
     """
     part = biaspart(D,variance)

@@ -133,7 +133,7 @@ def bias(D,variance=1.):
 def finite_dimensional(D,F,G,variances=1.,weights=None):
     """
     Construct a finite dimensional kernel.
-    D: int - the number of input dimensions
+    input_dim: int - the number of input dimensions
     F: np.array of functions with shape (n,) - the n basis functions
     G: np.array with shape (n,n) - the Gram matrix associated to F
     variances : np.ndarray with shape (n,)
@@ -145,8 +145,8 @@ def spline(D,variance=1.):
     """
     Construct a spline kernel.
 
-    :param D: Dimensionality of the kernel
-    :type D: int
+    :param input_dim: Dimensionality of the kernel
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     """

@@ -157,8 +157,8 @@ def Brownian(D,variance=1.):
     """
     Construct a Brownian motion kernel.
 
-    :param D: Dimensionality of the kernel
-    :type D: int
+    :param input_dim: Dimensionality of the kernel
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     """
@@ -204,8 +204,8 @@ def periodic_exponential(D=1,variance=1., lengthscale=None, period=2*np.pi,n_fre
     """
     Construct an periodic exponential kernel
 
-    :param D: dimensionality, only defined for D=1
-    :type D: int
+    :param input_dim: dimensionality, only defined for input_dim=1
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     :param lengthscale: the lengthscale of the kernel

@@ -222,8 +222,8 @@ def periodic_Matern32(D,variance=1., lengthscale=None, period=2*np.pi,n_freq=10,
     """
     Construct a periodic Matern 3/2 kernel.
 
-    :param D: dimensionality, only defined for D=1
-    :type D: int
+    :param input_dim: dimensionality, only defined for input_dim=1
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     :param lengthscale: the lengthscale of the kernel

@@ -240,8 +240,8 @@ def periodic_Matern52(D,variance=1., lengthscale=None, period=2*np.pi,n_freq=10,
     """
     Construct a periodic Matern 5/2 kernel.
 
-    :param D: dimensionality, only defined for D=1
-    :type D: int
+    :param input_dim: dimensionality, only defined for input_dim=1
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     :param lengthscale: the lengthscale of the kernel
@@ -256,14 +256,14 @@ def periodic_Matern52(D,variance=1., lengthscale=None, period=2*np.pi,n_freq=10,
 
 def prod(k1,k2,tensor=False):
     """
-    Construct a product kernel over D from two kernels over D
+    Construct a product kernel over input_dim from two kernels over input_dim
 
     :param k1, k2: the kernels to multiply
    :type k1, k2: kernpart
     :rtype: kernel object
     """
     part = prodpart(k1,k2,tensor)
-    return kern(part.D, [part])
+    return kern(part.input_dim, [part])
 
 def symmetric(k):
     """
@@ -273,7 +273,7 @@ def symmetric(k):
     k_.parts = [symmetric_part(p) for p in k.parts]
     return k_
 
-def coregionalise(Nout,R=1, W=None, kappa=None):
+def Coregionalise(Nout,R=1, W=None, kappa=None):
     p = coregionalise_part(Nout,R,W,kappa)
     return kern(1,[p])
 
@@ -282,8 +282,8 @@ def rational_quadratic(D,variance=1., lengthscale=1., power=1.):
     """
     Construct rational quadratic kernel.
 
-    :param D: the number of input dimensions
-    :type D: int (D=1 is the only value currently supported)
+    :param input_dim: the number of input dimensions
+    :type input_dim: int (input_dim=1 is the only value currently supported)
     :param variance: the variance :math:`\sigma^2`
     :type variance: float
     :param lengthscale: the lengthscale :math:`\ell`

@@ -300,7 +300,7 @@ def fixed(D, K, variance=1.):
 
     Arguments
     ---------
-    D (int), obligatory
+    input_dim (int), obligatory
     K (np.array), obligatory
     variance (float)
     """
@@ -321,6 +321,6 @@ def independent_outputs(k):
     for sl in k.input_slices:
         assert (sl.start is None) and (sl.stop is None), "cannot adjust input slices! (TODO)"
     parts = [independent_output_part(p) for p in k.parts]
-    return kern(k.D+1,parts)
+    return kern(k.input_dim+1,parts)
 
 

@@ -7,12 +7,12 @@ from GPy.util.linalg import mdot, pdinv
 import pdb
 from scipy import weave
 
-class coregionalise(kernpart):
+class Coregionalise(kernpart):
     """
     Kernel for Intrinsic Corregionalization Models
     """
     def __init__(self,Nout,R=1, W=None, kappa=None):
-        self.D = 1
+        self.input_dim = 1
         self.name = 'coregion'
         self.Nout = Nout
         self.R = R

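Note: for context, an intrinsic coregionalisation part such as this is conventionally parameterised by a mixing matrix B = W Wᵀ + diag(kappa), with W of shape (Nout, R) and kappa of length Nout; the covariance between points belonging to outputs i and j is then scaled by B[i, j]. The hunk above only shows the constructor signature, so the following NumPy sketch is a hedged illustration of that conventional construction, not this file's implementation:

    import numpy as np

    def coregionalisation_matrix(W, kappa):
        # B = W W^T + diag(kappa): a positive semi-definite matrix mixing
        # the Nout outputs (rank-R part plus a per-output diagonal term).
        return W @ W.T + np.diag(kappa)

    Nout, R = 3, 2
    rng = np.random.default_rng(0)
    W = rng.normal(size=(Nout, R))
    kappa = np.abs(rng.normal(size=Nout))
    B = coregionalisation_matrix(W, kappa)

    # Cross-output covariance block for points with output labels 0, 0, 1, 2:
    index = np.array([0, 0, 1, 2])
    K_outputs = B[np.ix_(index, index)]
    print(np.all(np.linalg.eigvalsh(B) >= -1e-10))   # PSD check
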
@@ -11,14 +11,14 @@ from prod import prod
 from ..util.linalg import symmetrify
 
 class kern(parameterised):
-    def __init__(self, D, parts=[], input_slices=None):
+    def __init__(self, input_dim, parts=[], input_slices=None):
         """
         This is the main kernel class for GPy. It handles multiple (additive) kernel functions, and keeps track of variaous things like which parameters live where.
 
         The technical code for kernels is divided into _parts_ (see e.g. rbf.py). This obnject contains a list of parts, which are computed additively. For multiplication, special _prod_ parts are used.
 
-        :param D: The dimensioality of the kernel's input space
-        :type D: int
+        :param input_dim: The dimensioality of the kernel's input space
+        :type input_dim: int
         :param parts: the 'parts' (PD functions) of the kernel
         :type parts: list of kernpart objects
         :param input_slices: the slices on the inputs which apply to each kernel

@@ -29,7 +29,7 @@ class kern(parameterised):
         self.Nparts = len(parts)
         self.Nparam = sum([p.Nparam for p in self.parts])
 
-        self.D = D
+        self.input_dim = input_dim
 
         # deal with input_slices
         if input_slices is None:
@@ -96,10 +96,10 @@ class kern(parameterised):
         :type other: GPy.kern
         """
         if tensor:
-            D = self.D + other.D
-            self_input_slices = [slice(*sl.indices(self.D)) for sl in self.input_slices]
-            other_input_indices = [sl.indices(other.D) for sl in other.input_slices]
-            other_input_slices = [slice(i[0] + self.D, i[1] + self.D, i[2]) for i in other_input_indices]
+            D = self.input_dim + other.input_dim
+            self_input_slices = [slice(*sl.indices(self.input_dim)) for sl in self.input_slices]
+            other_input_indices = [sl.indices(other.input_dim) for sl in other.input_slices]
+            other_input_slices = [slice(i[0] + self.input_dim, i[1] + self.input_dim, i[2]) for i in other_input_indices]
 
             newkern = kern(D, self.parts + other.parts, self_input_slices + other_input_slices)
 

@@ -111,8 +111,8 @@ class kern(parameterised):
             newkern.constraints = self.constraints + other.constraints
             newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices]
         else:
-            assert self.D == other.D
-            newkern = kern(self.D, self.parts + other.parts, self.input_slices + other.input_slices)
+            assert self.input_dim == other.input_dim
+            newkern = kern(self.input_dim, self.parts + other.parts, self.input_slices + other.input_slices)
             # transfer constraints:
             newkern.constrained_indices = self.constrained_indices + [i + self.Nparam for i in other.constrained_indices]
             newkern.constraints = self.constraints + other.constraints
@@ -138,16 +138,16 @@ class kern(parameterised):
 
         slices = []
         for sl1, sl2 in itertools.product(K1.input_slices, K2.input_slices):
-            s1, s2 = [False] * K1.D, [False] * K2.D
+            s1, s2 = [False] * K1.input_dim, [False] * K2.input_dim
             s1[sl1], s2[sl2] = [True], [True]
             slices += [s1 + s2]
 
         newkernparts = [prod(k1, k2, tensor) for k1, k2 in itertools.product(K1.parts, K2.parts)]
 
         if tensor:
-            newkern = kern(K1.D + K2.D, newkernparts, slices)
+            newkern = kern(K1.input_dim + K2.input_dim, newkernparts, slices)
         else:
-            newkern = kern(K1.D, newkernparts, slices)
+            newkern = kern(K1.input_dim, newkernparts, slices)
 
         newkern._follow_constrains(K1, K2)
         return newkern
@@ -211,7 +211,7 @@ class kern(parameterised):
     def K(self, X, X2=None, which_parts='all'):
         if which_parts == 'all':
             which_parts = [True] * self.Nparts
-        assert X.shape[1] == self.D
+        assert X.shape[1] == self.input_dim
         if X2 is None:
             target = np.zeros((X.shape[0], X.shape[0]))
             [p.K(X[:, i_s], None, target=target) for p, i_s, part_i_used in zip(self.parts, self.input_slices, which_parts) if part_i_used]
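Note: the K method in this hunk shows the additive design of the kern class: each active part adds its covariance, evaluated on its own input slice, into a shared target array. A small self-contained sketch of that composition pattern (plain NumPy with hypothetical toy parts, not GPy's kernpart API):

    import numpy as np

    def rbf_part(X, X2):
        # toy 'part': unit-variance RBF with unit lengthscale
        d2 = ((X[:, None, :] - X2[None, :, :]) ** 2).sum(-1)
        return np.exp(-0.5 * d2)

    def bias_part(X, X2):
        # toy 'part': constant covariance
        return np.ones((X.shape[0], X2.shape[0]))

    def additive_K(X, parts, input_slices, X2=None):
        # Mirror of the accumulation pattern above: each part contributes
        # on its own column slice, summed into 'target'.
        X2 = X if X2 is None else X2
        target = np.zeros((X.shape[0], X2.shape[0]))
        for part, i_s in zip(parts, input_slices):
            target += part(X[:, i_s], X2[:, i_s])
        return target

    X = np.random.default_rng(1).normal(size=(6, 2))
    K = additive_K(X, [rbf_part, bias_part], [slice(0, 2), slice(0, 2)])
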
@@ -225,11 +225,11 @@ class kern(parameterised):
         :param dL_dK: An array of dL_dK derivaties, dL_dK
         :type dL_dK: Np.ndarray (N x M)
         :param X: Observed data inputs
-        :type X: np.ndarray (N x D)
+        :type X: np.ndarray (N x input_dim)
         :param X2: Observed dara inputs (optional, defaults to X)
-        :type X2: np.ndarray (M x D)
+        :type X2: np.ndarray (M x input_dim)
         """
-        assert X.shape[1] == self.D
+        assert X.shape[1] == self.input_dim
         target = np.zeros(self.Nparam)
         if X2 is None:
             [p.dK_dtheta(dL_dK, X[:, i_s], None, target[ps]) for p, i_s, ps, in zip(self.parts, self.input_slices, self.param_slices)]

@@ -251,20 +251,20 @@ class kern(parameterised):
     def Kdiag(self, X, which_parts='all'):
         if which_parts == 'all':
             which_parts = [True] * self.Nparts
-        assert X.shape[1] == self.D
+        assert X.shape[1] == self.input_dim
         target = np.zeros(X.shape[0])
         [p.Kdiag(X[:, i_s], target=target) for p, i_s, part_on in zip(self.parts, self.input_slices, which_parts) if part_on]
         return target
 
     def dKdiag_dtheta(self, dL_dKdiag, X):
-        assert X.shape[1] == self.D
+        assert X.shape[1] == self.input_dim
         assert dL_dKdiag.size == X.shape[0]
         target = np.zeros(self.Nparam)
         [p.dKdiag_dtheta(dL_dKdiag, X[:, i_s], target[ps]) for p, i_s, ps in zip(self.parts, self.input_slices, self.param_slices)]
         return self._transform_gradients(target)
 
     def dKdiag_dX(self, dL_dKdiag, X):
-        assert X.shape[1] == self.D
+        assert X.shape[1] == self.input_dim
         target = np.zeros_like(X)
         [p.dKdiag_dX(dL_dKdiag, X[:, i_s], target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
         return target
@@ -386,7 +386,7 @@ class kern(parameterised):
     def plot(self, x=None, plot_limits=None, which_parts='all', resolution=None, *args, **kwargs):
         if which_parts == 'all':
             which_parts = [True] * self.Nparts
-        if self.D == 1:
+        if self.input_dim == 1:
             if x is None:
                 x = np.zeros((1, 1))
             else:

@@ -408,7 +408,7 @@ class kern(parameterised):
             pb.xlabel("x")
             pb.ylabel("k(x,%0.1f)" % x)
 
-        elif self.D == 2:
+        elif self.input_dim == 2:
             if x is None:
                 x = np.zeros((1, 2))
             else:

@@ -3,16 +3,16 @@
 
 
 class kernpart(object):
-    def __init__(self,D):
+    def __init__(self,input_dim):
         """
         The base class for a kernpart: a positive definite function which forms part of a kernel
 
-        :param D: the number of input dimensions to the function
-        :type D: int
+        :param input_dim: the number of input dimensions to the function
+        :type input_dim: int
 
         Do not instantiate.
         """
-        self.D = D
+        self.input_dim = input_dim
         self.Nparam = 1
         self.name = 'unnamed'
 

@@ -13,10 +13,10 @@ class linear(kernpart):
 
     .. math::
 
-       k(x,y) = \sum_{i=1}^D \sigma^2_i x_iy_i
+       k(x,y) = \sum_{i=1}^input_dim \sigma^2_i x_iy_i
 
-    :param D: the number of input dimensions
-    :type D: int
+    :param input_dim: the number of input dimensions
+    :type input_dim: int
     :param variances: the vector of variances :math:`\sigma^2_i`
     :type variances: array or list of the appropriate size (or float if there is only one variance parameter)
     :param ARD: Auto Relevance Determination. If equal to "False", the kernel has only one variance parameter \sigma^2, otherwise there is one variance parameter per dimension.
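Note: per the docstring above, the linear part's covariance is k(x, y) = Σ_i σ²_i x_i y_i, a weighted inner product with one variance per dimension under ARD and a single shared variance otherwise. A quick NumPy check of the formula (illustrative only, not this file's implementation):

    import numpy as np

    def linear_K(X, X2, variances):
        # k(x, y) = sum_i variances_i * x_i * y_i  ==  (X * variances) @ X2.T
        return (X * variances) @ X2.T

    rng = np.random.default_rng(2)
    X = rng.normal(size=(5, 3))
    variances = np.array([0.5, 1.0, 2.0])      # ARD: one variance per dimension
    K_ard = linear_K(X, X, variances)
    K_iso = linear_K(X, X, np.full(3, 1.5))    # non-ARD: one shared variance
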
@@ -24,8 +24,8 @@ class linear(kernpart):
     :rtype: kernel object
     """
 
-    def __init__(self, D, variances=None, ARD=False):
-        self.D = D
+    def __init__(self, input_dim, variances=None, ARD=False):
+        self.input_dim = input_dim
         self.ARD = ARD
         if ARD == False:
             self.Nparam = 1

@@ -37,13 +37,13 @@ class linear(kernpart):
                 variances = np.ones(1)
             self._Xcache, self._X2cache = np.empty(shape=(2,))
         else:
-            self.Nparam = self.D
+            self.Nparam = self.input_dim
             self.name = 'linear'
             if variances is not None:
                 variances = np.asarray(variances)
-                assert variances.size == self.D, "bad number of lengthscales"
+                assert variances.size == self.input_dim, "bad number of lengthscales"
             else:
-                variances = np.ones(self.D)
+                variances = np.ones(self.input_dim)
         self._set_params(variances.flatten())
 
         # initialize cache
@@ -82,7 +82,7 @@ class linear(kernpart):
     def dK_dtheta(self, dL_dK, X, X2, target):
         if self.ARD:
             if X2 is None:
-                [np.add(target[i:i + 1], np.sum(dL_dK * tdot(X[:, i:i + 1])), target[i:i + 1]) for i in range(self.D)]
+                [np.add(target[i:i + 1], np.sum(dL_dK * tdot(X[:, i:i + 1])), target[i:i + 1]) for i in range(self.input_dim)]
             else:
                 product = X[:, None, :] * X2[None, :, :]
                 target += (dL_dK[:, :, None] * product).sum(0).sum(0)

@@ -153,7 +153,7 @@ class linear(kernpart):
         # psi2_real[n, m, m_prime] = np.dot(tmp, (
         #     self._Z[m_prime:m_prime + 1] * self.variances).T)
         # mu2_S = (self._mu[:, None, :] * self._mu[:, :, None])
-        # mu2_S[:, np.arange(self.D), np.arange(self.D)] += self._S
+        # mu2_S[:, np.arange(self.input_dim), np.arange(self.input_dim)] += self._S
        # psi2 = (self.ZA[None, :, None, :] * mu2_S[:, None]).sum(-1)
         # psi2 = (psi2[:, :, None] * self.ZA[None, None]).sum(-1)
         # psi2_tensor = np.tensordot(self.ZZ[None, :, :, :] * np.square(self.variances), self.mu2_S[:, None, None, :], ((3), (3))).squeeze().T

@@ -22,14 +22,14 @@ class prod(kernpart):
         self.k1 = k1
         self.k2 = k2
         if tensor:
-            self.D = k1.D + k2.D
-            self.slice1 = slice(0,self.k1.D)
-            self.slice2 = slice(self.k1.D,self.k1.D+self.k2.D)
+            self.input_dim = k1.input_dim + k2.input_dim
+            self.slice1 = slice(0,self.k1.input_dim)
+            self.slice2 = slice(self.k1.input_dim,self.k1.input_dim+self.k2.input_dim)
         else:
-            assert k1.D == k2.D, "Error: The input spaces of the kernels to sum don't have the same dimension."
-            self.D = k1.D
-            self.slice1 = slice(0,self.D)
-            self.slice2 = slice(0,self.D)
+            assert k1.input_dim == k2.input_dim, "Error: The input spaces of the kernels to sum don't have the same dimension."
+            self.input_dim = k1.input_dim
+            self.slice1 = slice(0,self.input_dim)
+            self.slice2 = slice(0,self.input_dim)
 
         self._X, self._X2, self._params = np.empty(shape=(3,1))
         self._set_params(np.hstack((k1._get_params(),k2._get_params())))

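Note: the prod part above multiplies two covariances elementwise. With tensor=True the factors act on disjoint column blocks of a concatenated input (input_dim = k1.input_dim + k2.input_dim, selected by slice1 and slice2); otherwise both factors see the same columns. A minimal sketch of the two slicing cases, using assumed RBF factors rather than GPy's classes:

    import numpy as np

    def rbf_cov(X, X2):
        d2 = ((X[:, None, :] - X2[None, :, :]) ** 2).sum(-1)
        return np.exp(-0.5 * d2)

    def prod_K(X, d1, d2, tensor):
        # tensor=True: factor 1 sees columns [0, d1), factor 2 sees [d1, d1+d2)
        # tensor=False: both factors see the same d1 (== d2) columns
        if tensor:
            s1, s2 = slice(0, d1), slice(d1, d1 + d2)
        else:
            assert d1 == d2
            s1 = s2 = slice(0, d1)
        return rbf_cov(X[:, s1], X[:, s1]) * rbf_cov(X[:, s2], X[:, s2])

    X = np.random.default_rng(3).normal(size=(4, 3))
    K_tensor = prod_K(X, d1=2, d2=1, tensor=True)        # product over a 3-D input
    K_shared = prod_K(X[:, :2], d1=2, d2=2, tensor=False)
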
@@ -18,8 +18,8 @@ class rbf(kernpart):
 
     where \ell_i is the lengthscale, \sigma^2 the variance and d the dimensionality of the input.
 
-    :param D: the number of input dimensions
-    :type D: int
+    :param input_dim: the number of input dimensions
+    :type input_dim: int
     :param variance: the variance of the kernel
     :type variance: float
     :param lengthscale: the vector of lengthscale of the kernel

@@ -31,8 +31,8 @@ class rbf(kernpart):
     .. Note: this object implements both the ARD and 'spherical' version of the function
     """
 
-    def __init__(self,D,variance=1.,lengthscale=None,ARD=False):
-        self.D = D
+    def __init__(self,input_dim,variance=1.,lengthscale=None,ARD=False):
+        self.input_dim = input_dim
         self.name = 'rbf'
         self.ARD = ARD
         if not ARD:
@@ -43,12 +43,12 @@ class rbf(kernpart):
             else:
                 lengthscale = np.ones(1)
         else:
-            self.Nparam = self.D + 1
+            self.Nparam = self.input_dim + 1
             if lengthscale is not None:
                 lengthscale = np.asarray(lengthscale)
-                assert lengthscale.size == self.D, "bad number of lengthscales"
+                assert lengthscale.size == self.input_dim, "bad number of lengthscales"
             else:
-                lengthscale = np.ones(self.D)
+                lengthscale = np.ones(self.input_dim)
 
         self._set_params(np.hstack((variance,lengthscale.flatten())))
 
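Note: the parameter bookkeeping above follows the RBF form referenced in the class docstring, k(x, y) = σ² exp(-½ Σ_i (x_i - y_i)² / ℓ_i²): with ARD there is one lengthscale per input dimension (Nparam = input_dim + 1), otherwise a single shared lengthscale. A hedged NumPy sketch of that covariance, as a formula check rather than this file's code:

    import numpy as np

    def rbf_ard_K(X, X2, variance, lengthscale):
        # lengthscale has one entry per column of X when ARD is used;
        # broadcasting a scalar reproduces the 'spherical' kernel.
        diff = (X[:, None, :] - X2[None, :, :]) / lengthscale
        return variance * np.exp(-0.5 * (diff ** 2).sum(-1))

    rng = np.random.default_rng(4)
    X = rng.normal(size=(5, 3))
    K_ard = rbf_ard_K(X, X, variance=1.0, lengthscale=np.array([0.5, 1.0, 2.0]))
    K_iso = rbf_ard_K(X, X, variance=1.0, lengthscale=1.0)
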
@@ -100,7 +100,7 @@ class rbf(kernpart):
             code = """
             int q,i,j;
             double tmp;
-            for(q=0; q<D; q++){
+            for(q=0; q<input_dim; q++){
               tmp = 0;
               for(i=0; i<N; i++){
                 for(j=0; j<i; j++){

@@ -110,12 +110,12 @@ class rbf(kernpart):
               target(q+1) += var_len3(q)*tmp;
             }
             """
-            N,M,D = X.shape[0], X.shape[0], self.D
+            N, M, input_dim = X.shape[0], X.shape[0], self.input_dim
         else:
             code = """
             int q,i,j;
             double tmp;
-            for(q=0; q<D; q++){
+            for(q=0; q<input_dim; q++){
               tmp = 0;
               for(i=0; i<N; i++){
                 for(j=0; j<M; j++){

@@ -125,9 +125,9 @@ class rbf(kernpart):
               target(q+1) += var_len3(q)*tmp;
             }
             """
-            N,M,D = X.shape[0], X2.shape[0], self.D
-            #[np.add(target[1+q:2+q],var_len3[q]*np.sum(dvardLdK*np.square(X[:,q][:,None]-X2[:,q][None,:])),target[1+q:2+q]) for q in range(self.D)]
-            weave.inline(code, arg_names=['N','M','D','X','X2','target','dvardLdK','var_len3'],
+            N, M, input_dim = X.shape[0], X2.shape[0], self.input_dim
+            #[np.add(target[1+q:2+q],var_len3[q]*np.sum(dvardLdK*np.square(X[:,q][:,None]-X2[:,q][None,:])),target[1+q:2+q]) for q in range(self.input_dim)]
+            weave.inline(code, arg_names=['N','M','input_dim','X','X2','target','dvardLdK','var_len3'],
                          type_converters=weave.converters.blitz,**self.weave_options)
         else:
             target[1] += (self.variance/self.lengthscale)*np.sum(self._K_dvar*self._K_dist2*dL_dK)
@@ -278,8 +278,8 @@ class rbf(kernpart):
         psi2 = np.empty((N,M,M))
 
         psi2_Zdist_sq = self._psi2_Zdist_sq
-        _psi2_denom = self._psi2_denom.squeeze().reshape(N,self.D)
-        half_log_psi2_denom = 0.5*np.log(self._psi2_denom).squeeze().reshape(N,self.D)
+        _psi2_denom = self._psi2_denom.squeeze().reshape(N,self.input_dim)
+        half_log_psi2_denom = 0.5*np.log(self._psi2_denom).squeeze().reshape(N,self.input_dim)
         variance_sq = float(np.square(self.variance))
         if self.ARD:
             lengthscale2 = self.lengthscale2

@@ -8,13 +8,13 @@ class white(kernpart):
     """
     White noise kernel.
 
-    :param D: the number of input dimensions
-    :type D: int
+    :param input_dim: the number of input dimensions
+    :type input_dim: int
     :param variance:
     :type variance: float
     """
-    def __init__(self,D,variance=1.):
-        self.D = D
+    def __init__(self,input_dim,variance=1.):
+        self.input_dim = input_dim
         self.Nparam = 1
         self.name = 'white'
         self._set_params(np.array([variance]).flatten())
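Note: the white part is plain observation noise: its covariance is σ² on the diagonal (a point compared with itself) and zero elsewhere. A one-line NumPy illustration of that formula, not the kernpart code:

    import numpy as np

    def white_K(X, variance=1.0):
        # k(x_i, x_j) = variance if i == j else 0, i.e. variance * I
        return variance * np.eye(X.shape[0])

    X = np.random.default_rng(5).normal(size=(4, 1))
    K = white_K(X, variance=0.1)
    Kdiag = np.diag(K)     # 0.1 on every entry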