Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-07 02:52:40 +02:00

Commit aaa5020bf1: Merge branch 'devel' of https://github.com/SheffieldML/GPy into devel
133 changed files with 1761 additions and 957 deletions

The hunks below (file names are not preserved in this view) belong to the Python 3 compatibility work on the kernel modules: implicit relative imports become explicit, the Python 2 spellings of raise, print and xrange are replaced by forms accepted by both interpreters, reduce is imported from functools, and scipy.weave becomes an optional import with a numpy fallback.

@@ -1,11 +1,11 @@
 # Copyright (c) 2013, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
 
 class ODE_UY(Kern):
     def __init__(self, input_dim, variance_U=3., variance_Y=1., lengthscale_U=1., lengthscale_Y=1., active_dims=None, name='ode_uy'):
@@ -114,7 +114,7 @@ class ODE_UY(Kern):
         elif i==1:
             Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
         else:
-            raise ValueError, "invalid input/output index"
+            raise ValueError("invalid input/output index")
         #Kdiag[slices[0][0]]+= self.variance_U #matern32 diag
         #Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag
         return Kdiag
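
The leading hunk of most of these modules makes the same change: Python 2 resolves `from kern import Kern` relative to the current package, while Python 3 always treats it as an absolute import and fails to find a top-level `kern`, so sibling modules are now imported with the explicit relative form. A minimal sketch of the difference, using a hypothetical package layout (mypkg/kern.py next to mypkg/ode_uy.py; the real GPy paths are not shown in this view):

    # mypkg/ode_uy.py  (hypothetical layout, for illustration only)

    # Python 2 only: implicit relative import. Python 3 looks for a
    # top-level module named `kern` and raises ImportError.
    #from kern import Kern

    # Works on Python 2.5+ and Python 3: explicit relative import of a sibling module.
    from .kern import Kern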

@@ -1,11 +1,11 @@
 # Copyright (c) 2013, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
 
 class ODE_UYC(Kern):
     def __init__(self, input_dim, variance_U=3., variance_Y=1., lengthscale_U=1., lengthscale_Y=1., ubias =1. ,active_dims=None, name='ode_uyc'):
@@ -115,7 +115,7 @@ class ODE_UYC(Kern):
         elif i==1:
             Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
         else:
-            raise ValueError, "invalid input/output index"
+            raise ValueError("invalid input/output index")
         #Kdiag[slices[0][0]]+= self.variance_U #matern32 diag
         #Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag
         return Kdiag

@@ -1,10 +1,10 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
 
 
 class ODE_st(Kern):
@@ -135,7 +135,7 @@ class ODE_st(Kern):
             Kdiag[s1]+= b**2*k1 - 2*a*c*k2 + a**2*k3 + c**2*vyt*vyx
             #Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
         else:
-            raise ValueError, "invalid input/output index"
+            raise ValueError("invalid input/output index")
 
         return Kdiag

@@ -1,8 +1,8 @@
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
 
 
 class ODE_t(Kern):
@@ -85,7 +85,7 @@ class ODE_t(Kern):
             Kdiag[s1]+= k1 + vyt+self.ubias
             #Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
         else:
-            raise ValueError, "invalid input/output index"
+            raise ValueError("invalid input/output index")
 
         return Kdiag
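
The second hunk in each of the four ODE kernels above is the other recurring fix: the comma form of raise is Python 2-only syntax, whereas calling the exception class works under both interpreters. A small self-contained illustration (the value of i is made up):

    i = 5
    try:
        if i not in (0, 1):
            # old Python 2 spelling (a SyntaxError on Python 3):
            #     raise ValueError, "invalid input/output index"
            raise ValueError("invalid input/output index")
    except ValueError as err:   # `except ValueError, err:` was likewise Python 2 only
        print(err)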

@@ -4,7 +4,8 @@
 import numpy as np
 import itertools
 from ...util.caching import Cache_this
-from kern import CombinationKernel
+from .kern import CombinationKernel
+from functools import reduce
 
 class Add(CombinationKernel):
     """
@@ -84,10 +85,10 @@ class Add(CombinationKernel):
         psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
         #return psi2
         # compute the "cross" terms
-        from static import White, Bias
-        from rbf import RBF
+        from .static import White, Bias
+        from .rbf import RBF
         #from rbf_inv import RBFInv
-        from linear import Linear
+        from .linear import Linear
         #ffrom fixed import Fixed
 
         for p1, p2 in itertools.combinations(self.parts, 2):
@@ -111,11 +112,11 @@ class Add(CombinationKernel):
                     psi2 += np.einsum('nm,no->mo',tmp1,tmp2)+np.einsum('nm,no->mo',tmp2,tmp1)
                     #(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
                 else:
-                    raise NotImplementedError, "psi2 cannot be computed for this kernel"
+                    raise NotImplementedError("psi2 cannot be computed for this kernel")
         return psi2
 
     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from static import White, Bias
+        from .static import White, Bias
         for p1 in self.parts:
             #compute the effective dL_dpsi1. Extra terms appear becaue of the cross terms in psi2!
             eff_dL_dpsi1 = dL_dpsi1.copy()
@@ -131,7 +132,7 @@ class Add(CombinationKernel):
             p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
 
     def gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from static import White, Bias
+        from .static import White, Bias
         target = np.zeros(Z.shape)
         for p1 in self.parts:
             #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2!
@@ -149,7 +150,7 @@ class Add(CombinationKernel):
         return target
 
     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from static import White, Bias
+        from .static import White, Bias
         target_grads = [np.zeros(v.shape) for v in variational_posterior.parameters]
         for p1 in self.parameters:
             #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2!
@@ -164,7 +165,7 @@ class Add(CombinationKernel):
             else:
                 eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.psi1(Z, variational_posterior) * 2.
             grads = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
-            [np.add(target_grads[i],grads[i],target_grads[i]) for i in xrange(len(grads))]
+            [np.add(target_grads[i],grads[i],target_grads[i]) for i in range(len(grads))]
         return target_grads
 
     def add(self, other):
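
One line in the psi2 hunk above deserves a note: np.einsum('nm,no->mo', tmp1, tmp2) sums over the data index n and leaves an M x M matrix over the inducing inputs, i.e. it is the same quantity as tmp1.T.dot(tmp2), and adding both orderings makes the cross-term contribution symmetric. A small check with made-up shapes (N = 3 data points, M = 2 inducing inputs):

    import numpy as np

    N, M = 3, 2
    rng = np.random.RandomState(0)
    tmp1 = rng.randn(N, M)   # stand-in for one part's weighted psi1 statistics
    tmp2 = rng.randn(N, M)   # stand-in for the other part's psi1 statistics

    cross = np.einsum('nm,no->mo', tmp1, tmp2)        # contract over n -> (M, M)
    assert np.allclose(cross, tmp1.T.dot(tmp2))

    sym = cross + np.einsum('nm,no->mo', tmp2, tmp1)  # both orderings, as in the hunk
    assert np.allclose(sym, sym.T)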

@@ -1,7 +1,7 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np

@@ -1,13 +1,17 @@
 # Copyright (c) 2012, James Hensman and Ricardo Andrade
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 import numpy as np
-from scipy import weave
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.config import config # for assesing whether to use weave
 
+try:
+    from scipy import weave
+except ImportError:
+    config.set('weave', 'working', 'False')
+
 class Coregionalize(Kern):
     """
     Covariance function for intrinsic/linear coregionalization models
@@ -61,7 +65,7 @@ class Coregionalize(Kern):
             try:
                 return self._K_weave(X, X2)
             except:
-                print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
+                print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                 config.set('weave', 'working', 'False')
                 return self._K_numpy(X, X2)
         else:
@@ -123,7 +127,7 @@ class Coregionalize(Kern):
             try:
                 dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2)
             except:
-                print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
+                print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                 config.set('weave', 'working', 'False')
                 dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2)
         else:
@@ -162,7 +166,7 @@ class Coregionalize(Kern):
 
     def update_gradients_diag(self, dL_dKdiag, X):
         index = np.asarray(X, dtype=np.int).flatten()
-        dL_dKdiag_small = np.array([dL_dKdiag[index==i].sum() for i in xrange(self.output_dim)])
+        dL_dKdiag_small = np.array([dL_dKdiag[index==i].sum() for i in range(self.output_dim)])
         self.W.gradient = 2.*self.W*dL_dKdiag_small[:, None]
         self.kappa.gradient = dL_dKdiag_small
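
The Coregionalize changes combine the two weave-related patterns used throughout this commit: scipy.weave (which was never ported to Python 3) is imported in a try/except at module load, flipping a config flag when it is absent, and each accelerated call keeps a numpy fallback, with the warning now emitted through the print() function rather than the Python 2 print statement. A rough sketch of the pattern, with a stand-in config object in place of GPy's ...util.config:

    class _Config(object):
        """Stand-in for GPy's config, for illustration only."""
        def __init__(self):
            self._d = {('weave', 'working'): 'True'}
        def set(self, section, key, value):
            self._d[(section, key)] = value
        def getboolean(self, section, key):
            return self._d.get((section, key)) == 'True'

    config = _Config()

    try:
        from scipy import weave            # only importable on old Python 2 setups
    except ImportError:
        config.set('weave', 'working', 'False')

    def run_with_fallback(fast, slow, *args):
        # try the accelerated path; on failure warn, disable weave, fall back to numpy
        if config.getboolean('weave', 'working'):
            try:
                return fast(*args)
            except Exception:
                print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                config.set('weave', 'working', 'False')
        return slow(*args)

    # usage in the spirit of Coregionalize.K:
    #     return run_with_fallback(self._K_weave, self._K_numpy, X, X2)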

@@ -3,7 +3,7 @@
 
 import numpy as np
 from scipy.special import wofz
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.caching import Cache_this

@@ -2,13 +2,13 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 
-from kern import Kern, CombinationKernel
+from .kern import Kern, CombinationKernel
 import numpy as np
 import itertools
 
 def index_to_slices(index):
     """
-    take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index.
+    take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index.
 
     e.g.
     >>> index = np.asarray([0,0,0,1,1,1,2,2,2])
@@ -79,10 +79,10 @@ class IndependentOutputs(CombinationKernel):
 
     def update_gradients_full(self,dL_dK,X,X2=None):
         slices = index_to_slices(X[:,self.index_dim])
-        if self.single_kern:
+        if self.single_kern:
             target = np.zeros(self.kern.size)
             kerns = itertools.repeat(self.kern)
-        else:
+        else:
             kerns = self.kern
             target = [np.zeros(kern.size) for kern, _ in zip(kerns, slices)]
         def collate_grads(kern, i, dL, X, X2):
@@ -94,20 +94,24 @@ class IndependentOutputs(CombinationKernel):
         else:
             slices2 = index_to_slices(X2[:,self.index_dim])
             [[[collate_grads(kern, i, dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for i,(kern,slices_i,slices_j) in enumerate(zip(kerns,slices,slices2))]
-        if self.single_kern: kern.gradient = target
-        else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))]
+        if self.single_kern:
+            self.kern.gradient = target
+        else:
+            [kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))]
 
     def gradients_X(self,dL_dK, X, X2=None):
         target = np.zeros(X.shape)
         kerns = itertools.repeat(self.kern) if self.single_kern else self.kern
         if X2 is None:
             # TODO: make use of index_to_slices
             # FIXME: Broken as X is already sliced out
             print "Warning, gradients_X may not be working, I believe X has already been sliced out by the slicer!"
             values = np.unique(X[:,self.index_dim])
             slices = [X[:,self.index_dim]==i for i in values]
             [target.__setitem__(s, kern.gradients_X(dL_dK[s,s],X[s],None))
                 for kern, s in zip(kerns, slices)]
             #slices = index_to_slices(X[:,self.index_dim])
-            #[[np.add(target[s], kern.gradients_X(dL_dK[s,s], X[s]), out=target[s])
+            #[[np.add(target[s], kern.gradients_X(dL_dK[s,s], X[s]), out=target[s])
             # for s in slices_i] for kern, slices_i in zip(kerns, slices)]
             #import ipdb;ipdb.set_trace()
             #[[(np.add(target[s ], kern.gradients_X(dL_dK[s ,ss],X[s ], X[ss]), out=target[s ]),
@@ -142,7 +146,7 @@ class IndependentOutputs(CombinationKernel):
             if self.single_kern: target[:] += kern.gradient
             else: target[i][:] += kern.gradient
         [[collate_grads(kern, i, dL_dKdiag[s], X[s,:]) for s in slices_i] for i, (kern, slices_i) in enumerate(zip(kerns, slices))]
-        if self.single_kern: kern.gradient = target
+        if self.single_kern: self.kern.gradient = target
         else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))]
 
 class Hierarchical(CombinationKernel):
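
index_to_slices, imported relatively by the ODE kernels above, converts the integer output-index column into slices so that each output's block of the covariance matrix can be addressed with plain slicing; its own docstring example uses the index [0,0,0,1,1,1,2,2,2]. A simplified sketch of the idea for a sorted index (not GPy's implementation, which also collects several runs of the same value):

    import numpy as np

    def index_to_slices_sketch(index):
        # boundaries where the (sorted) index changes value, plus the two ends
        index = np.asarray(index)
        bounds = np.concatenate(([0], np.where(np.diff(index) != 0)[0] + 1, [index.size]))
        return [[slice(int(a), int(b))] for a, b in zip(bounds[:-1], bounds[1:])]

    print(index_to_slices_sketch([0, 0, 0, 1, 1, 1, 2, 2, 2]))
    # [[slice(0, 3, None)], [slice(3, 6, None)], [slice(6, 9, None)]]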

@@ -4,17 +4,20 @@
 import sys
 import numpy as np
 from ...core.parameterization.parameterized import Parameterized
-from kernel_slice_operations import KernCallsViaSlicerMeta
+from .kernel_slice_operations import KernCallsViaSlicerMeta
 from ...util.caching import Cache_this
 from GPy.core.parameterization.observable_array import ObsAr
 from functools import reduce
+import six
 
 
 
+@six.add_metaclass(KernCallsViaSlicerMeta)
 class Kern(Parameterized):
     #===========================================================================
     # This adds input slice support. The rather ugly code for slicing can be
     # found in kernel_slice_operations
-    __metaclass__ = KernCallsViaSlicerMeta
+    # __meataclass__ is ignored in Python 3 - needs to be put in the function definiton
+    #__metaclass__ = KernCallsViaSlicerMeta
+    #Here, we use the Python module six to support Py3 and Py2 simultaneously
     #===========================================================================
     _support_GPU=False
     def __init__(self, input_dim, active_dims, name, useGPU=False, *a, **kw):
@@ -178,7 +181,7 @@ class Kern(Parameterized):
 
         """
         assert isinstance(other, Kern), "only kernels can be added to kernels..."
-        from add import Add
+        from .add import Add
         return Add([self, other], name=name)
 
     def __mul__(self, other):
@@ -210,7 +213,7 @@ class Kern(Parameterized):
 
         """
         assert isinstance(other, Kern), "only kernels can be multiplied to kernels..."
-        from prod import Prod
+        from .prod import Prod
         #kernels = []
         #if isinstance(self, Prod): kernels.extend(self.parameters)
         #else: kernels.append(self)
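
The kern.py hunk is the one place where the port needs more than a mechanical rewrite: a class-level __metaclass__ assignment is silently ignored by Python 3, so the slicing metaclass is now attached with six.add_metaclass, which works under both interpreters (hence the new import six). A toy sketch of the same pattern; Meta and Example are invented names, the real metaclass is KernCallsViaSlicerMeta:

    import six

    class Meta(type):
        def __new__(mcs, name, bases, attrs):
            attrs.setdefault('built_by_meta', True)   # toy behaviour for the demo
            return super(Meta, mcs).__new__(mcs, name, bases, attrs)

    @six.add_metaclass(Meta)     # portable: rebuilds Example with Meta as its metaclass
    class Example(object):
        pass
        # Python 2-only spelling, silently ignored by Python 3:
        # __metaclass__ = Meta

    print(type(Example) is Meta, Example.built_by_meta)   # True True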

@@ -3,7 +3,7 @@
 
 
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...util.linalg import tdot
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp

@@ -1,7 +1,7 @@
 # Copyright (c) 2013, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np

@@ -3,11 +3,12 @@
 
 
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...util.linalg import mdot
 from ...util.decorators import silence_errors
 from ...core.parameterization.param import Param
 from ...core.parameterization.transformations import Logexp
+from functools import reduce
 
 class Periodic(Kern):
     def __init__(self, input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name):

@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 class Poly(Kern):

@@ -2,9 +2,10 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import numpy as np
-from kern import CombinationKernel
+from .kern import CombinationKernel
 from ...util.caching import Cache_this
 import itertools
+from functools import reduce
 
 
 def numpy_invalid_op_as_exception(func):
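
prod.py, add.py and periodic.py all gain `from functools import reduce` in this commit: reduce was a builtin in Python 2 but lives only in functools on Python 3, and importing it from functools is accepted by both. Add uses it to sum per-part psi statistics, roughly like this (toy arrays standing in for p.psi2(...)):

    from functools import reduce   # builtin on Python 2, functools-only on Python 3
    import numpy as np

    parts = [np.eye(2), 2 * np.eye(2), 3 * np.eye(2)]   # stand-ins for per-part psi2
    total = reduce(np.add, parts)                       # elementwise sum over the parts
    assert np.allclose(total, 6 * np.eye(2))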

@@ -4,10 +4,10 @@
 from ....core.parameterization.parameter_core import Pickleable
 from GPy.util.caching import Cache_this
 from ....core.parameterization import variational
-import rbf_psi_comp
-import ssrbf_psi_comp
-import sslinear_psi_comp
-import linear_psi_comp
+from . import rbf_psi_comp
+from . import ssrbf_psi_comp
+from . import sslinear_psi_comp
+from . import linear_psi_comp
 
 class PSICOMP_RBF(Pickleable):
     @Cache_this(limit=2, ignore_args=(0,))
@@ -17,7 +17,7 @@ class PSICOMP_RBF(Pickleable):
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return ssrbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior)
         else:
-            raise ValueError, "unknown distriubtion received for psi-statistics"
+            raise ValueError("unknown distriubtion received for psi-statistics")
 
     @Cache_this(limit=2, ignore_args=(0,1,2,3))
     def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
@@ -26,7 +26,7 @@ class PSICOMP_RBF(Pickleable):
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return ssrbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior)
         else:
-            raise ValueError, "unknown distriubtion received for psi-statistics"
+            raise ValueError("unknown distriubtion received for psi-statistics")
 
     def _setup_observers(self):
         pass
@@ -40,7 +40,7 @@ class PSICOMP_Linear(Pickleable):
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return sslinear_psi_comp.psicomputations(variance, Z, variational_posterior)
         else:
-            raise ValueError, "unknown distriubtion received for psi-statistics"
+            raise ValueError("unknown distriubtion received for psi-statistics")
 
     @Cache_this(limit=2, ignore_args=(0,1,2,3))
     def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior):
@@ -49,7 +49,7 @@ class PSICOMP_Linear(Pickleable):
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return sslinear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior)
         else:
-            raise ValueError, "unknown distriubtion received for psi-statistics"
+            raise ValueError("unknown distriubtion received for psi-statistics")
 
     def _setup_observers(self):
         pass

@@ -3,9 +3,9 @@
 
 
 import numpy as np
-from stationary import Stationary
-from psi_comp import PSICOMP_RBF
-from psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU
+from .stationary import Stationary
+from .psi_comp import PSICOMP_RBF
+from .psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU
 from ...util.config import *
 
 class RBF(Stationary):

@@ -3,7 +3,7 @@ A new kernel
 """
 
 import numpy as np
-from kern import Kern,CombinationKernel
+from .kern import Kern,CombinationKernel
 from .independent_outputs import index_to_slices
 import itertools
 
@@ -104,7 +104,7 @@ class SplitKern(CombinationKernel):
         assert len(slices2)<=2, 'The Split kernel only support two different indices'
         target = np.zeros((X.shape[0], X2.shape[0]))
         # diagonal blocks
-        [[target.__setitem__((s,s2), self.kern.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[i], slices2[i])] for i in xrange(min(len(slices),len(slices2)))]
+        [[target.__setitem__((s,s2), self.kern.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[i], slices2[i])] for i in range(min(len(slices),len(slices2)))]
         if len(slices)>1:
             [target.__setitem__((s,s2), self.kern_cross.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[1], slices2[0])]
         if len(slices2)>1:
@@ -135,7 +135,7 @@ class SplitKern(CombinationKernel):
         else:
             assert dL_dK.shape==(X.shape[0],X2.shape[0])
             slices2 = index_to_slices(X2[:,self.index_dim])
-            [[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s,s2 in itertools.product(slices[i], slices2[i])] for i in xrange(min(len(slices),len(slices2)))]
+            [[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s,s2 in itertools.product(slices[i], slices2[i])] for i in range(min(len(slices),len(slices2)))]
             if len(slices)>1:
                 [collate_grads(dL_dK[s,s2], X[s], X2[s2], True) for s,s2 in itertools.product(slices[1], slices2[0])]
             if len(slices2)>1:
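
The xrange -> range swaps here and in add.py, coregionalize.py and stationary.py are behaviour-preserving for these loops: both names yield the integers 0..n-1, Python 3 simply dropped the xrange spelling (its range is already lazy), and on Python 2 the only cost is materialising a short list. For example:

    n = 4
    assert list(range(n)) == [0, 1, 2, 3]
    for q in range(n):   # iterates identically under Python 2 and Python 3
        pass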

@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 
-from kern import Kern
+from .kern import Kern
 import numpy as np
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp

@@ -2,16 +2,21 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.linalg import tdot
 from ... import util
 import numpy as np
-from scipy import integrate, weave
+from scipy import integrate
 from ...util.config import config # for assesing whether to use weave
 from ...util.caching import Cache_this
 
+try:
+    from scipy import weave
+except ImportError:
+    config.set('weave', 'working', 'False')
+
 class Stationary(Kern):
     """
     Stationary kernels (covariance functions).
@@ -65,10 +70,10 @@ class Stationary(Kern):
         self.link_parameters(self.variance, self.lengthscale)
 
     def K_of_r(self, r):
-        raise NotImplementedError, "implement the covariance function as a fn of r to use this class"
+        raise NotImplementedError("implement the covariance function as a fn of r to use this class")
 
     def dK_dr(self, r):
-        raise NotImplementedError, "implement derivative of the covariance function wrt r to use this class"
+        raise NotImplementedError("implement derivative of the covariance function wrt r to use this class")
 
     @Cache_this(limit=5, ignore_args=())
     def K(self, X, X2=None):
@@ -165,11 +170,11 @@ class Stationary(Kern):
                 try:
                     self.lengthscale.gradient = self.weave_lengthscale_grads(tmp, X, X2)
                 except:
-                    print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
+                    print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                     config.set('weave', 'working', 'False')
-                    self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)])
+                    self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in range(self.input_dim)])
             else:
-                self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)])
+                self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in range(self.input_dim)])
         else:
             r = self._scaled_dist(X, X2)
             self.lengthscale.gradient = -np.sum(dL_dr*r)/self.lengthscale
@@ -214,7 +219,7 @@ class Stationary(Kern):
             try:
                 return self.gradients_X_weave(dL_dK, X, X2)
             except:
-                print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
+                print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                 config.set('weave', 'working', 'False')
                 return self.gradients_X_(dL_dK, X, X2)
         else:
@@ -234,7 +239,7 @@ class Stationary(Kern):
 
         #the lower memory way with a loop
         ret = np.empty(X.shape, dtype=np.float64)
-        for q in xrange(self.input_dim):
+        for q in range(self.input_dim):
            np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q])
         ret /= self.lengthscale**2
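
The two NotImplementedError messages in the Stationary hunk spell out the class's contract: a stationary kernel only has to provide the covariance as a function of the scaled distance r, plus its derivative, and the base class supplies K, the gradients and the weave/numpy dispatch shown above. A minimal stand-alone sketch in that style (an exponentiated-quadratic form written from the error messages, not copied from GPy's own subclasses):

    import numpy as np

    class ExpQuadSketch(object):
        """Stand-alone illustration of the K_of_r / dK_dr contract; not a GPy class."""
        def __init__(self, variance=1.0):
            self.variance = variance

        def K_of_r(self, r):
            # covariance as a function of the scaled distance r
            return self.variance * np.exp(-0.5 * r ** 2)

        def dK_dr(self, r):
            # derivative of the covariance with respect to r, used for the gradients
            return -r * self.variance * np.exp(-0.5 * r ** 2)

    k = ExpQuadSketch()
    r = np.linspace(0.0, 3.0, 5)
    print(k.K_of_r(r))
    print(k.dK_dr(r))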

@@ -1,7 +1,7 @@
 # Check Matthew Rocklin's blog post.
 import sympy as sym
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...core.symbolic import Symbolic_core
 
 
@@ -11,7 +11,7 @@ class Symbolic(Kern, Symbolic_core):
     def __init__(self, input_dim, k=None, output_dim=1, name='symbolic', parameters=None, active_dims=None, operators=None, func_modules=[]):
 
         if k is None:
-            raise ValueError, "You must provide an argument for the covariance function."
+            raise ValueError("You must provide an argument for the covariance function.")
 
         Kern.__init__(self, input_dim, active_dims, name=name)
         kdiag = k

@@ -3,7 +3,7 @@
 
 
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.caching import Cache_this