diff --git a/GPy/core/__init__.py b/GPy/core/__init__.py
index b3a29859..b0743916 100644
--- a/GPy/core/__init__.py
+++ b/GPy/core/__init__.py
@@ -43,4 +43,4 @@ def randomize(self, rand_gen=None, *args, **kwargs):
 
 Model.randomize = randomize
 Param.randomize = randomize
-Parameterized.randomize = randomize
\ No newline at end of file
+Parameterized.randomize = randomize
diff --git a/GPy/core/model.py b/GPy/core/model.py
index ad09c917..7da6552a 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -45,4 +45,4 @@ class Model(ParamzModel, Priorizable):
         (including the MAP prior), so we return it here. If your model is not
         probabilistic, just return your *negative* gradient here!
         """
-        return -(self._log_likelihood_gradients() + self._log_prior_gradients())
\ No newline at end of file
+        return -(self._log_likelihood_gradients() + self._log_prior_gradients())
diff --git a/GPy/core/parameterization/__init__.py b/GPy/core/parameterization/__init__.py
index 11b75730..ec9944e8 100644
--- a/GPy/core/parameterization/__init__.py
+++ b/GPy/core/parameterization/__init__.py
@@ -6,4 +6,4 @@ from .parameterized import Parameterized
 
 from paramz import transformations
 from paramz.core import lists_and_dicts, index_operations, observable_array, observable
-from paramz import ties_and_remappings, ObsAr
\ No newline at end of file
+from paramz import ties_and_remappings, ObsAr
diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py
index 69b93548..df755002 100644
--- a/GPy/core/parameterization/param.py
+++ b/GPy/core/parameterization/param.py
@@ -7,4 +7,4 @@ from paramz.transformations import __fixed__
 import logging, numpy as np
 
 class Param(Param, Priorizable):
-    pass
\ No newline at end of file
+    pass
diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py
index 9e71ddcf..3ff77c96 100644
--- a/GPy/core/parameterization/parameterized.py
+++ b/GPy/core/parameterization/parameterized.py
@@ -49,4 +49,4 @@ class Parameterized(Parameterized, Priorizable):
     If you want to operate on all parameters use m[''] to wildcard select all paramters and concatenate them.
     Printing m[''] will result in printing of all parameters in detail.
     """
-    pass
\ No newline at end of file
+    pass
diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py
index 1799a06d..06461ac1 100644
--- a/GPy/core/parameterization/transformations.py
+++ b/GPy/core/parameterization/transformations.py
@@ -1,4 +1,4 @@
 # Copyright (c) 2014, Max Zwiessele, James Hensman
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from paramz.transformations import *
\ No newline at end of file
+from paramz.transformations import *
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index d71eecc3..8bcb0f21 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -44,7 +44,7 @@ class SparseGP(GP):
         #pick a sensible inference method
         if inference_method is None:
             if isinstance(likelihood, likelihoods.Gaussian):
-                inference_method = var_dtc.VarDTC(limit=1)
+                inference_method = var_dtc.VarDTC(limit=3)
             else:
                 #inference_method = ??
                 raise NotImplementedError("what to do what to do?")
diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index ec055120..dc334059 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -22,7 +22,7 @@ class VarDTC(LatentFunctionInference):
     """
     const_jitter = 1e-8
 
-    def __init__(self, limit=1):
+    def __init__(self, limit=3):
         from paramz.caching import Cacher
         self.limit = limit
         self.get_trYYT = Cacher(self._get_trYYT, limit)
diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py
index b72e4fd2..603623a7 100644
--- a/GPy/inference/latent_function_inference/var_dtc_parallel.py
+++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py
@@ -21,7 +21,7 @@ class VarDTC_minibatch(LatentFunctionInference):
     """
     const_jitter = 1e-8
 
-    def __init__(self, batchsize=None, limit=1, mpi_comm=None):
+    def __init__(self, batchsize=None, limit=3, mpi_comm=None):
         self.batchsize = batchsize
         self.mpi_comm = mpi_comm
 
diff --git a/GPy/inference/optimization/__init__.py b/GPy/inference/optimization/__init__.py
index 24ca752a..a6247d96 100644
--- a/GPy/inference/optimization/__init__.py
+++ b/GPy/inference/optimization/__init__.py
@@ -2,4 +2,4 @@ from paramz.optimization import stochastics, Optimizer
 from paramz.optimization import *
 import sys
 sys.modules['GPy.inference.optimization.stochastics'] = stochastics
-sys.modules['GPy.inference.optimization.Optimizer'] = Optimizer
\ No newline at end of file
+sys.modules['GPy.inference.optimization.Optimizer'] = Optimizer
diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py
index e2990f99..7705ed15 100644
--- a/GPy/kern/__init__.py
+++ b/GPy/kern/__init__.py
@@ -28,4 +28,4 @@ from .src.trunclinear import TruncLinear,TruncLinear_inf
 from .src.splitKern import SplitKern,DEtime
 from .src.splitKern import DEtime as DiffGenomeKern
 from .src.spline import Spline
-from .src.basis_funcs import LogisticBasisFuncKernel, LinearSlopeBasisFuncKernel, BasisFuncKernel, ChangePointBasisFuncKernel, DomainKernel
\ No newline at end of file
+from .src.basis_funcs import LogisticBasisFuncKernel, LinearSlopeBasisFuncKernel, BasisFuncKernel, ChangePointBasisFuncKernel, DomainKernel
diff --git a/GPy/kern/src/ODE_t.py b/GPy/kern/src/ODE_t.py
index d5dae665..f09ab77d 100644
--- a/GPy/kern/src/ODE_t.py
+++ b/GPy/kern/src/ODE_t.py
@@ -162,4 +162,4 @@ class ODE_t(Kern):
 
         self.lengthscale_Yt.gradient = np.sum(dkYdlent*(-0.5*self.lengthscale_Yt**(-2)) * dL_dK)
 
-        self.ubias.gradient = np.sum(dkdubias * dL_dK)
\ No newline at end of file
+        self.ubias.gradient = np.sum(dkdubias * dL_dK)
diff --git a/GPy/kern/src/__init__.py b/GPy/kern/src/__init__.py
index d90842ca..69522e32 100644
--- a/GPy/kern/src/__init__.py
+++ b/GPy/kern/src/__init__.py
@@ -1 +1 @@
-from . import psi_comp
\ No newline at end of file
+from . import psi_comp
diff --git a/GPy/kern/src/add.py b/GPy/kern/src/add.py
index 86bceac7..a629a2a4 100644
--- a/GPy/kern/src/add.py
+++ b/GPy/kern/src/add.py
@@ -37,7 +37,7 @@ class Add(CombinationKernel):
         else:
             return False
 
-    @Cache_this(limit=2, force_kwargs=['which_parts'])
+    @Cache_this(limit=3, force_kwargs=['which_parts'])
     def K(self, X, X2=None, which_parts=None):
         """
         Add all kernels together.
@@ -51,7 +51,7 @@ class Add(CombinationKernel):
             which_parts = [which_parts]
         return reduce(np.add, (p.K(X, X2) for p in which_parts))
 
-    @Cache_this(limit=2, force_kwargs=['which_parts'])
+    @Cache_this(limit=3, force_kwargs=['which_parts'])
     def Kdiag(self, X, which_parts=None):
         if which_parts is None:
             which_parts = self.parts
@@ -98,17 +98,17 @@ class Add(CombinationKernel):
         [target.__iadd__(p.gradients_XX_diag(dL_dKdiag, X)) for p in self.parts]
         return target
 
-    @Cache_this(limit=1, force_kwargs=['which_parts'])
+    @Cache_this(limit=3, force_kwargs=['which_parts'])
     def psi0(self, Z, variational_posterior):
         if not self._exact_psicomp: return Kern.psi0(self,Z,variational_posterior)
         return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts))
 
-    @Cache_this(limit=1, force_kwargs=['which_parts'])
+    @Cache_this(limit=3, force_kwargs=['which_parts'])
     def psi1(self, Z, variational_posterior):
         if not self._exact_psicomp: return Kern.psi1(self,Z,variational_posterior)
         return reduce(np.add, (p.psi1(Z, variational_posterior) for p in self.parts))
 
-    @Cache_this(limit=1, force_kwargs=['which_parts'])
+    @Cache_this(limit=3, force_kwargs=['which_parts'])
     def psi2(self, Z, variational_posterior):
         if not self._exact_psicomp: return Kern.psi2(self,Z,variational_posterior)
         psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
@@ -144,7 +144,7 @@ class Add(CombinationKernel):
             raise NotImplementedError("psi2 cannot be computed for this kernel")
         return psi2
 
-    @Cache_this(limit=1, force_kwargs=['which_parts'])
+    @Cache_this(limit=3, force_kwargs=['which_parts'])
     def psi2n(self, Z, variational_posterior):
         if not self._exact_psicomp: return Kern.psi2n(self, Z, variational_posterior)
         psi2 = reduce(np.add, (p.psi2n(Z, variational_posterior) for p in self.parts))
diff --git a/GPy/kern/src/eq_ode2.py b/GPy/kern/src/eq_ode2.py
index ef71ffe0..8e735248 100644
--- a/GPy/kern/src/eq_ode2.py
+++ b/GPy/kern/src/eq_ode2.py
@@ -64,7 +64,7 @@ class EQ_ODE2(Kern):
         self.W = Param('W', W)
         self.link_parameters(self.lengthscale, self.C, self.B, self.W)
 
-    @Cache_this(limit=2)
+    @Cache_this(limit=3)
     def K(self, X, X2=None):
         #This way is not working, indexes are lost after using k._slice_X
         #index = np.asarray(X, dtype=np.int)
diff --git a/GPy/kern/src/kern.py b/GPy/kern/src/kern.py
index 3985fe97..b7b2a5ff 100644
--- a/GPy/kern/src/kern.py
+++ b/GPy/kern/src/kern.py
@@ -68,7 +68,7 @@ class Kern(Parameterized):
     def _effective_input_dim(self):
         return np.size(self._all_dims_active)
 
-    @Cache_this(limit=20)
+    @Cache_this(limit=3)
     def _slice_X(self, X):
         try:
             return X[:, self._all_dims_active].astype('float')
diff --git a/GPy/kern/src/linear.py b/GPy/kern/src/linear.py
index 59595fea..fa412c1d 100644
--- a/GPy/kern/src/linear.py
+++ b/GPy/kern/src/linear.py
@@ -51,7 +51,7 @@ class Linear(Kern):
         self.link_parameter(self.variances)
         self.psicomp = PSICOMP_Linear()
 
-    @Cache_this(limit=2)
+    @Cache_this(limit=3)
     def K(self, X, X2=None):
         if self.ARD:
             if X2 is None:
@@ -62,7 +62,7 @@ class Linear(Kern):
         else:
             return self._dot_product(X, X2) * self.variances
 
-    @Cache_this(limit=1, ignore_args=(0,))
+    @Cache_this(limit=3, ignore_args=(0,))
     def _dot_product(self, X, X2=None):
         if X2 is None:
             return tdot(X)
diff --git a/GPy/kern/src/mlp.py b/GPy/kern/src/mlp.py
index d86e5b15..6c997881 100644
--- a/GPy/kern/src/mlp.py
+++ b/GPy/kern/src/mlp.py
@@ -45,7 +45,7 @@ class MLP(Kern):
 
         self.link_parameters(self.variance, self.weight_variance, self.bias_variance)
 
-    @Cache_this(limit=20, ignore_args=())
+    @Cache_this(limit=3, ignore_args=())
     def K(self, X, X2=None):
         if X2 is None:
             X_denom = np.sqrt(self._comp_prod(X)+1.)
@@ -57,7 +57,7 @@ class MLP(Kern):
             XTX = self._comp_prod(X,X2)/X_denom[:,None]/X2_denom[None,:]
         return self.variance*four_over_tau*np.arcsin(XTX)
 
-    @Cache_this(limit=20, ignore_args=())
+    @Cache_this(limit=3, ignore_args=())
     def Kdiag(self, X):
         """Compute the diagonal of the covariance matrix for X."""
         X_prod = self._comp_prod(X)
@@ -88,14 +88,14 @@ class MLP(Kern):
         """Gradient of diagonal of covariance with respect to X"""
         return self._comp_grads_diag(dL_dKdiag, X)[3]
 
-    @Cache_this(limit=50, ignore_args=())
+    @Cache_this(limit=3, ignore_args=())
     def _comp_prod(self, X, X2=None):
         if X2 is None:
             return (np.square(X)*self.weight_variance).sum(axis=1)+self.bias_variance
         else:
             return (X*self.weight_variance).dot(X2.T)+self.bias_variance
 
-    @Cache_this(limit=20, ignore_args=(1,))
+    @Cache_this(limit=3, ignore_args=(1,))
     def _comp_grads(self, dL_dK, X, X2=None):
         var,w,b = self.variance, self.weight_variance, self.bias_variance
         K = self.K(X, X2)
@@ -130,7 +130,7 @@ class MLP(Kern):
         dX2 = common.T.dot(X)*w-((common*XTX).sum(axis=0)/(X2_prod+1.))[:,None]*X2*w
         return dvar, dw, db, dX, dX2
 
-    @Cache_this(limit=20, ignore_args=(1,))
+    @Cache_this(limit=3, ignore_args=(1,))
     def _comp_grads_diag(self, dL_dKdiag, X):
         var,w,b = self.variance, self.weight_variance, self.bias_variance
         K = self.Kdiag(X)
diff --git a/GPy/kern/src/poly.py b/GPy/kern/src/poly.py
index 8aa33b1a..57cb8800 100644
--- a/GPy/kern/src/poly.py
+++ b/GPy/kern/src/poly.py
@@ -27,7 +27,7 @@ class Poly(Kern):
         _, _, B = self._AB(X, X2)
         return B * self.variance
 
-    @Cache_this(limit=2)
+    @Cache_this(limit=3)
     def _AB(self, X, X2=None):
         if X2 is None:
             dot_prod = np.dot(X, X.T)
diff --git a/GPy/kern/src/prod.py b/GPy/kern/src/prod.py
index ae00a949..1e18b405 100644
--- a/GPy/kern/src/prod.py
+++ b/GPy/kern/src/prod.py
@@ -39,7 +39,7 @@ class Prod(CombinationKernel):
                 kernels.insert(i, part)
         super(Prod, self).__init__(kernels, name)
 
-    @Cache_this(limit=2, force_kwargs=['which_parts'])
+    @Cache_this(limit=3, force_kwargs=['which_parts'])
     def K(self, X, X2=None, which_parts=None):
         if which_parts is None:
             which_parts = self.parts
@@ -48,7 +48,7 @@ class Prod(CombinationKernel):
             which_parts = [which_parts]
         return reduce(np.multiply, (p.K(X, X2) for p in which_parts))
 
-    @Cache_this(limit=2, force_kwargs=['which_parts'])
+    @Cache_this(limit=3, force_kwargs=['which_parts'])
     def Kdiag(self, X, which_parts=None):
         if which_parts is None:
             which_parts = self.parts
diff --git a/GPy/kern/src/psi_comp/__init__.py b/GPy/kern/src/psi_comp/__init__.py
index 9afa8e8c..0edf4b72 100644
--- a/GPy/kern/src/psi_comp/__init__.py
+++ b/GPy/kern/src/psi_comp/__init__.py
@@ -21,7 +21,7 @@ from .gaussherm import PSICOMP_GH
 from . import rbf_psi_comp, linear_psi_comp, ssrbf_psi_comp, sslinear_psi_comp
 
 class PSICOMP_RBF(PSICOMP):
-    @Cache_this(limit=10, ignore_args=(0,))
+    @Cache_this(limit=3, ignore_args=(0,))
     def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
         variance, lengthscale = kern.variance, kern.lengthscale
         if isinstance(variational_posterior, variational.NormalPosterior):
@@ -31,7 +31,7 @@ class PSICOMP_RBF(PSICOMP):
         else:
             raise ValueError("unknown distriubtion received for psi-statistics")
 
-    @Cache_this(limit=10, ignore_args=(0,2,3,4))
+    @Cache_this(limit=3, ignore_args=(0,2,3,4))
     def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         variance, lengthscale = kern.variance, kern.lengthscale
         if isinstance(variational_posterior, variational.NormalPosterior):
@@ -43,7 +43,7 @@
 
 class PSICOMP_Linear(PSICOMP):
 
-    @Cache_this(limit=10, ignore_args=(0,))
+    @Cache_this(limit=3, ignore_args=(0,))
     def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
         variances = kern.variances
         if isinstance(variational_posterior, variational.NormalPosterior):
@@ -53,7 +53,7 @@ class PSICOMP_Linear(PSICOMP):
         else:
             raise ValueError("unknown distriubtion received for psi-statistics")
 
-    @Cache_this(limit=10, ignore_args=(0,2,3,4))
+    @Cache_this(limit=3, ignore_args=(0,2,3,4))
     def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         variances = kern.variances
         if isinstance(variational_posterior, variational.NormalPosterior):
diff --git a/GPy/kern/src/psi_comp/gaussherm.py b/GPy/kern/src/psi_comp/gaussherm.py
index 5fac6619..fe343aff 100644
--- a/GPy/kern/src/psi_comp/gaussherm.py
+++ b/GPy/kern/src/psi_comp/gaussherm.py
@@ -27,7 +27,7 @@ class PSICOMP_GH(PSICOMP):
     def _setup_observers(self):
         pass
 
-    @Cache_this(limit=10, ignore_args=(0,))
+    @Cache_this(limit=3, ignore_args=(0,))
     def comp_K(self, Z, qX):
         if self.Xs is None or self.Xs.shape != qX.mean.shape:
             from paramz import ObsAr
@@ -38,7 +38,7 @@ class PSICOMP_GH(PSICOMP):
             self.Xs[i] = self.locs[i]*S_sq+mu
         return self.Xs
 
-    @Cache_this(limit=10, ignore_args=(0,))
+    @Cache_this(limit=3, ignore_args=(0,))
     def psicomputations(self, kern, Z, qX, return_psi2_n=False):
         mu, S = qX.mean.values, qX.variance.values
         N,M,Q = mu.shape[0],Z.shape[0],mu.shape[1]
@@ -62,7 +62,7 @@ class PSICOMP_GH(PSICOMP):
             psi2 += self.weights[i]* tdot(Kfu.T)
         return psi0, psi1, psi2
 
-    @Cache_this(limit=10, ignore_args=(0, 2,3,4))
+    @Cache_this(limit=3, ignore_args=(0, 2,3,4))
     def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, qX):
         mu, S = qX.mean.values, qX.variance.values
         if self.cache_K: Xs = self.comp_K(Z, qX)
diff --git a/GPy/kern/src/psi_comp/rbf_psi_comp.py b/GPy/kern/src/psi_comp/rbf_psi_comp.py
index bf954717..670f24de 100644
--- a/GPy/kern/src/psi_comp/rbf_psi_comp.py
+++ b/GPy/kern/src/psi_comp/rbf_psi_comp.py
@@ -132,5 +132,5 @@ def _psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S):
 
     return _dL_dvar, _dL_dl, _dL_dZ, _dL_dmu, _dL_dS
 
-_psi1computations = Cacher(__psi1computations, limit=5)
-_psi2computations = Cacher(__psi2computations, limit=5)
+_psi1computations = Cacher(__psi1computations, limit=3)
+_psi2computations = Cacher(__psi2computations, limit=3)
diff --git a/GPy/kern/src/psi_comp/rbf_psi_gpucomp.py b/GPy/kern/src/psi_comp/rbf_psi_gpucomp.py
index baab83ec..8f62aac3 100644
--- a/GPy/kern/src/psi_comp/rbf_psi_gpucomp.py
+++ b/GPy/kern/src/psi_comp/rbf_psi_gpucomp.py
@@ -326,7 +326,7 @@ class PSICOMP_RBF_GPU(PSICOMP_RBF):
         except:
             return self.fall_back.psicomputations(kern, Z, variational_posterior, return_psi2_n)
 
-    @Cache_this(limit=10, ignore_args=(0,))
+    @Cache_this(limit=3, ignore_args=(0,))
     def _psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
         """
         Z - MxQ
@@ -371,7 +371,7 @@ class PSICOMP_RBF_GPU(PSICOMP_RBF):
         except:
             return self.fall_back.psiDerivativecomputations(kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)
 
-    @Cache_this(limit=10, ignore_args=(0,2,3,4))
+    @Cache_this(limit=3, ignore_args=(0,2,3,4))
     def _psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         # resolve the requirement of dL_dpsi2 to be symmetric
         if len(dL_dpsi2.shape)==2: dL_dpsi2 = (dL_dpsi2+dL_dpsi2.T)/2
diff --git a/GPy/kern/src/psi_comp/ssrbf_psi_comp.py b/GPy/kern/src/psi_comp/ssrbf_psi_comp.py
index 10ea95e4..ba057d4f 100644
--- a/GPy/kern/src/psi_comp/ssrbf_psi_comp.py
+++ b/GPy/kern/src/psi_comp/ssrbf_psi_comp.py
@@ -88,7 +88,7 @@ try:
         return psi0,psi1,psi2,psi2n
 
     from GPy.util.caching import Cacher
-    psicomputations = Cacher(_psicomputations, limit=1)
+    psicomputations = Cacher(_psicomputations, limit=3)
 
     def psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
         ARD = (len(lengthscale)!=1)
diff --git a/GPy/kern/src/psi_comp/ssrbf_psi_gpucomp.py b/GPy/kern/src/psi_comp/ssrbf_psi_gpucomp.py
index 844f944e..e3ef1568 100644
--- a/GPy/kern/src/psi_comp/ssrbf_psi_gpucomp.py
+++ b/GPy/kern/src/psi_comp/ssrbf_psi_gpucomp.py
@@ -375,7 +375,7 @@ class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
     def get_dimensions(self, Z, variational_posterior):
         return variational_posterior.mean.shape[0], Z.shape[0], Z.shape[1]
 
-    @Cache_this(limit=1, ignore_args=(0,))
+    @Cache_this(limit=3, ignore_args=(0,))
     def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
         """
         Z - MxQ
@@ -409,7 +409,7 @@ class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
         else:
             return psi0, psi1_gpu.get(), psi2_gpu.get()
 
-    @Cache_this(limit=1, ignore_args=(0,2,3,4))
+    @Cache_this(limit=3, ignore_args=(0,2,3,4))
     def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         variance, lengthscale = kern.variance, kern.lengthscale
         from ....util.linalg_gpu import sum_axis
diff --git a/GPy/kern/src/stationary.py b/GPy/kern/src/stationary.py
index 7b4c3625..5451c7a3 100644
--- a/GPy/kern/src/stationary.py
+++ b/GPy/kern/src/stationary.py
@@ -81,11 +81,11 @@ class Stationary(Kern):
     def dK_dr(self, r):
         raise NotImplementedError("implement derivative of the covariance function wrt r to use this class")
 
-    @Cache_this(limit=20, ignore_args=())
+    @Cache_this(limit=3, ignore_args=())
     def dK2_drdr(self, r):
         raise NotImplementedError("implement second derivative of covariance wrt r to use this method")
 
-    @Cache_this(limit=5, ignore_args=())
+    @Cache_this(limit=3, ignore_args=())
     def K(self, X, X2=None):
         """
         Kernel function applied on inputs X and X2.
diff --git a/GPy/kern/src/trunclinear.py b/GPy/kern/src/trunclinear.py
index 3a35744f..bb94ae73 100644
--- a/GPy/kern/src/trunclinear.py
+++ b/GPy/kern/src/trunclinear.py
@@ -54,12 +54,12 @@ class TruncLinear(Kern):
         self.add_parameter(self.variances)
         self.add_parameter(self.delta)
 
-    @Cache_this(limit=2)
+    @Cache_this(limit=3)
     def K(self, X, X2=None):
         XX = self.variances*self._product(X, X2)
         return XX.sum(axis=-1)
 
-    @Cache_this(limit=2)
+    @Cache_this(limit=3)
     def _product(self, X, X2=None):
         if X2 is None:
             X2 = X
@@ -149,12 +149,12 @@ class TruncLinear_inf(Kern):
 
         self.add_parameter(self.variances)
 
-#    @Cache_this(limit=2)
+#    @Cache_this(limit=3)
     def K(self, X, X2=None):
         tmp = self._product(X, X2)
         return (self.variances*tmp).sum(axis=-1)
 
-#    @Cache_this(limit=2)
+#    @Cache_this(limit=3)
     def _product(self, X, X2=None):
         if X2 is None:
             X2 = X
diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py
index 86638eb9..aa8ac4ea 100644
--- a/GPy/models/bayesian_gplvm.py
+++ b/GPy/models/bayesian_gplvm.py
@@ -61,7 +61,7 @@ class BayesianGPLVM(SparseGP_MPI):
             else:
                 from ..inference.latent_function_inference.var_dtc import VarDTC
                 self.logger.debug("creating inference_method var_dtc")
-                inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
+                inference_method = VarDTC(limit=3 if not missing_data else Y.shape[1])
         if isinstance(inference_method,VarDTC_minibatch):
             inference_method.mpi_comm = mpi_comm
 
diff --git a/GPy/models/bayesian_gplvm_minibatch.py b/GPy/models/bayesian_gplvm_minibatch.py
index 73324386..e5a4967a 100644
--- a/GPy/models/bayesian_gplvm_minibatch.py
+++ b/GPy/models/bayesian_gplvm_minibatch.py
@@ -61,7 +61,7 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
         if inference_method is None:
             from ..inference.latent_function_inference.var_dtc import VarDTC
             self.logger.debug("creating inference_method var_dtc")
-            inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
+            inference_method = VarDTC(limit=3 if not missing_data else Y.shape[1])
 
         super(BayesianGPLVMMiniBatch,self).__init__(X, Y, Z, kernel, likelihood=likelihood,
                                                     name=name, inference_method=inference_method,
@@ -126,4 +126,4 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
             d = self.output_dim
             self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)*self.stochastics.batchsize/d
 
-        self._Xgrad = self.X.gradient.copy()
\ No newline at end of file
+        self._Xgrad = self.X.gradient.copy()
diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py
index 6416847c..cdc0ab47 100644
--- a/GPy/models/gplvm.py
+++ b/GPy/models/gplvm.py
@@ -41,4 +41,4 @@ class GPLVM(GP):
 
     def parameters_changed(self):
         super(GPLVM, self).parameters_changed()
-        self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
\ No newline at end of file
+        self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
diff --git a/GPy/models/sparse_gp_minibatch.py b/GPy/models/sparse_gp_minibatch.py
index 73393d85..6afb19e9 100644
--- a/GPy/models/sparse_gp_minibatch.py
+++ b/GPy/models/sparse_gp_minibatch.py
@@ -45,7 +45,7 @@ class SparseGPMiniBatch(SparseGP):
         # pick a sensible inference method
         if inference_method is None:
             if isinstance(likelihood, likelihoods.Gaussian):
-                inference_method = var_dtc.VarDTC(limit=1 if not missing_data else Y.shape[1])
+                inference_method = var_dtc.VarDTC(limit=3 if not missing_data else Y.shape[1])
             else:
                 #inference_method = ??
                 raise NotImplementedError("what to do what to do?")
diff --git a/GPy/models/sparse_gp_regression.py b/GPy/models/sparse_gp_regression.py
index 31bde23d..b1180511 100644
--- a/GPy/models/sparse_gp_regression.py
+++ b/GPy/models/sparse_gp_regression.py
@@ -62,4 +62,4 @@ class SparseGPRegression(SparseGP_MPI):
         if isinstance(self.inference_method,VarDTC_minibatch):
             update_gradients_sparsegp(self, mpi_comm=self.mpi_comm)
         else:
-            super(SparseGPRegression, self).parameters_changed()
\ No newline at end of file
+            super(SparseGPRegression, self).parameters_changed()
diff --git a/GPy/plotting/Tango.py b/GPy/plotting/Tango.py
index eb943962..50460410 100644
--- a/GPy/plotting/Tango.py
+++ b/GPy/plotting/Tango.py
@@ -104,4 +104,4 @@ cdict_Alu = {'red' :((0./5,colorsRGB['Aluminium1'][0]/256.,colorsRGB['Aluminium1
              (2./5,colorsRGB['Aluminium3'][2]/256.,colorsRGB['Aluminium3'][2]/256.),
              (3./5,colorsRGB['Aluminium4'][2]/256.,colorsRGB['Aluminium4'][2]/256.),
              (4./5,colorsRGB['Aluminium5'][2]/256.,colorsRGB['Aluminium5'][2]/256.),
-             (5./5,colorsRGB['Aluminium6'][2]/256.,colorsRGB['Aluminium6'][2]/256.))}
\ No newline at end of file
+             (5./5,colorsRGB['Aluminium6'][2]/256.,colorsRGB['Aluminium6'][2]/256.))}
diff --git a/GPy/plotting/__init__.py b/GPy/plotting/__init__.py
index 4b833fe3..0bb91254 100644
--- a/GPy/plotting/__init__.py
+++ b/GPy/plotting/__init__.py
@@ -107,4 +107,4 @@ try:
     lib = config.get('plotting', 'library')
     change_plotting_library(lib)
 except NoOptionError:
-    print("No plotting library was specified in config file. \n{}".format(error_suggestion))
\ No newline at end of file
+    print("No plotting library was specified in config file. \n{}".format(error_suggestion))
diff --git a/GPy/plotting/gpy_plot/gp_plots.py b/GPy/plotting/gpy_plot/gp_plots.py
index eb252c0f..c6975326 100644
--- a/GPy/plotting/gpy_plot/gp_plots.py
+++ b/GPy/plotting/gpy_plot/gp_plots.py
@@ -420,4 +420,4 @@ def _plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_indu
     if helper_prediction[2] is not None:
         plots.update(_plot_samples(self, canvas, helper_data, helper_prediction,
                                    projection, "Samples"))
-    return plots
\ No newline at end of file
+    return plots
diff --git a/GPy/plotting/gpy_plot/kernel_plots.py b/GPy/plotting/gpy_plot/kernel_plots.py
index 2255a665..1e80a8e8 100644
--- a/GPy/plotting/gpy_plot/kernel_plots.py
+++ b/GPy/plotting/gpy_plot/kernel_plots.py
@@ -140,4 +140,4 @@ def plot_covariance(kernel, x=None, label=None,
 
         return pl().add_to_canvas(canvas, plots)
     else:
-        raise NotImplementedError("Cannot plot a kernel with more than two input dimensions")
\ No newline at end of file
+        raise NotImplementedError("Cannot plot a kernel with more than two input dimensions")
diff --git a/GPy/plotting/gpy_plot/plot_util.py b/GPy/plotting/gpy_plot/plot_util.py
index 4e71a3bc..3089af20 100644
--- a/GPy/plotting/gpy_plot/plot_util.py
+++ b/GPy/plotting/gpy_plot/plot_util.py
@@ -380,4 +380,4 @@ def x_frame2D(X,plot_limits=None,resolution=None):
     resolution = resolution or 50
     xx, yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
     Xnew = np.vstack((xx.flatten(),yy.flatten())).T
-    return Xnew, xx, yy, xmin, xmax
\ No newline at end of file
+    return Xnew, xx, yy, xmin, xmax
diff --git a/GPy/plotting/matplot_dep/__init__.py b/GPy/plotting/matplot_dep/__init__.py
index d163519b..dbdbd7d5 100644
--- a/GPy/plotting/matplot_dep/__init__.py
+++ b/GPy/plotting/matplot_dep/__init__.py
@@ -18,4 +18,4 @@ from .util import align_subplot_array, align_subplots,
     fewerXticks, removeRightTicks,
     removeUpperTicks
 
-from . import controllers, base_plots
\ No newline at end of file
+from . import controllers, base_plots
diff --git a/GPy/plotting/matplot_dep/controllers/__init__.py b/GPy/plotting/matplot_dep/controllers/__init__.py
index 61cfb73b..a7e897e8 100644
--- a/GPy/plotting/matplot_dep/controllers/__init__.py
+++ b/GPy/plotting/matplot_dep/controllers/__init__.py
@@ -1 +1 @@
-from .imshow_controller import ImshowController, ImAnnotateController
\ No newline at end of file
+from .imshow_controller import ImshowController, ImAnnotateController
diff --git a/GPy/plotting/matplot_dep/controllers/imshow_controller.py b/GPy/plotting/matplot_dep/controllers/imshow_controller.py
index d67c9b4b..de64ed23 100644
--- a/GPy/plotting/matplot_dep/controllers/imshow_controller.py
+++ b/GPy/plotting/matplot_dep/controllers/imshow_controller.py
@@ -72,4 +72,4 @@ class ImAnnotateController(ImshowController):
                 text.set_x(x+xoffset)
                 text.set_y(y+yoffset)
                 text.set_text("{}".format(X[1][j, i]))
-        return view
\ No newline at end of file
+        return view
diff --git a/GPy/plotting/matplot_dep/defaults.py b/GPy/plotting/matplot_dep/defaults.py
index eab98298..69257c8c 100644
--- a/GPy/plotting/matplot_dep/defaults.py
+++ b/GPy/plotting/matplot_dep/defaults.py
@@ -72,4 +72,4 @@ latent = dict(aspect='auto', cmap='Greys', interpolation='bicubic')
 gradient = dict(aspect='auto', cmap='RdBu', interpolation='nearest', alpha=.7)
 magnification = dict(aspect='auto', cmap='Greys', interpolation='bicubic')
 latent_scatter = dict(s=40, linewidth=.2, edgecolor='k', alpha=.9)
-annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)
\ No newline at end of file
+annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)
diff --git a/GPy/plotting/matplot_dep/util.py b/GPy/plotting/matplot_dep/util.py
index 2dd6af85..ca129bc9 100644
--- a/GPy/plotting/matplot_dep/util.py
+++ b/GPy/plotting/matplot_dep/util.py
@@ -116,4 +116,4 @@ def align_subplot_array(axes,xlim=None, ylim=None):
         if i<(M*(N-1)):
             ax.set_xticks([])
         else:
-            removeUpperTicks(ax)
\ No newline at end of file
+            removeUpperTicks(ax)
diff --git a/GPy/plotting/plotly_dep/defaults.py b/GPy/plotting/plotly_dep/defaults.py
index 24170b95..121e0b37 100644
--- a/GPy/plotting/plotly_dep/defaults.py
+++ b/GPy/plotting/plotly_dep/defaults.py
@@ -73,4 +73,4 @@ latent = dict(colorscale='Greys', reversescale=True, zsmooth='best')
 gradient = dict(colorscale='RdBu', opacity=.7)
 magnification = dict(colorscale='Greys', zsmooth='best', reversescale=True)
 latent_scatter = dict(marker_kwargs=dict(size='5', opacity=.7))
-# annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)
\ No newline at end of file
+# annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)
diff --git a/GPy/testing/bgplvm_minibatch_tests.py b/GPy/testing/bgplvm_minibatch_tests.py
index 4a824368..a6276eaa 100644
--- a/GPy/testing/bgplvm_minibatch_tests.py
+++ b/GPy/testing/bgplvm_minibatch_tests.py
@@ -106,4 +106,4 @@ class BGPLVMTest(unittest.TestCase):
 
 if __name__ == "__main__":
     #import sys;sys.argv = ['', 'Test.testName']
-    unittest.main()
\ No newline at end of file
+    unittest.main()
diff --git a/GPy/testing/gp_tests.py b/GPy/testing/gp_tests.py
index b8cd89e2..3ce3ffc4 100644
--- a/GPy/testing/gp_tests.py
+++ b/GPy/testing/gp_tests.py
@@ -97,4 +97,4 @@ class Test(unittest.TestCase):
 
 if __name__ == "__main__":
     #import sys;sys.argv = ['', 'Test.testName']
-    unittest.main()
\ No newline at end of file
+    unittest.main()
diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py
index b4ffd1b0..83a6452b 100644
--- a/GPy/util/linalg.py
+++ b/GPy/util/linalg.py
@@ -78,7 +78,7 @@ def jitchol(A, maxtries=5):
     try: raise
     except:
         logging.warning('\n'.join(['Added jitter of {:.10e}'.format(jitter),
-            ' in '+traceback.format_list(traceback.extract_stack(limit=2)[-2:-1])[0][2:]]))
+            ' in '+traceback.format_list(traceback.extract_stack(limit=3)[-2:-1])[0][2:]]))
     return L
 
 # def dtrtri(L, lower=1):
diff --git a/benchmarks/regression/evaluation.py b/benchmarks/regression/evaluation.py
index fbbfe6d7..c57bce7e 100644
--- a/benchmarks/regression/evaluation.py
+++ b/benchmarks/regression/evaluation.py
@@ -18,4 +18,4 @@ class RMSE(Evaluation):
 
     def evaluate(self, gt, pred):
         return np.sqrt(np.square(gt-pred).astype(np.float).mean())
-    
\ No newline at end of file
+
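
Note on the recurring change above: every paramz cache in the codebase, both `Cache_this` decorators and `Cacher` instances, is normalized to `limit=3`, replacing the previous ad-hoc values of 1, 2, 5, 10, 20 and 50. The `limit` is the number of distinct argument combinations a cache retains before old entries are evicted. Below is a minimal sketch of the semantics, assuming paramz's caching behaviour as used throughout this patch; `expensive_product` and the array names are illustrative only, and caching only engages when all non-ignored arguments are observable objects such as `ObsAr` (plain numpy arrays are passed straight through to the wrapped function):

    import numpy as np
    from paramz import ObsAr
    from paramz.caching import Cacher

    def expensive_product(X, X2):
        # stand-in for a costly kernel computation; prints on every real evaluation
        print("computing")
        return X.dot(X2.T)

    # retain results for up to 3 distinct argument combinations, as in this patch
    cached_product = Cacher(expensive_product, limit=3)

    A = ObsAr(np.random.randn(5, 2))
    B = ObsAr(np.random.randn(4, 2))

    cached_product(A, B)  # miss: computes and prints
    cached_product(A, B)  # hit: returned from cache, no print
    A[0, 0] = 1.0         # the ObsAr notifies its observers, which should invalidate the entry
    cached_product(A, B)  # recomputes with the updated values

The decorator form used in the kernels, e.g. `@Cache_this(limit=3, ignore_args=(0,))`, wraps a method the same way: `ignore_args=(0,)` excludes argument 0 (`self`) from the cache key, and `force_kwargs=['which_parts']` forces a real evaluation whenever that keyword is supplied.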