[caching] changing all cacher limits to 3

mzwiessele 2016-03-07 11:37:22 +00:00
parent 99caca6702
commit b1e073318f
48 changed files with 72 additions and 72 deletions
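The limits in question are the `limit` arguments of paramz's caching utilities (`Cacher` and the `Cache_this` decorator), which bound how many distinct argument combinations a wrapped computation keeps results for. As a minimal sketch of the idea, using only the standard library (this is not paramz's actual implementation, which additionally invalidates entries when a cached argument changes):

# Minimal sketch of a bounded memoizer, illustrating what `limit` controls.
# NOT paramz.caching.Cacher; names and eviction policy here are illustrative.
from collections import OrderedDict

class BoundedCache:
    def __init__(self, operation, limit=3):
        self.operation = operation
        self.limit = limit              # max cached argument combinations
        self._store = OrderedDict()

    def __call__(self, *args):
        key = tuple(id(a) for a in args)      # key on argument identity
        if key in self._store:
            self._store.move_to_end(key)      # refresh recency
            return self._store[key]
        if len(self._store) >= self.limit:
            self._store.popitem(last=False)   # evict the oldest entry
        result = self._store[key] = self.operation(*args)
        return result

Standardizing on 3 means every wrapped function keeps results for up to three recent argument sets, whether it previously kept 1, 2, 5, 10, 20 or 50.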


@@ -43,4 +43,4 @@ def randomize(self, rand_gen=None, *args, **kwargs):
 Model.randomize = randomize
 Param.randomize = randomize
-Parameterized.randomize = randomize
+Parameterized.randomize = randomize


@@ -45,4 +45,4 @@ class Model(ParamzModel, Priorizable):
 (including the MAP prior), so we return it here. If your model is not
 probabilistic, just return your *negative* gradient here!
 """
-return -(self._log_likelihood_gradients() + self._log_prior_gradients())
+return -(self._log_likelihood_gradients() + self._log_prior_gradients())


@@ -6,4 +6,4 @@ from .parameterized import Parameterized
 from paramz import transformations
 from paramz.core import lists_and_dicts, index_operations, observable_array, observable
-from paramz import ties_and_remappings, ObsAr
+from paramz import ties_and_remappings, ObsAr


@@ -7,4 +7,4 @@ from paramz.transformations import __fixed__
 import logging, numpy as np
 class Param(Param, Priorizable):
-pass
+pass


@@ -49,4 +49,4 @@ class Parameterized(Parameterized, Priorizable):
 If you want to operate on all parameters use m[''] to wildcard select all paramters
 and concatenate them. Printing m[''] will result in printing of all parameters in detail.
 """
-pass
+pass
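A hedged usage sketch of the wildcard selection that docstring describes, assuming a small regression model built on made-up data:

# Usage sketch for the m[''] wildcard described above (data is made up).
import numpy as np
import GPy

X = np.random.randn(20, 1)
Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
m = GPy.models.GPRegression(X, Y)

print(m[''])   # wildcard-selects all parameters and prints them in detail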


@@ -1,4 +1,4 @@
 # Copyright (c) 2014, Max Zwiessele, James Hensman
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
-from paramz.transformations import *
+from paramz.transformations import *


@@ -44,7 +44,7 @@ class SparseGP(GP):
 #pick a sensible inference method
 if inference_method is None:
 if isinstance(likelihood, likelihoods.Gaussian):
-inference_method = var_dtc.VarDTC(limit=1)
+inference_method = var_dtc.VarDTC(limit=3)
 else:
 #inference_method = ??
 raise NotImplementedError("what to do what to do?")


@@ -22,7 +22,7 @@ class VarDTC(LatentFunctionInference):
 """
 const_jitter = 1e-8
-def __init__(self, limit=1):
+def __init__(self, limit=3):
 from paramz.caching import Cacher
 self.limit = limit
 self.get_trYYT = Cacher(self._get_trYYT, limit)
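The pattern above, wrapping an expensive member computation in a `Cacher`, looks as follows in isolation; a sketch in which only the `Cacher(self._get_trYYT, limit)` call mirrors the diff, while the class name and the computation body are assumptions:

# Sketch of the Cacher-wrapping pattern shown above. VarDTCLike is an
# illustrative stand-in; only the Cacher call mirrors the diff.
import numpy as np
from paramz.caching import Cacher

class VarDTCLike:
    def __init__(self, limit=3):
        self.limit = limit
        # repeated calls with an unchanged Y return the cached value
        self.get_trYYT = Cacher(self._get_trYYT, limit)

    def _get_trYYT(self, Y):
        # trace(Y Y^T) without forming the N x N product (assumed semantics)
        return np.einsum('ij,ij->', Y, Y)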


@@ -21,7 +21,7 @@ class VarDTC_minibatch(LatentFunctionInference):
 """
 const_jitter = 1e-8
-def __init__(self, batchsize=None, limit=1, mpi_comm=None):
+def __init__(self, batchsize=None, limit=3, mpi_comm=None):
 self.batchsize = batchsize
 self.mpi_comm = mpi_comm


@@ -2,4 +2,4 @@ from paramz.optimization import stochastics, Optimizer
 from paramz.optimization import *
 import sys
 sys.modules['GPy.inference.optimization.stochastics'] = stochastics
-sys.modules['GPy.inference.optimization.Optimizer'] = Optimizer
+sys.modules['GPy.inference.optimization.Optimizer'] = Optimizer


@@ -28,4 +28,4 @@ from .src.trunclinear import TruncLinear,TruncLinear_inf
 from .src.splitKern import SplitKern,DEtime
 from .src.splitKern import DEtime as DiffGenomeKern
 from .src.spline import Spline
-from .src.basis_funcs import LogisticBasisFuncKernel, LinearSlopeBasisFuncKernel, BasisFuncKernel, ChangePointBasisFuncKernel, DomainKernel
+from .src.basis_funcs import LogisticBasisFuncKernel, LinearSlopeBasisFuncKernel, BasisFuncKernel, ChangePointBasisFuncKernel, DomainKernel


@@ -162,4 +162,4 @@ class ODE_t(Kern):
 self.lengthscale_Yt.gradient = np.sum(dkYdlent*(-0.5*self.lengthscale_Yt**(-2)) * dL_dK)
-self.ubias.gradient = np.sum(dkdubias * dL_dK)
+self.ubias.gradient = np.sum(dkdubias * dL_dK)


@@ -1 +1 @@
-from . import psi_comp
+from . import psi_comp


@@ -37,7 +37,7 @@ class Add(CombinationKernel):
 else:
 return False
-@Cache_this(limit=2, force_kwargs=['which_parts'])
+@Cache_this(limit=3, force_kwargs=['which_parts'])
 def K(self, X, X2=None, which_parts=None):
 """
 Add all kernels together.
@@ -51,7 +51,7 @@ class Add(CombinationKernel):
 which_parts = [which_parts]
 return reduce(np.add, (p.K(X, X2) for p in which_parts))
-@Cache_this(limit=2, force_kwargs=['which_parts'])
+@Cache_this(limit=3, force_kwargs=['which_parts'])
 def Kdiag(self, X, which_parts=None):
 if which_parts is None:
 which_parts = self.parts
@@ -98,17 +98,17 @@ class Add(CombinationKernel):
 [target.__iadd__(p.gradients_XX_diag(dL_dKdiag, X)) for p in self.parts]
 return target
-@Cache_this(limit=1, force_kwargs=['which_parts'])
+@Cache_this(limit=3, force_kwargs=['which_parts'])
 def psi0(self, Z, variational_posterior):
 if not self._exact_psicomp: return Kern.psi0(self,Z,variational_posterior)
 return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts))
-@Cache_this(limit=1, force_kwargs=['which_parts'])
+@Cache_this(limit=3, force_kwargs=['which_parts'])
 def psi1(self, Z, variational_posterior):
 if not self._exact_psicomp: return Kern.psi1(self,Z,variational_posterior)
 return reduce(np.add, (p.psi1(Z, variational_posterior) for p in self.parts))
-@Cache_this(limit=1, force_kwargs=['which_parts'])
+@Cache_this(limit=3, force_kwargs=['which_parts'])
 def psi2(self, Z, variational_posterior):
 if not self._exact_psicomp: return Kern.psi2(self,Z,variational_posterior)
 psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
@@ -144,7 +144,7 @@ class Add(CombinationKernel):
 raise NotImplementedError("psi2 cannot be computed for this kernel")
 return psi2
-@Cache_this(limit=1, force_kwargs=['which_parts'])
+@Cache_this(limit=3, force_kwargs=['which_parts'])
 def psi2n(self, Z, variational_posterior):
 if not self._exact_psicomp: return Kern.psi2n(self, Z, variational_posterior)
 psi2 = reduce(np.add, (p.psi2n(Z, variational_posterior) for p in self.parts))
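`Cache_this` is the decorator form of the same cache. A hedged sketch of the pattern with an illustrative kernel class: `ignore_args=(0,)` keeps `self` out of the cache key, and (as used above) `force_kwargs` names keyword arguments whose presence forces a fresh evaluation:

# Sketch of the Cache_this pattern above; ToyRBF is illustrative, not GPy code.
import numpy as np
from paramz import Parameterized
from paramz.caching import Cache_this

class ToyRBF(Parameterized):
    def __init__(self, name='toy_rbf'):
        super(ToyRBF, self).__init__(name=name)

    @Cache_this(limit=3, ignore_args=(0,))   # do not key the cache on `self`
    def K(self, X, X2=None):
        if X2 is None:
            X2 = X
        # squared-exponential covariance; recomputed only for unseen inputs
        r2 = np.square(X[:, None, :] - X2[None, :, :]).sum(-1)
        return np.exp(-0.5 * r2)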


@@ -64,7 +64,7 @@ class EQ_ODE2(Kern):
 self.W = Param('W', W)
 self.link_parameters(self.lengthscale, self.C, self.B, self.W)
-@Cache_this(limit=2)
+@Cache_this(limit=3)
 def K(self, X, X2=None):
 #This way is not working, indexes are lost after using k._slice_X
 #index = np.asarray(X, dtype=np.int)


@@ -68,7 +68,7 @@ class Kern(Parameterized):
 def _effective_input_dim(self):
 return np.size(self._all_dims_active)
-@Cache_this(limit=20)
+@Cache_this(limit=3)
 def _slice_X(self, X):
 try:
 return X[:, self._all_dims_active].astype('float')


@@ -51,7 +51,7 @@ class Linear(Kern):
 self.link_parameter(self.variances)
 self.psicomp = PSICOMP_Linear()
-@Cache_this(limit=2)
+@Cache_this(limit=3)
 def K(self, X, X2=None):
 if self.ARD:
 if X2 is None:
@@ -62,7 +62,7 @@ class Linear(Kern):
 else:
 return self._dot_product(X, X2) * self.variances
-@Cache_this(limit=1, ignore_args=(0,))
+@Cache_this(limit=3, ignore_args=(0,))
 def _dot_product(self, X, X2=None):
 if X2 is None:
 return tdot(X)


@@ -45,7 +45,7 @@ class MLP(Kern):
 self.link_parameters(self.variance, self.weight_variance, self.bias_variance)
-@Cache_this(limit=20, ignore_args=())
+@Cache_this(limit=3, ignore_args=())
 def K(self, X, X2=None):
 if X2 is None:
 X_denom = np.sqrt(self._comp_prod(X)+1.)
@@ -57,7 +57,7 @@ class MLP(Kern):
 XTX = self._comp_prod(X,X2)/X_denom[:,None]/X2_denom[None,:]
 return self.variance*four_over_tau*np.arcsin(XTX)
-@Cache_this(limit=20, ignore_args=())
+@Cache_this(limit=3, ignore_args=())
 def Kdiag(self, X):
 """Compute the diagonal of the covariance matrix for X."""
 X_prod = self._comp_prod(X)
@@ -88,14 +88,14 @@ class MLP(Kern):
 """Gradient of diagonal of covariance with respect to X"""
 return self._comp_grads_diag(dL_dKdiag, X)[3]
-@Cache_this(limit=50, ignore_args=())
+@Cache_this(limit=3, ignore_args=())
 def _comp_prod(self, X, X2=None):
 if X2 is None:
 return (np.square(X)*self.weight_variance).sum(axis=1)+self.bias_variance
 else:
 return (X*self.weight_variance).dot(X2.T)+self.bias_variance
-@Cache_this(limit=20, ignore_args=(1,))
+@Cache_this(limit=3, ignore_args=(1,))
 def _comp_grads(self, dL_dK, X, X2=None):
 var,w,b = self.variance, self.weight_variance, self.bias_variance
 K = self.K(X, X2)
@@ -130,7 +130,7 @@ class MLP(Kern):
 dX2 = common.T.dot(X)*w-((common*XTX).sum(axis=0)/(X2_prod+1.))[:,None]*X2*w
 return dvar, dw, db, dX, dX2
-@Cache_this(limit=20, ignore_args=(1,))
+@Cache_this(limit=3, ignore_args=(1,))
 def _comp_grads_diag(self, dL_dKdiag, X):
 var,w,b = self.variance, self.weight_variance, self.bias_variance
 K = self.Kdiag(X)


@@ -27,7 +27,7 @@ class Poly(Kern):
 _, _, B = self._AB(X, X2)
 return B * self.variance
-@Cache_this(limit=2)
+@Cache_this(limit=3)
 def _AB(self, X, X2=None):
 if X2 is None:
 dot_prod = np.dot(X, X.T)


@@ -39,7 +39,7 @@ class Prod(CombinationKernel):
 kernels.insert(i, part)
 super(Prod, self).__init__(kernels, name)
-@Cache_this(limit=2, force_kwargs=['which_parts'])
+@Cache_this(limit=3, force_kwargs=['which_parts'])
 def K(self, X, X2=None, which_parts=None):
 if which_parts is None:
 which_parts = self.parts
@@ -48,7 +48,7 @@ class Prod(CombinationKernel):
 which_parts = [which_parts]
 return reduce(np.multiply, (p.K(X, X2) for p in which_parts))
-@Cache_this(limit=2, force_kwargs=['which_parts'])
+@Cache_this(limit=3, force_kwargs=['which_parts'])
 def Kdiag(self, X, which_parts=None):
 if which_parts is None:
 which_parts = self.parts


@@ -21,7 +21,7 @@ from .gaussherm import PSICOMP_GH
 from . import rbf_psi_comp, linear_psi_comp, ssrbf_psi_comp, sslinear_psi_comp
 class PSICOMP_RBF(PSICOMP):
-@Cache_this(limit=10, ignore_args=(0,))
+@Cache_this(limit=3, ignore_args=(0,))
 def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
 variance, lengthscale = kern.variance, kern.lengthscale
 if isinstance(variational_posterior, variational.NormalPosterior):
@@ -31,7 +31,7 @@ class PSICOMP_RBF(PSICOMP):
 else:
 raise ValueError("unknown distriubtion received for psi-statistics")
-@Cache_this(limit=10, ignore_args=(0,2,3,4))
+@Cache_this(limit=3, ignore_args=(0,2,3,4))
 def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
 variance, lengthscale = kern.variance, kern.lengthscale
 if isinstance(variational_posterior, variational.NormalPosterior):
@@ -43,7 +43,7 @@ class PSICOMP_RBF(PSICOMP):
 class PSICOMP_Linear(PSICOMP):
-@Cache_this(limit=10, ignore_args=(0,))
+@Cache_this(limit=3, ignore_args=(0,))
 def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
 variances = kern.variances
 if isinstance(variational_posterior, variational.NormalPosterior):
@@ -53,7 +53,7 @@ class PSICOMP_Linear(PSICOMP):
 else:
 raise ValueError("unknown distriubtion received for psi-statistics")
-@Cache_this(limit=10, ignore_args=(0,2,3,4))
+@Cache_this(limit=3, ignore_args=(0,2,3,4))
 def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
 variances = kern.variances
 if isinstance(variational_posterior, variational.NormalPosterior):


@@ -27,7 +27,7 @@ class PSICOMP_GH(PSICOMP):
 def _setup_observers(self):
 pass
-@Cache_this(limit=10, ignore_args=(0,))
+@Cache_this(limit=3, ignore_args=(0,))
 def comp_K(self, Z, qX):
 if self.Xs is None or self.Xs.shape != qX.mean.shape:
 from paramz import ObsAr
@@ -38,7 +38,7 @@ class PSICOMP_GH(PSICOMP):
 self.Xs[i] = self.locs[i]*S_sq+mu
 return self.Xs
-@Cache_this(limit=10, ignore_args=(0,))
+@Cache_this(limit=3, ignore_args=(0,))
 def psicomputations(self, kern, Z, qX, return_psi2_n=False):
 mu, S = qX.mean.values, qX.variance.values
 N,M,Q = mu.shape[0],Z.shape[0],mu.shape[1]
@@ -62,7 +62,7 @@ class PSICOMP_GH(PSICOMP):
 psi2 += self.weights[i]* tdot(Kfu.T)
 return psi0, psi1, psi2
-@Cache_this(limit=10, ignore_args=(0, 2,3,4))
+@Cache_this(limit=3, ignore_args=(0, 2,3,4))
 def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, qX):
 mu, S = qX.mean.values, qX.variance.values
 if self.cache_K: Xs = self.comp_K(Z, qX)


@@ -132,5 +132,5 @@ def _psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S):
 return _dL_dvar, _dL_dl, _dL_dZ, _dL_dmu, _dL_dS
-_psi1computations = Cacher(__psi1computations, limit=5)
-_psi2computations = Cacher(__psi2computations, limit=5)
+_psi1computations = Cacher(__psi1computations, limit=3)
+_psi2computations = Cacher(__psi2computations, limit=3)
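The same `Cacher` also wraps plain module-level functions, as in the hunk above. A sketch under the assumption that the wrapped function takes the arguments suggested by the surrounding `_psi2compDer` signature; its body is a stand-in, not GPy's psi1 formula:

# Module-level Cacher pattern from the hunk above; the function body and
# signature are assumed stand-ins for the real psi-statistics computation.
import numpy as np
from paramz.caching import Cacher

def __psi1computations(variance, lengthscale, Z, mu, S):
    # placeholder for the RBF psi1 statistics
    d2 = np.square(mu[:, None, :] - Z[None, :, :])
    return variance * np.exp(-0.5 * (d2 / (S[:, None, :] + lengthscale ** 2)).sum(-1))

_psi1computations = Cacher(__psi1computations, limit=3)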


@@ -326,7 +326,7 @@ class PSICOMP_RBF_GPU(PSICOMP_RBF):
 except:
 return self.fall_back.psicomputations(kern, Z, variational_posterior, return_psi2_n)
-@Cache_this(limit=10, ignore_args=(0,))
+@Cache_this(limit=3, ignore_args=(0,))
 def _psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
 """
 Z - MxQ
@@ -371,7 +371,7 @@ class PSICOMP_RBF_GPU(PSICOMP_RBF):
 except:
 return self.fall_back.psiDerivativecomputations(kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)
-@Cache_this(limit=10, ignore_args=(0,2,3,4))
+@Cache_this(limit=3, ignore_args=(0,2,3,4))
 def _psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
 # resolve the requirement of dL_dpsi2 to be symmetric
 if len(dL_dpsi2.shape)==2: dL_dpsi2 = (dL_dpsi2+dL_dpsi2.T)/2


@@ -88,7 +88,7 @@ try:
 return psi0,psi1,psi2,psi2n
 from GPy.util.caching import Cacher
-psicomputations = Cacher(_psicomputations, limit=1)
+psicomputations = Cacher(_psicomputations, limit=3)
 def psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
 ARD = (len(lengthscale)!=1)


@@ -375,7 +375,7 @@ class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
 def get_dimensions(self, Z, variational_posterior):
 return variational_posterior.mean.shape[0], Z.shape[0], Z.shape[1]
-@Cache_this(limit=1, ignore_args=(0,))
+@Cache_this(limit=3, ignore_args=(0,))
 def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
 """
 Z - MxQ
@@ -409,7 +409,7 @@ class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
 else:
 return psi0, psi1_gpu.get(), psi2_gpu.get()
-@Cache_this(limit=1, ignore_args=(0,2,3,4))
+@Cache_this(limit=3, ignore_args=(0,2,3,4))
 def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
 variance, lengthscale = kern.variance, kern.lengthscale
 from ....util.linalg_gpu import sum_axis


@@ -81,11 +81,11 @@ class Stationary(Kern):
 def dK_dr(self, r):
 raise NotImplementedError("implement derivative of the covariance function wrt r to use this class")
-@Cache_this(limit=20, ignore_args=())
+@Cache_this(limit=3, ignore_args=())
 def dK2_drdr(self, r):
 raise NotImplementedError("implement second derivative of covariance wrt r to use this method")
-@Cache_this(limit=5, ignore_args=())
+@Cache_this(limit=3, ignore_args=())
 def K(self, X, X2=None):
 """
 Kernel function applied on inputs X and X2.


@@ -54,12 +54,12 @@ class TruncLinear(Kern):
 self.add_parameter(self.variances)
 self.add_parameter(self.delta)
-@Cache_this(limit=2)
+@Cache_this(limit=3)
 def K(self, X, X2=None):
 XX = self.variances*self._product(X, X2)
 return XX.sum(axis=-1)
-@Cache_this(limit=2)
+@Cache_this(limit=3)
 def _product(self, X, X2=None):
 if X2 is None:
 X2 = X
@@ -149,12 +149,12 @@ class TruncLinear_inf(Kern):
 self.add_parameter(self.variances)
-# @Cache_this(limit=2)
+# @Cache_this(limit=3)
 def K(self, X, X2=None):
 tmp = self._product(X, X2)
 return (self.variances*tmp).sum(axis=-1)
-# @Cache_this(limit=2)
+# @Cache_this(limit=3)
 def _product(self, X, X2=None):
 if X2 is None:
 X2 = X


@@ -61,7 +61,7 @@ class BayesianGPLVM(SparseGP_MPI):
 else:
 from ..inference.latent_function_inference.var_dtc import VarDTC
 self.logger.debug("creating inference_method var_dtc")
-inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
+inference_method = VarDTC(limit=3 if not missing_data else Y.shape[1])
 if isinstance(inference_method,VarDTC_minibatch):
 inference_method.mpi_comm = mpi_comm


@@ -61,7 +61,7 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
 if inference_method is None:
 from ..inference.latent_function_inference.var_dtc import VarDTC
 self.logger.debug("creating inference_method var_dtc")
-inference_method = VarDTC(limit=1 if not missing_data else Y.shape[1])
+inference_method = VarDTC(limit=3 if not missing_data else Y.shape[1])
 super(BayesianGPLVMMiniBatch,self).__init__(X, Y, Z, kernel, likelihood=likelihood,
 name=name, inference_method=inference_method,
@@ -126,4 +126,4 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
 d = self.output_dim
 self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)*self.stochastics.batchsize/d
-self._Xgrad = self.X.gradient.copy()
+self._Xgrad = self.X.gradient.copy()


@@ -41,4 +41,4 @@ class GPLVM(GP):
 def parameters_changed(self):
 super(GPLVM, self).parameters_changed()
-self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
+self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)


@@ -45,7 +45,7 @@ class SparseGPMiniBatch(SparseGP):
 # pick a sensible inference method
 if inference_method is None:
 if isinstance(likelihood, likelihoods.Gaussian):
-inference_method = var_dtc.VarDTC(limit=1 if not missing_data else Y.shape[1])
+inference_method = var_dtc.VarDTC(limit=3 if not missing_data else Y.shape[1])
 else:
 #inference_method = ??
 raise NotImplementedError("what to do what to do?")


@@ -62,4 +62,4 @@ class SparseGPRegression(SparseGP_MPI):
 if isinstance(self.inference_method,VarDTC_minibatch):
 update_gradients_sparsegp(self, mpi_comm=self.mpi_comm)
 else:
-super(SparseGPRegression, self).parameters_changed()
+super(SparseGPRegression, self).parameters_changed()


@@ -104,4 +104,4 @@ cdict_Alu = {'red' :((0./5,colorsRGB['Aluminium1'][0]/256.,colorsRGB['Aluminium1
 (2./5,colorsRGB['Aluminium3'][2]/256.,colorsRGB['Aluminium3'][2]/256.),
 (3./5,colorsRGB['Aluminium4'][2]/256.,colorsRGB['Aluminium4'][2]/256.),
 (4./5,colorsRGB['Aluminium5'][2]/256.,colorsRGB['Aluminium5'][2]/256.),
-(5./5,colorsRGB['Aluminium6'][2]/256.,colorsRGB['Aluminium6'][2]/256.))}
+(5./5,colorsRGB['Aluminium6'][2]/256.,colorsRGB['Aluminium6'][2]/256.))}


@@ -107,4 +107,4 @@ try:
 lib = config.get('plotting', 'library')
 change_plotting_library(lib)
 except NoOptionError:
-print("No plotting library was specified in config file. \n{}".format(error_suggestion))
+print("No plotting library was specified in config file. \n{}".format(error_suggestion))


@@ -420,4 +420,4 @@ def _plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_indu
 if helper_prediction[2] is not None:
 plots.update(_plot_samples(self, canvas, helper_data, helper_prediction, projection, "Samples"))
-return plots
+return plots


@@ -140,4 +140,4 @@ def plot_covariance(kernel, x=None, label=None,
 return pl().add_to_canvas(canvas, plots)
 else:
-raise NotImplementedError("Cannot plot a kernel with more than two input dimensions")
+raise NotImplementedError("Cannot plot a kernel with more than two input dimensions")


@@ -380,4 +380,4 @@ def x_frame2D(X,plot_limits=None,resolution=None):
 resolution = resolution or 50
 xx, yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
 Xnew = np.vstack((xx.flatten(),yy.flatten())).T
-return Xnew, xx, yy, xmin, xmax
+return Xnew, xx, yy, xmin, xmax


@@ -18,4 +18,4 @@
 from .util import align_subplot_array, align_subplots, fewerXticks, removeRightTicks, removeUpperTicks
-from . import controllers, base_plots
+from . import controllers, base_plots


@@ -1 +1 @@
-from .imshow_controller import ImshowController, ImAnnotateController
+from .imshow_controller import ImshowController, ImAnnotateController


@@ -72,4 +72,4 @@ class ImAnnotateController(ImshowController):
 text.set_x(x+xoffset)
 text.set_y(y+yoffset)
 text.set_text("{}".format(X[1][j, i]))
-return view
+return view


@@ -72,4 +72,4 @@ latent = dict(aspect='auto', cmap='Greys', interpolation='bicubic')
 gradient = dict(aspect='auto', cmap='RdBu', interpolation='nearest', alpha=.7)
 magnification = dict(aspect='auto', cmap='Greys', interpolation='bicubic')
 latent_scatter = dict(s=40, linewidth=.2, edgecolor='k', alpha=.9)
-annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)
+annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)


@@ -116,4 +116,4 @@ def align_subplot_array(axes,xlim=None, ylim=None):
 if i<(M*(N-1)):
 ax.set_xticks([])
 else:
-removeUpperTicks(ax)
+removeUpperTicks(ax)


@@ -73,4 +73,4 @@ latent = dict(colorscale='Greys', reversescale=True, zsmooth='best')
 gradient = dict(colorscale='RdBu', opacity=.7)
 magnification = dict(colorscale='Greys', zsmooth='best', reversescale=True)
 latent_scatter = dict(marker_kwargs=dict(size='5', opacity=.7))
-# annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)
+# annotation = dict(fontdict=dict(family='sans-serif', weight='light', fontsize=9), zorder=.3, alpha=.7)


@@ -106,4 +106,4 @@ class BGPLVMTest(unittest.TestCase):
 if __name__ == "__main__":
 #import sys;sys.argv = ['', 'Test.testName']
-unittest.main()
+unittest.main()


@@ -97,4 +97,4 @@ class Test(unittest.TestCase):
 if __name__ == "__main__":
 #import sys;sys.argv = ['', 'Test.testName']
-unittest.main()
+unittest.main()


@@ -78,7 +78,7 @@ def jitchol(A, maxtries=5):
 try: raise
 except:
 logging.warning('\n'.join(['Added jitter of {:.10e}'.format(jitter),
-' in '+traceback.format_list(traceback.extract_stack(limit=2)[-2:-1])[0][2:]]))
+' in '+traceback.format_list(traceback.extract_stack(limit=3)[-2:-1])[0][2:]]))
 return L
 # def dtrtri(L, lower=1):


@@ -18,4 +18,4 @@ class RMSE(Evaluation):
 def evaluate(self, gt, pred):
 return np.sqrt(np.square(gt-pred).astype(np.float).mean())