From 1bbf58fdcfc174ae822900e30e7910cf6a15165c Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Wed, 25 Feb 2015 21:38:21 +0000 Subject: [PATCH 01/99] Updated README.md to refer to GPy/testing for running the tests --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5e98af85..68b66b31 100644 --- a/README.md +++ b/README.md @@ -105,7 +105,7 @@ Ensure nose is installed via pip: Run nosetests from the root directory of the repository: - nosetests -v + nosetests -v GPy/testing or from within IPython From 5e4afb765a2aca96026159ab967a427ab922c919 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 07:10:10 +0000 Subject: [PATCH 02/99] Relative import fixes for Python 3 compatibility --- GPy/__init__.py | 26 ++++++++--------- GPy/core/__init__.py | 16 +++++------ GPy/core/parameterization/__init__.py | 4 +-- GPy/examples/__init__.py | 8 +++--- GPy/inference/__init__.py | 6 ++-- GPy/kern/__init__.py | 36 ++++++++++++------------ GPy/likelihoods/__init__.py | 16 +++++------ GPy/mappings/__init__.py | 6 ++-- GPy/models/__init__.py | 40 +++++++++++++-------------- GPy/plotting/__init__.py | 2 +- GPy/util/__init__.py | 28 +++++++++---------- 11 files changed, 94 insertions(+), 94 deletions(-) diff --git a/GPy/__init__.py b/GPy/__init__.py index 5e091170..26713406 100644 --- a/GPy/__init__.py +++ b/GPy/__init__.py @@ -3,23 +3,23 @@ import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) -import core -from core.parameterization import transformations, priors +from . import core +from .core.parameterization import transformations, priors constraints = transformations -import models -import mappings -import inference -import util -import examples -import likelihoods -import testing +from . import models +from . import mappings +from . import inference +from . import util +from . import examples +from . import likelihoods +from . 
import testing from numpy.testing import Tester -import kern -import plotting +from . import kern +from . import plotting # Direct imports for convenience: -from core import Model -from core.parameterization import Param, Parameterized, ObsAr +from .core import Model +from .core.parameterization import Param, Parameterized, ObsAr #@nottest try: diff --git a/GPy/core/__init__.py b/GPy/core/__init__.py index ebed29bb..142eccbf 100644 --- a/GPy/core/__init__.py +++ b/GPy/core/__init__.py @@ -1,12 +1,12 @@ # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from model import * -from parameterization.parameterized import adjust_name_for_printing, Parameterizable -from parameterization.param import Param, ParamConcatenation -from parameterization.observable_array import ObsAr +from .model import * +from .parameterization.parameterized import adjust_name_for_printing, Parameterizable +from .parameterization.param import Param, ParamConcatenation +from .parameterization.observable_array import ObsAr -from gp import GP -from svgp import SVGP -from sparse_gp import SparseGP -from mapping import * +from .gp import GP +from .svgp import SVGP +from .sparse_gp import SparseGP +from .mapping import * diff --git a/GPy/core/parameterization/__init__.py b/GPy/core/parameterization/__init__.py index 8e9aa094..de736671 100644 --- a/GPy/core/parameterization/__init__.py +++ b/GPy/core/parameterization/__init__.py @@ -1,5 +1,5 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from param import Param, ObsAr -from parameterized import Parameterized +from .param import Param, ObsAr +from .parameterized import Parameterized diff --git a/GPy/examples/__init__.py b/GPy/examples/__init__.py index 968333e0..4e9e984e 100644 --- a/GPy/examples/__init__.py +++ b/GPy/examples/__init__.py @@ -1,7 +1,7 @@ # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -import classification -import regression -import dimensionality_reduction -import non_gaussian +from . import classification +from . import regression +from . import dimensionality_reduction +from . import non_gaussian diff --git a/GPy/inference/__init__.py b/GPy/inference/__init__.py index 7b1307e3..c5044582 100644 --- a/GPy/inference/__init__.py +++ b/GPy/inference/__init__.py @@ -1,3 +1,3 @@ -import latent_function_inference -import optimization -import mcmc +from . import latent_function_inference +from . import optimization +from . import mcmc diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py index 718be74f..aaeb99a2 100644 --- a/GPy/kern/__init__.py +++ b/GPy/kern/__init__.py @@ -1,20 +1,20 @@ -from _src.kern import Kern -from _src.rbf import RBF -from _src.linear import Linear, LinearFull -from _src.static import Bias, White, Fixed -from _src.brownian import Brownian -from _src.stationary import Exponential, OU, Matern32, Matern52, ExpQuad, RatQuad, Cosine -from _src.mlp import MLP -from _src.periodic import PeriodicExponential, PeriodicMatern32, PeriodicMatern52 -from _src.independent_outputs import IndependentOutputs, Hierarchical -from _src.coregionalize import Coregionalize -from _src.ODE_UY import ODE_UY -from _src.ODE_UYC import ODE_UYC -from _src.ODE_st import ODE_st -from _src.ODE_t import ODE_t -from _src.poly import Poly -from _src.eq_ode2 import EQ_ODE2 +from ._src.kern import Kern +from ._src.rbf import RBF +from ._src.linear import Linear, LinearFull +from ._src.static import Bias, White, Fixed +from ._src.brownian import Brownian +from ._src.stationary import Exponential, OU, Matern32, Matern52, ExpQuad, RatQuad, Cosine +from ._src.mlp import MLP +from ._src.periodic import PeriodicExponential, PeriodicMatern32, PeriodicMatern52 +from ._src.independent_outputs import IndependentOutputs, Hierarchical +from ._src.coregionalize import Coregionalize +from ._src.ODE_UY import ODE_UY 
+from ._src.ODE_UYC import ODE_UYC +from ._src.ODE_st import ODE_st +from ._src.ODE_t import ODE_t +from ._src.poly import Poly +from ._src.eq_ode2 import EQ_ODE2 -from _src.trunclinear import TruncLinear,TruncLinear_inf -from _src.splitKern import SplitKern,DiffGenomeKern +from ._src.trunclinear import TruncLinear,TruncLinear_inf +from ._src.splitKern import SplitKern,DiffGenomeKern diff --git a/GPy/likelihoods/__init__.py b/GPy/likelihoods/__init__.py index 28e44541..ef29da08 100644 --- a/GPy/likelihoods/__init__.py +++ b/GPy/likelihoods/__init__.py @@ -1,8 +1,8 @@ -from bernoulli import Bernoulli -from exponential import Exponential -from gaussian import Gaussian -from gamma import Gamma -from poisson import Poisson -from student_t import StudentT -from likelihood import Likelihood -from mixed_noise import MixedNoise +from .bernoulli import Bernoulli +from .exponential import Exponential +from .gaussian import Gaussian +from .gamma import Gamma +from .poisson import Poisson +from .student_t import StudentT +from .likelihood import Likelihood +from .mixed_noise import MixedNoise diff --git a/GPy/mappings/__init__.py b/GPy/mappings/__init__.py index d331c678..d9c13ad0 100644 --- a/GPy/mappings/__init__.py +++ b/GPy/mappings/__init__.py @@ -1,7 +1,7 @@ # Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kernel import Kernel -from linear import Linear -from mlp import MLP +from .kernel import Kernel +from .linear import Linear +from .mlp import MLP #from rbf import RBF diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py index c6abb5de..8f8fd838 100644 --- a/GPy/models/__init__.py +++ b/GPy/models/__init__.py @@ -1,23 +1,23 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from gp_regression import GPRegression -from gp_classification import GPClassification -from sparse_gp_regression import SparseGPRegression, SparseGPRegressionUncertainInput -from sparse_gp_classification import SparseGPClassification -from gplvm import GPLVM -from bcgplvm import BCGPLVM -from sparse_gplvm import SparseGPLVM -from warped_gp import WarpedGP -from bayesian_gplvm import BayesianGPLVM -from mrd import MRD -from gradient_checker import GradientChecker -from ss_gplvm import SSGPLVM -from gp_coregionalized_regression import GPCoregionalizedRegression -from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression -from gp_heteroscedastic_regression import GPHeteroscedasticRegression -from ss_mrd import SSMRD -from gp_kronecker_gaussian_regression import GPKroneckerGaussianRegression -from gp_var_gauss import GPVariationalGaussianApproximation -from one_vs_all_classification import OneVsAllClassification -from one_vs_all_sparse_classification import OneVsAllSparseClassification +from .gp_regression import GPRegression +from .gp_classification import GPClassification +from .sparse_gp_regression import SparseGPRegression, SparseGPRegressionUncertainInput +from .sparse_gp_classification import SparseGPClassification +from .gplvm import GPLVM +from .bcgplvm import BCGPLVM +from .sparse_gplvm import SparseGPLVM +from .warped_gp import WarpedGP +from .bayesian_gplvm import BayesianGPLVM +from .mrd import MRD +from .gradient_checker import GradientChecker +from .ss_gplvm import SSGPLVM +from .gp_coregionalized_regression import GPCoregionalizedRegression +from .sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression +from .gp_heteroscedastic_regression import GPHeteroscedasticRegression +from .ss_mrd import SSMRD +from .gp_kronecker_gaussian_regression import GPKroneckerGaussianRegression +from .gp_var_gauss import GPVariationalGaussianApproximation +from 
.one_vs_all_classification import OneVsAllClassification +from .one_vs_all_sparse_classification import OneVsAllSparseClassification diff --git a/GPy/plotting/__init__.py b/GPy/plotting/__init__.py index d3a96914..652bc628 100644 --- a/GPy/plotting/__init__.py +++ b/GPy/plotting/__init__.py @@ -2,6 +2,6 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) try: - import matplot_dep + from . import matplot_dep except (ImportError, NameError): print 'Fail to load GPy.plotting.matplot_dep.' \ No newline at end of file diff --git a/GPy/util/__init__.py b/GPy/util/__init__.py index c3edfc48..e8d2456e 100644 --- a/GPy/util/__init__.py +++ b/GPy/util/__init__.py @@ -2,18 +2,18 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -import linalg -import misc -import squashers -import warping_functions -import datasets -import mocap -import decorators -import classification -import subarray_and_sorting -import caching -import diag -import initialization -import multioutput -import linalg_gpu +from . import linalg +from . import misc +from . import squashers +from . import warping_functions +from . import datasets +from . import mocap +from . import decorators +from . import classification +from . import subarray_and_sorting +from . import caching +from . import diag +from . import initialization +from . import multioutput +from . 
import linalg_gpu From 2ca24a88f5431f370907e555f3ab402c71de7e6a Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 07:14:40 +0000 Subject: [PATCH 03/99] More relative import fixes for Python 3 compatibility --- GPy/core/gp.py | 6 +++--- GPy/core/mapping.py | 4 ++-- GPy/core/model.py | 2 +- GPy/core/parameterization/index_operations.py | 2 +- GPy/core/parameterization/lists_and_dicts.py | 2 +- GPy/core/parameterization/observable.py | 2 +- GPy/core/parameterization/observable_array.py | 6 +++--- GPy/core/parameterization/param.py | 8 ++++---- GPy/core/parameterization/parameter_core.py | 14 +++++++------- GPy/core/parameterization/parameterized.py | 4 ++-- GPy/core/parameterization/priors.py | 2 +- GPy/core/parameterization/ties_and_remappings.py | 4 ++-- GPy/core/parameterization/transformations.py | 2 +- GPy/core/parameterization/updateable.py | 2 +- GPy/core/parameterization/variational.py | 6 +++--- GPy/core/sparse_gp.py | 6 +++--- GPy/core/sparse_gp_mpi.py | 2 +- GPy/core/svgp.py | 4 ++-- GPy/likelihoods/bernoulli.py | 4 ++-- GPy/likelihoods/exponential.py | 4 ++-- GPy/likelihoods/gamma.py | 4 ++-- GPy/likelihoods/gaussian.py | 4 ++-- GPy/likelihoods/likelihood.py | 2 +- GPy/likelihoods/mixed_noise.py | 6 +++--- GPy/likelihoods/poisson.py | 4 ++-- GPy/likelihoods/student_t.py | 4 ++-- GPy/util/choleskies.py | 2 +- GPy/util/datasets.py | 2 +- GPy/util/linalg.py | 2 +- GPy/util/ln_diff_erfs.py | 2 +- GPy/util/misc.py | 2 +- 31 files changed, 60 insertions(+), 60 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 3252ac08..5110e9a5 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -4,11 +4,11 @@ import numpy as np import sys from .. import kern -from model import Model -from parameterization import ObsAr +from .model import Model +from .parameterization import ObsAr from .. 
import likelihoods from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation -from parameterization.variational import VariationalPosterior +from .parameterization.variational import VariationalPosterior import logging from GPy.util.normalizer import MeanNorm diff --git a/GPy/core/mapping.py b/GPy/core/mapping.py index 111fec6f..163db0c9 100644 --- a/GPy/core/mapping.py +++ b/GPy/core/mapping.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import sys -from parameterization import Parameterized +from .parameterization import Parameterized import numpy as np class Mapping(Parameterized): @@ -74,7 +74,7 @@ class Bijective_mapping(Mapping): """Inverse mapping from output domain of the function to the inputs.""" raise NotImplementedError -from model import Model +from .model import Model class Mapping_check_model(Model): """ diff --git a/GPy/core/model.py b/GPy/core/model.py index c63a29e5..8eb34f33 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -5,7 +5,7 @@ from .. 
import likelihoods from ..inference import optimization from ..util.misc import opt_wrapper -from parameterization import Parameterized +from .parameterization import Parameterized import multiprocessing as mp import numpy as np from numpy.linalg.linalg import LinAlgError diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index 61c82da1..e5273e55 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -3,7 +3,7 @@ import numpy from numpy.lib.function_base import vectorize -from lists_and_dicts import IntArrayDict +from .lists_and_dicts import IntArrayDict def extract_properties_to_index(index, props): prop_index = dict() diff --git a/GPy/core/parameterization/lists_and_dicts.py b/GPy/core/parameterization/lists_and_dicts.py index 5afbb8ed..626603ec 100644 --- a/GPy/core/parameterization/lists_and_dicts.py +++ b/GPy/core/parameterization/lists_and_dicts.py @@ -75,7 +75,7 @@ class ObserverList(object): def __str__(self): from . 
import ObsAr, Param - from parameter_core import Parameterizable + from .parameter_core import Parameterizable ret = [] curr_p = None diff --git a/GPy/core/parameterization/observable.py b/GPy/core/parameterization/observable.py index 8a85c6ca..0836b5d6 100644 --- a/GPy/core/parameterization/observable.py +++ b/GPy/core/parameterization/observable.py @@ -12,7 +12,7 @@ class Observable(object): """ def __init__(self, *args, **kwargs): super(Observable, self).__init__() - from lists_and_dicts import ObserverList + from .lists_and_dicts import ObserverList self.observers = ObserverList() self._update_on = True diff --git a/GPy/core/parameterization/observable_array.py b/GPy/core/parameterization/observable_array.py index 271fe7b9..c6fea497 100644 --- a/GPy/core/parameterization/observable_array.py +++ b/GPy/core/parameterization/observable_array.py @@ -3,8 +3,8 @@ import numpy as np -from parameter_core import Pickleable -from observable import Observable +from .parameter_core import Pickleable +from .observable import Observable class ObsAr(np.ndarray, Pickleable, Observable): """ @@ -39,7 +39,7 @@ class ObsAr(np.ndarray, Pickleable, Observable): return self.view(np.ndarray) def copy(self): - from lists_and_dicts import ObserverList + from .lists_and_dicts import ObserverList memo = {} memo[id(self)] = self memo[id(self.observers)] = ObserverList() diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index 1246bc18..fbbb59ed 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -4,8 +4,8 @@ import itertools import numpy np = numpy -from parameter_core import Parameterizable, adjust_name_for_printing, Pickleable -from observable_array import ObsAr +from .parameter_core import Parameterizable, adjust_name_for_printing, Pickleable +from .observable_array import ObsAr ###### printing __constraints_name__ = "Constraint" @@ -156,7 +156,7 @@ class Param(Parameterizable, ObsAr): 
#=========================================================================== @property def is_fixed(self): - from transformations import __fixed__ + from .transformations import __fixed__ return self.constraints[__fixed__].size == self.size def _get_original(self, param): @@ -313,7 +313,7 @@ class ParamConcatenation(object): See :py:class:`GPy.core.parameter.Param` for more details on constraining. """ # self.params = params - from lists_and_dicts import ArrayList + from .lists_and_dicts import ArrayList self.params = ArrayList([]) for p in params: for p in p.flattened_parameters: diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index bee160b2..5baa81c8 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -13,11 +13,11 @@ Observable Pattern for patameterization """ -from transformations import Transformation,Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED +from .transformations import Transformation,Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED import numpy as np import re import logging -from updateable import Updateable +from .updateable import Updateable class HierarchyError(Exception): """ @@ -170,7 +170,7 @@ class Pickleable(object): def __setstate__(self, state): self.__dict__.update(state) - from lists_and_dicts import ObserverList + from .lists_and_dicts import ObserverList self.observers = ObserverList() self._setup_observers() self._optimizer_copy_transformed = False @@ -268,7 +268,7 @@ class Indexable(Nameable, Updateable): def __init__(self, name, default_constraint=None, *a, **kw): super(Indexable, self).__init__(name=name, *a, **kw) self._default_constraint_ = default_constraint - from index_operations import ParameterIndexOperations + from .index_operations import ParameterIndexOperations self.constraints = ParameterIndexOperations() self.priors = ParameterIndexOperations() if self._default_constraint_ is not None: 
@@ -310,7 +310,7 @@ class Indexable(Nameable, Updateable): that is an int array, containing the indexes for the flattened param inside this parameterized logic. """ - from param import ParamConcatenation + from .param import ParamConcatenation if isinstance(param, ParamConcatenation): return np.hstack((self._raveled_index_for(p) for p in param.params)) return param._raveled_index() + self._offset_for(param) @@ -407,7 +407,7 @@ class Indexable(Nameable, Updateable): repriorized = self.unset_priors() self._add_to_index_operations(self.priors, repriorized, prior, warning) - from domains import _REAL, _POSITIVE, _NEGATIVE + from .domains import _REAL, _POSITIVE, _NEGATIVE if prior.domain is _POSITIVE: self.constrain_positive(warning) elif prior.domain is _NEGATIVE: @@ -536,7 +536,7 @@ class Indexable(Nameable, Updateable): update the constraints and priors view, so that constraining is automized for the parent. """ - from index_operations import ParameterIndexOperationsView + from .index_operations import ParameterIndexOperationsView #if getattr(self, "_in_init_"): #import ipdb;ipdb.set_trace() #self.constraints.update(param.constraints, start) diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index 44173f58..6bdd8036 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -5,8 +5,8 @@ import numpy; np = numpy import itertools from re import compile, _pattern_type -from param import ParamConcatenation -from parameter_core import HierarchyError, Parameterizable, adjust_name_for_printing +from .param import ParamConcatenation +from .parameter_core import HierarchyError, Parameterizable, adjust_name_for_printing import logging from GPy.core.parameterization.index_operations import ParameterIndexOperationsView diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py index 84b6357e..61835e28 100644 --- a/GPy/core/parameterization/priors.py +++ 
b/GPy/core/parameterization/priors.py @@ -5,7 +5,7 @@ import numpy as np from scipy.special import gammaln, digamma from ...util.linalg import pdinv -from domains import _REAL, _POSITIVE +from .domains import _REAL, _POSITIVE import warnings import weakref diff --git a/GPy/core/parameterization/ties_and_remappings.py b/GPy/core/parameterization/ties_and_remappings.py index a81b8d61..f0bb2d61 100644 --- a/GPy/core/parameterization/ties_and_remappings.py +++ b/GPy/core/parameterization/ties_and_remappings.py @@ -2,8 +2,8 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from parameterized import Parameterized -from param import Param +from .parameterized import Parameterized +from .param import Param class Remapping(Parameterized): def mapping(self): diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py index d929b1d9..181c16e0 100644 --- a/GPy/core/parameterization/transformations.py +++ b/GPy/core/parameterization/transformations.py @@ -3,7 +3,7 @@ import numpy as np -from domains import _POSITIVE,_NEGATIVE, _BOUNDED +from .domains import _POSITIVE,_NEGATIVE, _BOUNDED import weakref import sys diff --git a/GPy/core/parameterization/updateable.py b/GPy/core/parameterization/updateable.py index 278ba8cd..86446fa0 100644 --- a/GPy/core/parameterization/updateable.py +++ b/GPy/core/parameterization/updateable.py @@ -3,7 +3,7 @@ Created on 11 Nov 2014 @author: maxz ''' -from observable import Observable +from .observable import Observable class Updateable(Observable): diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py index 7cc5c99a..25efdc92 100644 --- a/GPy/core/parameterization/variational.py +++ b/GPy/core/parameterization/variational.py @@ -5,9 +5,9 @@ Created on 6 Nov 2013 ''' import numpy as np -from parameterized import Parameterized -from param import Param -from transformations import Logexp, Logistic,__fixed__ +from .parameterized 
import Parameterized +from .param import Param +from .transformations import Logexp, Logistic,__fixed__ from GPy.util.misc import param_to_array from GPy.util.caching import Cache_this diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 9004c9c7..a9866f48 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -2,11 +2,11 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from gp import GP -from parameterization.param import Param +from .gp import GP +from .parameterization.param import Param from ..inference.latent_function_inference import var_dtc from .. import likelihoods -from parameterization.variational import VariationalPosterior, NormalPosterior +from .parameterization.variational import VariationalPosterior, NormalPosterior from ..util.linalg import mdot import logging diff --git a/GPy/core/sparse_gp_mpi.py b/GPy/core/sparse_gp_mpi.py index 15d3ad76..ac53d4ac 100644 --- a/GPy/core/sparse_gp_mpi.py +++ b/GPy/core/sparse_gp_mpi.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from sparse_gp import SparseGP +from .sparse_gp import SparseGP from numpy.linalg.linalg import LinAlgError from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch diff --git a/GPy/core/svgp.py b/GPy/core/svgp.py index 603a64a5..9d74889a 100644 --- a/GPy/core/svgp.py +++ b/GPy/core/svgp.py @@ -3,8 +3,8 @@ import numpy as np from ..util import choleskies -from sparse_gp import SparseGP -from parameterization.param import Param +from .sparse_gp import SparseGP +from .parameterization.param import Param from ..inference.latent_function_inference import SVGP as svgp_inf diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py index ff2ab30a..2c246042 100644 --- a/GPy/likelihoods/bernoulli.py +++ b/GPy/likelihoods/bernoulli.py @@ -3,8 +3,8 @@ import numpy as np from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf 
-import link_functions -from likelihood import Likelihood +from . import link_functions +from .likelihood import Likelihood from scipy import stats class Bernoulli(Likelihood): diff --git a/GPy/likelihoods/exponential.py b/GPy/likelihoods/exponential.py index 8110c7d4..1df48412 100644 --- a/GPy/likelihoods/exponential.py +++ b/GPy/likelihoods/exponential.py @@ -5,8 +5,8 @@ import numpy as np from scipy import stats,special import scipy as sp -import link_functions -from likelihood import Likelihood +from . import link_functions +from .likelihood import Likelihood class Exponential(Likelihood): """ diff --git a/GPy/likelihoods/gamma.py b/GPy/likelihoods/gamma.py index c79e196c..c153bd1c 100644 --- a/GPy/likelihoods/gamma.py +++ b/GPy/likelihoods/gamma.py @@ -6,8 +6,8 @@ import numpy as np from scipy import stats,special import scipy as sp from ..core.parameterization import Param -import link_functions -from likelihood import Likelihood +from . import link_functions +from .likelihood import Likelihood class Gamma(Likelihood): """ diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py index a6e5b7e0..8029eeba 100644 --- a/GPy/likelihoods/gaussian.py +++ b/GPy/likelihoods/gaussian.py @@ -13,8 +13,8 @@ James 11/12/13 import numpy as np from scipy import stats, special -import link_functions -from likelihood import Likelihood +from . import link_functions +from .likelihood import Likelihood from ..core.parameterization import Param from ..core.parameterization.transformations import Logexp from scipy import stats diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index 790c6ba4..33698eb2 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -4,7 +4,7 @@ import numpy as np from scipy import stats,special import scipy as sp -import link_functions +from . 
import link_functions from ..util.misc import chain_1, chain_2, chain_3 from scipy.integrate import quad import warnings diff --git a/GPy/likelihoods/mixed_noise.py b/GPy/likelihoods/mixed_noise.py index 8c56f45b..84b3001d 100644 --- a/GPy/likelihoods/mixed_noise.py +++ b/GPy/likelihoods/mixed_noise.py @@ -3,9 +3,9 @@ import numpy as np from scipy import stats, special -import link_functions -from likelihood import Likelihood -from gaussian import Gaussian +from . import link_functions +from .likelihood import Likelihood +from .gaussian import Gaussian from ..core.parameterization import Param from ..core.parameterization.transformations import Logexp from ..core.parameterization import Parameterized diff --git a/GPy/likelihoods/poisson.py b/GPy/likelihoods/poisson.py index ea9b2d10..d6c4334b 100644 --- a/GPy/likelihoods/poisson.py +++ b/GPy/likelihoods/poisson.py @@ -5,8 +5,8 @@ from __future__ import division import numpy as np from scipy import stats,special import scipy as sp -import link_functions -from likelihood import Likelihood +from . import link_functions +from .likelihood import Likelihood class Poisson(Likelihood): """ diff --git a/GPy/likelihoods/student_t.py b/GPy/likelihoods/student_t.py index 855f6b40..745ce9e8 100644 --- a/GPy/likelihoods/student_t.py +++ b/GPy/likelihoods/student_t.py @@ -4,10 +4,10 @@ import numpy as np from scipy import stats, special import scipy as sp -import link_functions +from . import link_functions from scipy import stats, integrate from scipy.special import gammaln, gamma -from likelihood import Likelihood +from .likelihood import Likelihood from ..core.parameterization import Param from ..core.parameterization.transformations import Logexp diff --git a/GPy/util/choleskies.py b/GPy/util/choleskies.py index 3f37fc3f..cc3a7f75 100644 --- a/GPy/util/choleskies.py +++ b/GPy/util/choleskies.py @@ -3,7 +3,7 @@ import numpy as np from scipy import weave -import linalg +from . 
import linalg def safe_root(N): diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 254639a6..10835463 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -11,7 +11,7 @@ import datetime import json import re -from config import * +from .config import * ipython_available=True try: diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index b148f2f4..216a1050 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -13,7 +13,7 @@ from ctypes import byref, c_char, c_int, c_double # TODO import scipy import warnings import os -from config import config +from .config import config import logging _scipyversion = np.float64((scipy.__version__).split('.')[:2]) diff --git a/GPy/util/ln_diff_erfs.py b/GPy/util/ln_diff_erfs.py index bb9cfe03..582a4585 100644 --- a/GPy/util/ln_diff_erfs.py +++ b/GPy/util/ln_diff_erfs.py @@ -6,7 +6,7 @@ try: from scipy.special import erfcx, erf except ImportError: from scipy.special import erf - from erfcx import erfcx + from .erfcx import erfcx import numpy as np diff --git a/GPy/util/misc.py b/GPy/util/misc.py index bf37159d..1f746e19 100644 --- a/GPy/util/misc.py +++ b/GPy/util/misc.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from config import * +from .config import * def chain_1(df_dg, dg_dx): """ From 1521b3e26020f1ff52d435bf5e17acaf99522528 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 08:11:11 +0000 Subject: [PATCH 04/99] Convert print to function for Python 3 compatibility. 
This breaks compatibility for versions of Python < 2.6 --- GPy/core/gp.py | 4 ++-- GPy/core/model.py | 18 +++++++++--------- GPy/core/parameterization/parameter_core.py | 2 +- GPy/core/parameterization/parameterized.py | 2 +- .../parameterization/ties_and_remappings.py | 8 ++++---- GPy/core/parameterization/transformations.py | 16 ++++++++-------- GPy/core/parameterization/updateable.py | 2 +- GPy/core/sparse_gp.py | 2 +- GPy/core/sparse_gp_mpi.py | 2 +- GPy/core/verbose_optimization.py | 18 +++++++++--------- 10 files changed, 37 insertions(+), 37 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 5110e9a5..0ef6e15e 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -82,7 +82,7 @@ class GP(Model): inference_method = exact_gaussian_inference.ExactGaussianInference() else: inference_method = expectation_propagation.EP() - print "defaulting to ", inference_method, "for latent function inference" + print("defaulting to ", inference_method, "for latent function inference") self.inference_method = inference_method logger.info("adding kernel and likelihood as parameters") @@ -441,7 +441,7 @@ class GP(Model): try: super(GP, self).optimize(optimizer, start, **kwargs) except KeyboardInterrupt: - print "KeyboardInterrupt caught, calling on_optimization_end() to round things up" + print("KeyboardInterrupt caught, calling on_optimization_end() to round things up") self.inference_method.on_optimization_end() raise diff --git a/GPy/core/model.py b/GPy/core/model.py index 8eb34f33..348cebf1 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -82,7 +82,7 @@ class Model(Parameterized): pool.close() # signal that no more data coming in pool.join() # wait for all the tasks to complete except KeyboardInterrupt: - print "Ctrl+c received, terminating and joining pool." 
+ print("Ctrl+c received, terminating and joining pool.") pool.terminate() pool.join() @@ -95,10 +95,10 @@ class Model(Parameterized): self.optimization_runs.append(jobs[i].get()) if verbose: - print("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)) + print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt))) except Exception as e: if robust: - print("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)) + print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts))) else: raise e @@ -237,10 +237,10 @@ class Model(Parameterized): """ if self.is_fixed or self.size == 0: - print 'nothing to optimize' + print('nothing to optimize') if not self.update_model(): - print "updates were off, setting updates on again" + print("updates were off, setting updates on again") self.update_model(True) if start == None: @@ -305,7 +305,7 @@ class Model(Parameterized): transformed_index = (indices - (~self._fixes_).cumsum())[transformed_index[which[0]]] if transformed_index.size == 0: - print "No free parameters to check" + print("No free parameters to check") return # just check the global ratio @@ -342,7 +342,7 @@ class Model(Parameterized): header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))] header_string = map(lambda x: '|'.join(x), [header_string]) separator = '-' * len(header_string[0]) - print '\n'.join([header_string[0], separator]) + print('\n'.join([header_string[0], separator])) if target_param is None: param_index = range(len(x)) transformed_index = param_index @@ -358,7 +358,7 @@ class Model(Parameterized): transformed_index = param_index if param_index.size == 0: - print "No free parameters to check" + print("No free parameters to check") return gradient = self._grads(x).copy() @@ -392,7 +392,7 @@ class Model(Parameterized): ng = '%.6f' % float(numerical_gradient) df = '%1.e' % 
float(df_ratio) grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5]) - print grad_string + print(grad_string) self.optimizer_array = x return ret diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 5baa81c8..06991ab0 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -558,7 +558,7 @@ class Indexable(Nameable, Updateable): """ if warning and reconstrained.size > 0: # TODO: figure out which parameters have changed and only print those - print "WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name) + print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)) index = self._raveled_index() which.add(what, index) return index diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index 6bdd8036..1a5ff123 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -325,7 +325,7 @@ class Parameterized(Parameterizable): self._notify_parent_change() self.parameters_changed() except Exception as e: - print "WARNING: caught exception {!s}, trying to continue".format(e) + print("WARNING: caught exception {!s}, trying to continue".format(e)) def copy(self, memo=None): if memo is None: diff --git a/GPy/core/parameterization/ties_and_remappings.py b/GPy/core/parameterization/ties_and_remappings.py index f0bb2d61..bafa8a98 100644 --- a/GPy/core/parameterization/ties_and_remappings.py +++ b/GPy/core/parameterization/ties_and_remappings.py @@ -98,7 +98,7 @@ class Tie(Parameterized): if np.all(self.label_buf[idx]==0): # None of p has been tied before. 
tie_idx = self._expandTieParam(1) - print tie_idx + print(tie_idx) tie_id = self.label_buf.max()+1 self.label_buf[tie_idx] = tie_id else: @@ -189,14 +189,14 @@ class Tie(Parameterized): b0 = self.label_buf==self.label_buf[self.buf_idx[i]] b = self._highest_parent_.param_array[b0]!=self.tied_param[i] if b.sum()==0: - print 'XXX' + print('XXX') continue elif b.sum()==1: - print '!!!' + print('!!!') val = self._highest_parent_.param_array[b0][b][0] self._highest_parent_.param_array[b0] = val else: - print '@@@' + print('@@@') self._highest_parent_.param_array[b0] = self.tied_param[i] changed = True return changed diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py index 181c16e0..05051c92 100644 --- a/GPy/core/parameterization/transformations.py +++ b/GPy/core/parameterization/transformations.py @@ -72,7 +72,7 @@ class Logexp(Transformation): return np.einsum('i,i->i', df, np.where(f>_lim_val, 1., 1. - np.exp(-f))) def initialize(self, f): if np.any(f < 0.): - print "Warning: changing parameters to satisfy constraints" + print("Warning: changing parameters to satisfy constraints") return np.abs(f) def __str__(self): return '+ve' @@ -130,7 +130,7 @@ class NormalTheta(Transformation): def initialize(self, f): if np.any(f[self.var_indices] < 0.): - print "Warning: changing parameters to satisfy constraints" + print("Warning: changing parameters to satisfy constraints") f[self.var_indices] = np.abs(f[self.var_indices]) return f @@ -177,7 +177,7 @@ class NormalNaturalAntti(NormalTheta): def initialize(self, f): if np.any(f[self.var_indices] < 0.): - print "Warning: changing parameters to satisfy constraints" + print("Warning: changing parameters to satisfy constraints") f[self.var_indices] = np.abs(f[self.var_indices]) return f @@ -220,7 +220,7 @@ class NormalEta(Transformation): def initialize(self, f): if np.any(f[self.var_indices] < 0.): - print "Warning: changing parameters to satisfy constraints" + print("Warning: 
changing parameters to satisfy constraints") f[self.var_indices] = np.abs(f[self.var_indices]) return f @@ -360,7 +360,7 @@ class LogexpNeg(Transformation): return np.einsum('i,i->i', df, np.where(f>_lim_val, -1, -1 + np.exp(-f))) def initialize(self, f): if np.any(f < 0.): - print "Warning: changing parameters to satisfy constraints" + print("Warning: changing parameters to satisfy constraints") return np.abs(f) def __str__(self): return '+ve' @@ -412,7 +412,7 @@ class LogexpClipped(Logexp): return np.einsum('i,i->i', df, gf) # np.where(f < self.lower, 0, gf) def initialize(self, f): if np.any(f < 0.): - print "Warning: changing parameters to satisfy constraints" + print("Warning: changing parameters to satisfy constraints") return np.abs(f) def __str__(self): return '+ve_c' @@ -428,7 +428,7 @@ class Exponent(Transformation): return np.einsum('i,i->i', df, f) def initialize(self, f): if np.any(f < 0.): - print "Warning: changing parameters to satisfy constraints" + print("Warning: changing parameters to satisfy constraints") return np.abs(f) def __str__(self): return '+ve' @@ -486,7 +486,7 @@ class Logistic(Transformation): return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference) def initialize(self, f): if np.any(np.logical_or(f < self.lower, f > self.upper)): - print "Warning: changing parameters to satisfy constraints" + print("Warning: changing parameters to satisfy constraints") #return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f) #FIXME: Max, zeros_like right? 
return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f) diff --git a/GPy/core/parameterization/updateable.py b/GPy/core/parameterization/updateable.py index 86446fa0..6abf0280 100644 --- a/GPy/core/parameterization/updateable.py +++ b/GPy/core/parameterization/updateable.py @@ -36,7 +36,7 @@ class Updateable(Observable): self.trigger_update() def toggle_update(self): - print "deprecated: toggle_update was renamed to update_toggle for easier access" + print("deprecated: toggle_update was renamed to update_toggle for easier access") self.update_toggle() def update_toggle(self): self.update_model(not self.update_model()) diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index a9866f48..96e3dbe7 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -49,7 +49,7 @@ class SparseGP(GP): else: #inference_method = ?? raise NotImplementedError, "what to do what to do?" - print "defaulting to ", inference_method, "for latent function inference" + print("defaulting to ", inference_method, "for latent function inference") self.Z = Param('inducing inputs', Z) self.num_inducing = Z.shape[0] diff --git a/GPy/core/sparse_gp_mpi.py b/GPy/core/sparse_gp_mpi.py index ac53d4ac..28de3124 100644 --- a/GPy/core/sparse_gp_mpi.py +++ b/GPy/core/sparse_gp_mpi.py @@ -56,7 +56,7 @@ class SparseGP_MPI(SparseGP): self.N_range = (N_start, N_end) self.N_list = np.array(N_list) self.Y_local = self.Y[N_start:N_end] - print 'MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range) + print('MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range)) mpi_comm.Bcast(self.param_array, root=0) self.update_model(True) diff --git a/GPy/core/verbose_optimization.py b/GPy/core/verbose_optimization.py index 78b6127e..affa7d43 100644 --- a/GPy/core/verbose_optimization.py +++ b/GPy/core/verbose_optimization.py @@ -1,7 +1,7 @@ # Copyright (c) 2012-2014, Max Zwiessele. 
# Licensed under the BSD 3-clause license (see LICENSE.txt) - +from __future__ import print_function import numpy as np import sys import time @@ -65,8 +65,8 @@ class VerboseOptimization(object): #self.progress.add_class('box-flex1') else: self.exps = exponents(self.fnow, self.current_gradient) - print 'Running {} Code:'.format(self.opt_name) - print ' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters) + print('Running {} Code:'.format(self.opt_name)) + print(' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters)) def __enter__(self): self.start = time.time() @@ -107,11 +107,11 @@ class VerboseOptimization(object): b = np.any(n_exps < self.exps) if a or b: self.p_iter = self.iteration - print '' + print('') if b: self.exps = n_exps - print '\r', - print '{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r', + print('\r', end=' ') + print('{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r', sys.stdout.flush() def print_status(self, me, which=None): @@ -140,6 +140,6 @@ class VerboseOptimization(object): self.print_out() if not self.ipython_notebook: - print - print 'Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start) - print + print() + print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)) + print() From d284953b6933fda5eab5d4d2f6154ce82b94769d Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 08:17:41 +0000 Subject: [PATCH 05/99] Added details of Python 3 work --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 
68b66b31..4dc5b807 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,12 @@ # GPy +# Moving to Python 3 +Work is underway to make GPy run on Python 3. We are not there yet! Changes performed so far have retained compatibility with Python 2.6 and abive. + +Work done so far: + +* Use 2to3 to fix relative imports +* use 2to3 to convert print from statement to function. Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. A Gaussian processes framework in Python. From 906f69e20e04883f5d19c4f918b29e2362ca365a Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 08:31:44 +0000 Subject: [PATCH 06/99] Convert print to function for Python 3 compatibility. --- GPy/examples/classification.py | 24 ++++++------- GPy/examples/dimensionality_reduction.py | 12 +++---- GPy/examples/non_gaussian.py | 44 ++++++++++++------------ GPy/examples/regression.py | 26 +++++++------- README.md | 16 ++++----- 5 files changed, 61 insertions(+), 61 deletions(-) diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py index b3780073..d4518f24 100644 --- a/GPy/examples/classification.py +++ b/GPy/examples/classification.py @@ -15,7 +15,7 @@ def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True): """ try:import pods - except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets' + except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.oil() X = data['X'] Xtest = data['Xtest'] @@ -52,7 +52,7 @@ def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True): """ try:import pods - except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets' + except ImportError:print('pods unavailable, see https://github.com/sods/ods 
for example datasets') data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 @@ -75,7 +75,7 @@ def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True): m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) - print m + print(m) return m def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True): @@ -88,7 +88,7 @@ def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot= """ try:import pods - except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets' + except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 @@ -114,7 +114,7 @@ def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot= m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) - print m + print(m) return m def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, optimize=True, plot=True): @@ -127,7 +127,7 @@ def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, opti """ try:import pods - except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets' + except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 @@ -147,7 +147,7 @@ def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, opti m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) - print m + print(m) return m def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True): @@ -160,7 +160,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True): """ try:import pods - except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets' + except 
ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.toy_linear_1d_classification(seed=seed) Y = data['Y'][:, 0:1] Y[Y.flatten() == -1] = 0 @@ -177,7 +177,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True): # Parameters optimization: for _ in range(5): m.optimize(max_iters=int(max_iters/5)) - print m + print(m) # Plot if plot: @@ -186,7 +186,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True): m.plot_f(ax=axes[0]) m.plot(ax=axes[1]) - print m + print(m) return m def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True): @@ -202,7 +202,7 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel= :type kernel: a GPy kernel """ try:import pods - except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets' + except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets') data = pods.datasets.crescent_data(seed=seed) Y = data['Y'] Y[Y.flatten()==-1] = 0 @@ -224,5 +224,5 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel= if plot: m.plot() - print m + print(m) return m diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index df9093a2..fe1fa1e5 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -333,7 +333,7 @@ def bgplvm_simulation(optimize=True, verbose=1, m.likelihood.variance = .1 if optimize: - print "Optimizing model:" + print("Optimizing model:") m.optimize('bfgs', messages=verbose, max_iters=max_iters, gtol=.05) if plot: @@ -358,7 +358,7 @@ def ssgplvm_simulation(optimize=True, verbose=1, m.likelihood.variance = .1 if optimize: - print "Optimizing model:" + print("Optimizing model:") m.optimize('scg', messages=verbose, max_iters=max_iters, gtol=.05) if plot: @@ 
-388,7 +388,7 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1, m.Yreal = Y if optimize: - print "Optimizing model:" + print("Optimizing model:") m.optimize('bfgs', messages=verbose, max_iters=max_iters, gtol=.05) if plot: @@ -411,7 +411,7 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw): m['.*noise'] = [Y.var() / 40. for Y in Ylist] if optimize: - print "Optimizing Model:" + print("Optimizing Model:") m.optimize(messages=verbose, max_iters=8e3) if plot: m.X.plot("MRD Latent Space 1D") @@ -439,7 +439,7 @@ def mrd_simulation_missing_data(optimize=True, verbose=True, plot=True, plot_sim initx="random", initz='permute', **kw) if optimize: - print "Optimizing Model:" + print("Optimizing Model:") m.optimize('bfgs', messages=verbose, max_iters=8e3, gtol=.1) if plot: m.X.plot("MRD Latent Space 1D") @@ -603,7 +603,7 @@ def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True): try: if optimize: m.optimize('bfgs', messages=verbose, max_iters=5e3, bfgs_factor=10) except KeyboardInterrupt: - print "Keyboard interrupt, continuing to plot and return" + print("Keyboard interrupt, continuing to plot and return") if plot: fig, (latent_axes, sense_axes) = plt.subplots(1, 2) diff --git a/GPy/examples/non_gaussian.py b/GPy/examples/non_gaussian.py index ddac8813..3652b4d3 100644 --- a/GPy/examples/non_gaussian.py +++ b/GPy/examples/non_gaussian.py @@ -37,7 +37,7 @@ def student_t_approx(optimize=True, plot=True): #Add student t random noise to datapoints deg_free = 1 - print "Real noise: ", real_std + print("Real noise: ", real_std) initial_var_guess = 0.5 edited_real_sd = initial_var_guess @@ -73,7 +73,7 @@ def student_t_approx(optimize=True, plot=True): m4['.*t_scale2'].constrain_bounded(1e-6, 10.) 
m4['.*white'].constrain_fixed(1e-5) m4.randomize() - print m4 + print(m4) debug=True if debug: m4.optimize(messages=1) @@ -81,18 +81,18 @@ def student_t_approx(optimize=True, plot=True): pb.plot(m4.X, m4.inference_method.f_hat) pb.plot(m4.X, m4.Y, 'rx') m4.plot() - print m4 + print(m4) return m4 if optimize: optimizer='scg' - print "Clean Gaussian" + print("Clean Gaussian") m1.optimize(optimizer, messages=1) - print "Corrupt Gaussian" + print("Corrupt Gaussian") m2.optimize(optimizer, messages=1) - print "Clean student t" + print("Clean student t") m3.optimize(optimizer, messages=1) - print "Corrupt student t" + print("Corrupt student t") m4.optimize(optimizer, messages=1) if plot: @@ -151,7 +151,7 @@ def boston_example(optimize=True, plot=True): for n, (train, test) in enumerate(kf): X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test] - print "Fold {}".format(n) + print("Fold {}".format(n)) noise = 1e-1 #np.exp(-2) rbf_len = 0.5 @@ -163,21 +163,21 @@ def boston_example(optimize=True, plot=True): score_folds[0, n] = rmse(Y_test, np.mean(Y_train)) #Gaussian GP - print "Gauss GP" + print("Gauss GP") mgp = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelgp.copy()) mgp.constrain_fixed('.*white', 1e-5) mgp['.*len'] = rbf_len mgp['.*noise'] = noise - print mgp + print(mgp) if optimize: mgp.optimize(optimizer=optimizer, messages=messages) Y_test_pred = mgp.predict(X_test) score_folds[1, n] = rmse(Y_test, Y_test_pred[0]) pred_density[1, n] = np.mean(mgp.log_predictive_density(X_test, Y_test)) - print mgp - print pred_density + print(mgp) + print(pred_density) - print "Gaussian Laplace GP" + print("Gaussian Laplace GP") N, D = Y_train.shape g_distribution = GPy.likelihoods.noise_model_constructors.gaussian(variance=noise, N=N, D=D) g_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), g_distribution) @@ -186,18 +186,18 @@ def boston_example(optimize=True, plot=True): mg.constrain_fixed('.*white', 1e-5) mg['rbf_len'] = rbf_len 
mg['noise'] = noise - print mg + print(mg) if optimize: mg.optimize(optimizer=optimizer, messages=messages) Y_test_pred = mg.predict(X_test) score_folds[2, n] = rmse(Y_test, Y_test_pred[0]) pred_density[2, n] = np.mean(mg.log_predictive_density(X_test, Y_test)) - print pred_density - print mg + print(pred_density) + print(mg) for stu_num, df in enumerate(degrees_freedoms): #Student T - print "Student-T GP {}df".format(df) + print("Student-T GP {}df".format(df)) t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=df, sigma2=noise) stu_t_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), t_distribution) mstu_t = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=stu_t_likelihood) @@ -205,14 +205,14 @@ def boston_example(optimize=True, plot=True): mstu_t.constrain_bounded('.*t_scale2', 0.0001, 1000) mstu_t['rbf_len'] = rbf_len mstu_t['.*t_scale2'] = noise - print mstu_t + print(mstu_t) if optimize: mstu_t.optimize(optimizer=optimizer, messages=messages) Y_test_pred = mstu_t.predict(X_test) score_folds[3+stu_num, n] = rmse(Y_test, Y_test_pred[0]) pred_density[3+stu_num, n] = np.mean(mstu_t.log_predictive_density(X_test, Y_test)) - print pred_density - print mstu_t + print(pred_density) + print(mstu_t) if plot: plt.figure() @@ -230,8 +230,8 @@ def boston_example(optimize=True, plot=True): plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x') plt.title('Stu t {}df'.format(df)) - print "Average scores: {}".format(np.mean(score_folds, 1)) - print "Average pred density: {}".format(np.mean(pred_density, 1)) + print("Average scores: {}".format(np.mean(score_folds, 1))) + print("Average pred density: {}".format(np.mean(pred_density, 1))) if plot: #Plotting diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 37a18f63..cf8205f9 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -15,7 +15,7 @@ def olympic_marathon_men(optimize=True, plot=True): """Run a 
standard Gaussian process regression on the Olympic marathon data.""" try:import pods except ImportError: - print 'pods unavailable, see https://github.com/sods/ods for example datasets' + print('pods unavailable, see https://github.com/sods/ods for example datasets') return data = pods.datasets.olympic_marathon_men() @@ -88,7 +88,7 @@ def epomeo_gpx(max_iters=200, optimize=True, plot=True): """ try:import pods except ImportError: - print 'pods unavailable, see https://github.com/sods/ods for example datasets' + print('pods unavailable, see https://github.com/sods/ods for example datasets') return data = pods.datasets.epomeo_gpx() num_data_list = [] @@ -135,7 +135,7 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000 try:import pods except ImportError: - print 'pods unavailable, see https://github.com/sods/ods for example datasets' + print('pods unavailable, see https://github.com/sods/ods for example datasets') return data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number) # data['Y'] = data['Y'][0::2, :] @@ -219,7 +219,7 @@ def olympic_100m_men(optimize=True, plot=True): """Run a standard Gaussian process regression on the Rogers and Girolami olympics data.""" try:import pods except ImportError: - print 'pods unavailable, see https://github.com/sods/ods for example datasets' + print('pods unavailable, see https://github.com/sods/ods for example datasets') return data = pods.datasets.olympic_100m_men() @@ -240,7 +240,7 @@ def toy_rbf_1d(optimize=True, plot=True): """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.""" try:import pods except ImportError: - print 'pods unavailable, see https://github.com/sods/ods for example datasets' + print('pods unavailable, see https://github.com/sods/ods for example datasets') return data = pods.datasets.toy_rbf_1d() @@ -258,7 +258,7 @@ def toy_rbf_1d_50(optimize=True, plot=True): """Run a 
simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.""" try:import pods except ImportError: - print 'pods unavailable, see https://github.com/sods/ods for example datasets' + print('pods unavailable, see https://github.com/sods/ods for example datasets') return data = pods.datasets.toy_rbf_1d_50() @@ -377,7 +377,7 @@ def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True): """Predict the location of a robot given wirelss signal strength readings.""" try:import pods except ImportError: - print 'pods unavailable, see https://github.com/sods/ods for example datasets' + print('pods unavailable, see https://github.com/sods/ods for example datasets') return data = pods.datasets.robot_wireless() @@ -398,14 +398,14 @@ def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True): sse = ((data['Xtest'] - Xpredict)**2).sum() - print('Sum of squares error on test data: ' + str(sse)) + print(('Sum of squares error on test data: ' + str(sse))) return m def silhouette(max_iters=100, optimize=True, plot=True): """Predict the pose of a figure given a silhouette. 
This is a task from Agarwal and Triggs 2004 ICML paper.""" try:import pods except ImportError: - print 'pods unavailable, see https://github.com/sods/ods for example datasets' + print('pods unavailable, see https://github.com/sods/ods for example datasets') return data = pods.datasets.silhouette() @@ -416,7 +416,7 @@ def silhouette(max_iters=100, optimize=True, plot=True): if optimize: m.optimize(messages=True, max_iters=max_iters) - print m + print(m) return m def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=False): @@ -468,7 +468,7 @@ def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, opt if plot: m.plot() - print m + print(m) return m def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True): @@ -492,7 +492,7 @@ def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True): if plot: m.plot(ax=axes[0]) axes[0].set_title('no input uncertainty') - print m + print(m) # the same Model with uncertainty m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z, X_variance=S) @@ -503,5 +503,5 @@ def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True): axes[1].set_title('with input uncertainty') fig.canvas.draw() - print m + print(m) return m diff --git a/README.md b/README.md index 4dc5b807..2e9dc58a 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,5 @@ # GPy -# Moving to Python 3 -Work is underway to make GPy run on Python 3. We are not there yet! Changes performed so far have retained compatibility with Python 2.6 and abive. - -Work done so far: - -* Use 2to3 to fix relative imports -* use 2to3 to convert print from statement to function. Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. 
- A Gaussian processes framework in Python. * [GPy homepage](http://sheffieldml.github.io/GPy/) @@ -18,6 +10,14 @@ A Gaussian processes framework in Python. Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GPy.png) +### Moving to Python 3 +Work is underway to make GPy run on Python 3. We are not there yet! Changes performed so far have retained compatibility with Python 2.6 and above. + +Work done so far: + +* Use 2to3 to fix relative imports +* use 2to3 to convert print from statement to function. Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. + ### Citation @Misc{gpy2014, From 5601a580deed34877a988ba3adb850f21d944e3f Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 08:48:48 +0000 Subject: [PATCH 07/99] Convert print to function for Python 3 compatibility.
This breaks compatibility for versions of Python < 2.6 --- .../expectation_propagation_dtc.py | 2 +- .../latent_function_inference/var_dtc.py | 2 +- GPy/inference/mcmc/samplers.py | 2 +- .../optimization/conjugate_gradient_descent.py | 4 ++-- GPy/inference/optimization/optimization.py | 12 ++++++------ GPy/inference/optimization/scg.py | 15 +++++++-------- 6 files changed, 18 insertions(+), 19 deletions(-) diff --git a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py index 35b1b7dc..0f972a84 100644 --- a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py +++ b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py @@ -179,7 +179,7 @@ class EPDTC(LatentFunctionInference): if VVT_factor.shape[1] == Y.shape[1]: woodbury_vector = Cpsi1Vf # == Cpsi1V else: - print 'foobar' + print('foobar') psi1V = np.dot(mu_tilde[:,None].T*beta, psi1).T tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0) tmp, _ = dpotrs(LB, tmp, lower=1) diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index d61e7f0f..db59df14 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -170,7 +170,7 @@ class VarDTC(LatentFunctionInference): if VVT_factor.shape[1] == Y.shape[1]: woodbury_vector = Cpsi1Vf # == Cpsi1V else: - print 'foobar' + print('foobar') import ipdb; ipdb.set_trace() psi1V = np.dot(Y.T*beta, psi1).T tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0) diff --git a/GPy/inference/mcmc/samplers.py b/GPy/inference/mcmc/samplers.py index 444d99d7..ff396a96 100644 --- a/GPy/inference/mcmc/samplers.py +++ b/GPy/inference/mcmc/samplers.py @@ -40,7 +40,7 @@ class Metropolis_Hastings: fcurrent = self.model.log_likelihood() + self.model.log_prior() accepted = np.zeros(Ntotal,dtype=np.bool) for it in range(Ntotal): - print "sample %d of 
%d\r"%(it,Ntotal), + print("sample %d of %d\r"%(it,Ntotal), end=' ') sys.stdout.flush() prop = np.random.multivariate_normal(current, self.cov*self.scale*self.scale) self.model._set_params_transformed(prop) diff --git a/GPy/inference/optimization/conjugate_gradient_descent.py b/GPy/inference/optimization/conjugate_gradient_descent.py index dfc4a48d..274de784 100644 --- a/GPy/inference/optimization/conjugate_gradient_descent.py +++ b/GPy/inference/optimization/conjugate_gradient_descent.py @@ -74,7 +74,7 @@ class _Async_Optimization(Thread): if self.outq is not None: self.outq.put(self.SENTINEL) if self.messages: - print "" + print("") self.runsignal.clear() def run(self, *args, **kwargs): @@ -213,7 +213,7 @@ class Async_Optimize(object): # # print "^C" # self.runsignal.clear() # c.join() - print "WARNING: callback still running, optimisation done!" + print("WARNING: callback still running, optimisation done!") return p.result class CGD(Async_Optimize): diff --git a/GPy/inference/optimization/optimization.py b/GPy/inference/optimization/optimization.py index aa9be793..0d6887e5 100644 --- a/GPy/inference/optimization/optimization.py +++ b/GPy/inference/optimization/optimization.py @@ -125,9 +125,9 @@ class opt_lbfgsb(Optimizer): opt_dict = {} if self.xtol is not None: - print "WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it" + print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it") if self.ftol is not None: - print "WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it" + print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it") if self.gtol is not None: opt_dict['pgtol'] = self.gtol if self.bfgs_factor is not None: @@ -158,7 +158,7 @@ class opt_simplex(Optimizer): if self.ftol is not None: opt_dict['ftol'] = self.ftol if self.gtol is not None: - print "WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it" + print("WARNING: simplex doesn't have an gtol arg, so I'm going 
to ignore it") opt_result = optimize.fmin(f, self.x_init, (), disp=self.messages, maxfun=self.max_f_eval, full_output=True, **opt_dict) @@ -186,11 +186,11 @@ class opt_rasm(Optimizer): opt_dict = {} if self.xtol is not None: - print "WARNING: minimize doesn't have an xtol arg, so I'm going to ignore it" + print("WARNING: minimize doesn't have an xtol arg, so I'm going to ignore it") if self.ftol is not None: - print "WARNING: minimize doesn't have an ftol arg, so I'm going to ignore it" + print("WARNING: minimize doesn't have an ftol arg, so I'm going to ignore it") if self.gtol is not None: - print "WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it" + print("WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it") opt_result = rasm.minimize(self.x_init, f_fp, (), messages=self.messages, maxnumfuneval=self.max_f_eval) diff --git a/GPy/inference/optimization/scg.py b/GPy/inference/optimization/scg.py index 34dd181f..8960de1d 100644 --- a/GPy/inference/optimization/scg.py +++ b/GPy/inference/optimization/scg.py @@ -21,14 +21,13 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
- +from __future__ import print_function import numpy as np import sys - def print_out(len_maxiters, fnow, current_grad, beta, iteration): - print '\r', - print '{0:>0{mi}g} {1:> 12e} {2:< 12.6e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r', + print('\r', end=' ') + print('{0:>0{mi}g} {1:> 12e} {2:< 12.6e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r', sys.stdout.flush() def exponents(fnow, current_grad): @@ -80,7 +79,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True, len_maxiters = len(str(maxiters)) if display: - print ' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters) + print(' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters)) exps = exponents(fnow, current_grad) p_iter = iteration @@ -140,7 +139,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True, b = np.any(n_exps < exps) if a or b: p_iter = iteration - print '' + print('') if b: exps = n_exps @@ -189,6 +188,6 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True, if display: print_out(len_maxiters, fnow, current_grad, beta, iteration) - print "" - print status + print("") + print(status) return x, flog, function_eval, status From 2a433244280a7e3f7636562103f475ad6320e55f Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 08:52:24 +0000 Subject: [PATCH 08/99] Convert print to function for Python 3 compatibility. 
--- GPy/kern/_src/coregionalize.py | 4 ++-- GPy/kern/_src/stationary.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py index 291402ec..b6a3aecf 100644 --- a/GPy/kern/_src/coregionalize.py +++ b/GPy/kern/_src/coregionalize.py @@ -61,7 +61,7 @@ class Coregionalize(Kern): try: return self._K_weave(X, X2) except: - print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n" + print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n") config.set('weave', 'working', 'False') return self._K_numpy(X, X2) else: @@ -123,7 +123,7 @@ class Coregionalize(Kern): try: dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2) except: - print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n" + print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n") config.set('weave', 'working', 'False') dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2) else: diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index 06671b23..426296f7 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -165,7 +165,7 @@ class Stationary(Kern): try: self.lengthscale.gradient = self.weave_lengthscale_grads(tmp, X, X2) except: - print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n" + print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n") config.set('weave', 'working', 'False') self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)]) else: @@ -214,7 +214,7 @@ class Stationary(Kern): try: return self.gradients_X_weave(dL_dK, X, X2) except: - print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n" + print("\n Weave compilation failed. 
Falling back to (slower) numpy implementation\n") config.set('weave', 'working', 'False') return self.gradients_X_(dL_dK, X, X2) else: From 4512964f0933a1ee36ab38b4b4f3753235bd9bb7 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 08:58:05 +0000 Subject: [PATCH 09/99] Convert print to function for Python 3 compatibility. This breaks compatibility for versions of Python < 2.6 --- .../one_vs_all_sparse_classification.py | 2 +- GPy/models/sparse_gp_minibatch.py | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/GPy/models/one_vs_all_sparse_classification.py b/GPy/models/one_vs_all_sparse_classification.py index 3bdd2647..7528ffd2 100644 --- a/GPy/models/one_vs_all_sparse_classification.py +++ b/GPy/models/one_vs_all_sparse_classification.py @@ -30,7 +30,7 @@ class OneVsAllSparseClassification(object): self.results = {} for yj in labels: - print 'Class %s vs all' %yj + print('Class %s vs all' %yj) Ynew = Y.copy() Ynew[Y.flatten()!=yj] = 0 Ynew[Y.flatten()==yj] = 1 diff --git a/GPy/models/sparse_gp_minibatch.py b/GPy/models/sparse_gp_minibatch.py index e827bb70..a6081e61 100644 --- a/GPy/models/sparse_gp_minibatch.py +++ b/GPy/models/sparse_gp_minibatch.py @@ -1,6 +1,7 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) +from __future__ import print_function import numpy as np from ..core.parameterization.param import Param from ..core.sparse_gp import SparseGP @@ -50,7 +51,7 @@ class SparseGPMiniBatch(SparseGP): else: #inference_method = ?? raise NotImplementedError, "what to do what to do?" - print "defaulting to ", inference_method, "for latent function inference" + print("defaulting to ", inference_method, "for latent function inference") self.kl_factr = 1. 
self.Z = Param('inducing inputs', Z) @@ -80,13 +81,13 @@ class SparseGPMiniBatch(SparseGP): overall = self.Y_normalized.shape[1] m_f = lambda i: "Precomputing Y for missing data: {: >7.2%}".format(float(i+1)/overall) message = m_f(-1) - print message, + print(message, end=' ') for d in xrange(overall): self.Ylist.append(self.Y_normalized[self.ninan[:, d], d][:, None]) - print ' '*(len(message)+1) + '\r', + print(' '*(len(message)+1) + '\r', end=' ') message = m_f(d) - print message, - print '' + print(message, end=' ') + print('') self.posterior = None @@ -241,15 +242,15 @@ class SparseGPMiniBatch(SparseGP): if not self.stochastics: m_f = lambda i: "Inference with missing_data: {: >7.2%}".format(float(i+1)/self.output_dim) message = m_f(-1) - print message, + print(message, end=' ') for d in self.stochastics.d: ninan = self.ninan[:, d] if not self.stochastics: - print ' '*(len(message)) + '\r', + print(' '*(len(message)) + '\r', end=' ') message = m_f(d) - print message, + print(message, end=' ') posterior, log_marginal_likelihood, \ grad_dict, current_values, value_indices = self._inner_parameters_changed( @@ -268,7 +269,7 @@ class SparseGPMiniBatch(SparseGP): woodbury_vector[:, d:d+1] = posterior.woodbury_vector self._log_marginal_likelihood += log_marginal_likelihood if not self.stochastics: - print '' + print('') if self.posterior is None: self.posterior = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, From 4b4e5d490191228ec3270e95abb5afd6e03a0fd4 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 09:01:27 +0000 Subject: [PATCH 10/99] Convert print to function for Python 3 compatibility --- GPy/plotting/matplot_dep/dim_reduction_plots.py | 6 +++--- GPy/plotting/matplot_dep/inference_plots.py | 2 +- GPy/plotting/matplot_dep/kernel_plots.py | 2 +- GPy/plotting/matplot_dep/maps.py | 4 ++-- GPy/plotting/matplot_dep/visualize.py | 6 +++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git 
a/GPy/plotting/matplot_dep/dim_reduction_plots.py b/GPy/plotting/matplot_dep/dim_reduction_plots.py index 1398b40c..982f8fa9 100644 --- a/GPy/plotting/matplot_dep/dim_reduction_plots.py +++ b/GPy/plotting/matplot_dep/dim_reduction_plots.py @@ -62,7 +62,7 @@ def plot_latent(model, labels=None, which_indices=None, if X.shape[0] > 1000: - print "Warning: subsampling X, as it has more samples then 1000. X.shape={!s}".format(X.shape) + print("Warning: subsampling X, as it has more samples then 1000. X.shape={!s}".format(X.shape)) subsample = np.random.choice(X.shape[0], size=1000, replace=False) X = X[subsample] labels = labels[subsample] @@ -187,14 +187,14 @@ def plot_latent(model, labels=None, which_indices=None, fig.tight_layout() fig.canvas.draw() except Exception as e: - print "Could not invoke tight layout: {}".format(e) + print("Could not invoke tight layout: {}".format(e)) pass if updates: try: ax.figure.canvas.show() except Exception as e: - print "Could not invoke show: {}".format(e) + print("Could not invoke show: {}".format(e)) raw_input('Enter to continue') view.deactivate() return ax diff --git a/GPy/plotting/matplot_dep/inference_plots.py b/GPy/plotting/matplot_dep/inference_plots.py index c802932c..02007390 100644 --- a/GPy/plotting/matplot_dep/inference_plots.py +++ b/GPy/plotting/matplot_dep/inference_plots.py @@ -12,7 +12,7 @@ except: def plot_optimizer(optimizer): if optimizer.trace == None: - print "No trace present so I can't plot it. Please check that the optimizer actually supplies a trace." + print("No trace present so I can't plot it. 
Please check that the optimizer actually supplies a trace.") else: pb.figure() pb.plot(optimizer.trace) diff --git a/GPy/plotting/matplot_dep/kernel_plots.py b/GPy/plotting/matplot_dep/kernel_plots.py index 347e3d08..fc061ca7 100644 --- a/GPy/plotting/matplot_dep/kernel_plots.py +++ b/GPy/plotting/matplot_dep/kernel_plots.py @@ -81,7 +81,7 @@ def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False, filtering=Non last_bottom = ard_params[i,:] bottom += last_bottom else: - print "filtering out {}".format(kernel.parameters[i].name) + print("filtering out {}".format(kernel.parameters[i].name)) ax.set_xlim(-.5, kernel.input_dim - .5) add_bar_labels(fig, ax, [bars[-1]], bottom=bottom-last_bottom) diff --git a/GPy/plotting/matplot_dep/maps.py b/GPy/plotting/matplot_dep/maps.py index fcb03b38..65cecd30 100644 --- a/GPy/plotting/matplot_dep/maps.py +++ b/GPy/plotting/matplot_dep/maps.py @@ -159,10 +159,10 @@ def new_shape_string(sf,name,regex,field=2,type=None): newshp.line(parts=_parts) newshp.records.append(sr.record) - print len(sr.record) + print(len(sr.record)) newshp.save(name) - print index + print(index) def apply_bbox(sf,ax): """ diff --git a/GPy/plotting/matplot_dep/visualize.py b/GPy/plotting/matplot_dep/visualize.py index 9ff41730..50eb4b82 100644 --- a/GPy/plotting/matplot_dep/visualize.py +++ b/GPy/plotting/matplot_dep/visualize.py @@ -225,8 +225,8 @@ class lvm_dimselect(lvm): self.labels = labels lvm.__init__(self,vals,model,data_visualize,latent_axes,sense_axes,latent_index) self.show_sensitivities() - print self.latent_values - print "use left and right mouse buttons to select dimensions" + print(self.latent_values) + print("use left and right mouse buttons to select dimensions") def on_click(self, event): @@ -255,7 +255,7 @@ class lvm_dimselect(lvm): def on_leave(self,event): - print type(self.latent_values) + print(type(self.latent_values)) latent_values = self.latent_values.copy() y = self.model.predict(latent_values[None,:])[0] 
self.data_visualize.modify(y) From c5b91e543ab19c46691a426ebef34d6f4441e395 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 09:03:04 +0000 Subject: [PATCH 11/99] Convert print to function for Python 3 compatibility --- GPy/util/block_matrices.py | 2 +- GPy/util/classification.py | 10 +++--- GPy/util/datasets.py | 64 +++++++++++++++++------------------ GPy/util/debug.py | 8 ++--- GPy/util/gpu_init.py | 2 +- GPy/util/linalg.py | 8 ++--- GPy/util/warping_functions.py | 2 +- 7 files changed, 48 insertions(+), 48 deletions(-) diff --git a/GPy/util/block_matrices.py b/GPy/util/block_matrices.py index 95920868..2d68fd47 100644 --- a/GPy/util/block_matrices.py +++ b/GPy/util/block_matrices.py @@ -23,4 +23,4 @@ if __name__=='__main__': A = np.zeros((5,5)) B = get_blocks(A,[2,3]) B[0,0] += 7 - print B + print(B) diff --git a/GPy/util/classification.py b/GPy/util/classification.py index c0859793..69609091 100644 --- a/GPy/util/classification.py +++ b/GPy/util/classification.py @@ -25,9 +25,9 @@ def conf_matrix(p,labels,names=['1','0'],threshold=.5,show=True): true_0 = labels.size - true_1 - false_0 - false_1 error = (false_1 + false_0)/np.float(labels.size) if show: - print 100. - error * 100,'% instances correctly classified' - print '%-10s| %-10s| %-10s| ' % ('',names[0],names[1]) - print '----------|------------|------------|' - print '%-10s| %-10s| %-10s| ' % (names[0],true_1,false_0) - print '%-10s| %-10s| %-10s| ' % (names[1],false_1,true_0) + print(100. 
- error * 100,'% instances correctly classified') + print('%-10s| %-10s| %-10s| ' % ('',names[0],names[1])) + print('----------|------------|------------|') + print('%-10s| %-10s| %-10s| ' % (names[0],true_1,false_0)) + print('%-10s| %-10s| %-10s| ' % (names[1],false_1,true_0)) return error,true_1, false_1, true_0, false_0 diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 10835463..346a9c45 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -75,7 +75,7 @@ def prompt_user(prompt): elif choice in no: return False else: - print("Your response was a " + choice) + print(("Your response was a " + choice)) print("Please respond with 'yes', 'y' or 'no', 'n'") #return prompt_user() @@ -99,7 +99,7 @@ def download_url(url, store_directory, save_name=None, messages=True, suffix='') """Download a file from a url and save it to disk.""" i = url.rfind('/') file = url[i+1:] - print file + print(file) dir_name = os.path.join(data_path, store_directory) if save_name is None: save_name = os.path.join(dir_name, file) @@ -107,7 +107,7 @@ def download_url(url, store_directory, save_name=None, messages=True, suffix='') if suffix is None: suffix='' - print "Downloading ", url, "->", save_name + print("Downloading ", url, "->", save_name) if not os.path.exists(dir_name): os.makedirs(dir_name) try: @@ -150,7 +150,7 @@ def download_url(url, store_directory, save_name=None, messages=True, suffix='') sys.stdout.write(status) sys.stdout.flush() sys.stdout.write(" "*(len(status)) + "\r") - print status + print(status) # if we wanted to get more sophisticated maybe we should check the response code here again even for successes. 
#with open(save_name, 'wb') as f: # f.write(response.read()) @@ -159,32 +159,32 @@ def download_url(url, store_directory, save_name=None, messages=True, suffix='') def authorize_download(dataset_name=None): """Check with the user that the are happy with terms and conditions for the data set.""" - print('Acquiring resource: ' + dataset_name) + print(('Acquiring resource: ' + dataset_name)) # TODO, check resource is in dictionary! print('') dr = data_resources[dataset_name] print('Details of data: ') - print(dr['details']) + print((dr['details'])) print('') if dr['citation']: print('Please cite:') - print(dr['citation']) + print((dr['citation'])) print('') if dr['size']: - print('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.') + print(('After downloading the data will take up ' + str(dr['size']) + ' bytes of space.')) print('') - print('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.') + print(('Data will be stored in ' + os.path.join(data_path, dataset_name) + '.')) print('') if overide_manual_authorize: if dr['license']: print('You have agreed to the following license:') - print(dr['license']) + print((dr['license'])) print('') return True else: if dr['license']: print('You must also agree to the following license:') - print(dr['license']) + print((dr['license'])) print('') return prompt_user('Do you wish to proceed with the download? [yes/no]') @@ -495,18 +495,18 @@ def google_trends(query_terms=['big data', 'machine learning', 'data science'], file = 'data.csv' file_name = os.path.join(dir_path,file) if not os.path.exists(file_name) or refresh_data: - print "Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. Failure at this point may be due to such blocks." + print("Accessing Google trends to acquire the data. Note that repeated accesses will result in a block due to a google terms of service violation. 
Failure at this point may be due to such blocks.") # quote the query terms. quoted_terms = [] for term in query_terms: quoted_terms.append(urllib2.quote(term)) - print "Query terms: ", ', '.join(query_terms) + print("Query terms: ", ', '.join(query_terms)) - print "Fetching query:" + print("Fetching query:") query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms) data = urllib2.urlopen(query).read() - print "Done." + print("Done.") # In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD. header = """// Data table response\ngoogle.visualization.Query.setResponse(""" data = data[len(header):-2] @@ -520,8 +520,8 @@ def google_trends(query_terms=['big data', 'machine learning', 'data science'], df.to_csv(file_name) else: - print "Reading cached data for google trends. To refresh the cache set 'refresh_data=True' when calling this function." - print "Query terms: ", ', '.join(query_terms) + print("Reading cached data for google trends. 
To refresh the cache set 'refresh_data=True' when calling this function.") + print("Query terms: ", ', '.join(query_terms)) df = pandas.read_csv(file_name, parse_dates=[0]) @@ -679,11 +679,11 @@ def ripley_synth(data_set='ripley_prnn_data'): def global_average_temperature(data_set='global_temperature', num_train=1000, refresh_data=False): path = os.path.join(data_path, data_set) if data_available(data_set) and not refresh_data: - print 'Using cached version of the data set, to use latest version set refresh_data to True' + print('Using cached version of the data set, to use latest version set refresh_data to True') else: download_data(data_set) data = np.loadtxt(os.path.join(data_path, data_set, 'GLBTS.long.data')) - print 'Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0] + print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0]) allX = data[data[:, 3]!=-99.99, 2:3] allY = data[data[:, 3]!=-99.99, 3:4] X = allX[:num_train, 0:1] @@ -695,11 +695,11 @@ def global_average_temperature(data_set='global_temperature', num_train=1000, re def mauna_loa(data_set='mauna_loa', num_train=545, refresh_data=False): path = os.path.join(data_path, data_set) if data_available(data_set) and not refresh_data: - print 'Using cached version of the data set, to use latest version set refresh_data to True' + print('Using cached version of the data set, to use latest version set refresh_data to True') else: download_data(data_set) data = np.loadtxt(os.path.join(data_path, data_set, 'co2_mm_mlo.txt')) - print 'Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0] + print('Most recent data observation from month ', data[-1, 1], ' in year ', data[-1, 0]) allX = data[data[:, 3]!=-99.99, 2:3] allY = data[data[:, 3]!=-99.99, 3:4] X = allX[:num_train, 0:1] @@ -802,10 +802,10 @@ def hapmap3(data_set='hapmap3'): if not reduce(lambda a,b: a and b, map(os.path.exists, preprocessed_data_paths)): if not 
overide_manual_authorize and not prompt_user("Preprocessing requires ~25GB " "of memory and can take a (very) long time, continue? [Y/n]"): - print "Preprocessing required for further usage." + print("Preprocessing required for further usage.") return status = "Preprocessing data, please be patient..." - print status + print(status) def write_status(message, progress, status): stdout.write(" "*len(status)); stdout.write("\r"); stdout.flush() status = r"[{perc: <{ll}}] {message: <13s}".format(message=message, ll=20, @@ -873,13 +873,13 @@ def hapmap3(data_set='hapmap3'): inandf = DataFrame(index=metadf.index, data=inan, columns=mapnp[:,1]) inandf.to_pickle(preprocessed_data_paths[2]) status=write_status('done :)', 100, status) - print '' + print('') else: - print "loading snps..." + print("loading snps...") snpsdf = read_pickle(preprocessed_data_paths[0]) - print "loading metainfo..." + print("loading metainfo...") metadf = read_pickle(preprocessed_data_paths[1]) - print "loading nan entries..." 
+ print("loading nan entries...") inandf = read_pickle(preprocessed_data_paths[2]) snps = snpsdf.values populations = metadf.population.values.astype('S3') @@ -1001,7 +1001,7 @@ def singlecell_rna_seq_deng(dataset='singlecell_deng'): # Extract the tar file filename = os.path.join(dir_path, 'GSE45719_Raw.tar') with tarfile.open(filename, 'r') as files: - print "Extracting Archive {}...".format(files.name) + print("Extracting Archive {}...".format(files.name)) data = None gene_info = None message = '' @@ -1010,9 +1010,9 @@ def singlecell_rna_seq_deng(dataset='singlecell_deng'): for i, file_info in enumerate(members): f = files.extractfile(file_info) inner = read_csv(f, sep='\t', header=0, compression='gzip', index_col=0) - print ' '*(len(message)+1) + '\r', + print(' '*(len(message)+1) + '\r', end=' ') message = "{: >7.2%}: Extracting: {}".format(float(i+1)/overall, file_info.name[:20]+"...txt.gz") - print message, + print(message, end=' ') if data is None: data = inner.RPKM.to_frame() data.columns = [file_info.name[:-18]] @@ -1035,8 +1035,8 @@ def singlecell_rna_seq_deng(dataset='singlecell_deng'): sys.stdout.write(' '*len(message) + '\r') sys.stdout.flush() - print - print "Read Archive {}".format(files.name) + print() + print("Read Archive {}".format(files.name)) return data_details_return({'Y': data, 'series_info': info, diff --git a/GPy/util/debug.py b/GPy/util/debug.py index 00107f5e..d691ad82 100644 --- a/GPy/util/debug.py +++ b/GPy/util/debug.py @@ -13,7 +13,7 @@ def checkFinite(arr, name=None): if np.any(np.logical_not(np.isfinite(arr))): idx = np.where(np.logical_not(np.isfinite(arr)))[0] - print name+' at indices '+str(idx)+' have not finite values: '+str(arr[idx])+'!' 
+ print(name+' at indices '+str(idx)+' have not finite values: '+str(arr[idx])+'!') return False return True @@ -23,13 +23,13 @@ def checkFullRank(m, tol=1e-10, name=None, force_check=False): assert len(m.shape)==2 and m.shape[0]==m.shape[1], 'The input of checkFullRank has to be a square matrix!' if not force_check and m.shape[0]>=10000: - print 'The size of '+name+'is too big to check (>=10000)!' + print('The size of '+name+'is too big to check (>=10000)!') return True s = np.real(np.linalg.eigvals(m)) if s.min()/s.max()=pycuda.driver.Device.count(): - print '['+MPI.Get_processor_name()+'] more processes than the GPU numbers!' + print('['+MPI.Get_processor_name()+'] more processes than the GPU numbers!') #MPI.COMM_WORLD.Abort() raise gpu_device = pycuda.driver.Device(gpuid) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index 216a1050..d7ad5d61 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -34,7 +34,7 @@ if config.getboolean('anaconda', 'installed') and config.getboolean('anaconda', dsyrk = mkl_rt.dsyrk dsyr = mkl_rt.dsyr _blas_available = True - print 'anaconda installed and mkl is loaded' + print('anaconda installed and mkl is loaded') except: _blas_available = False else: @@ -64,7 +64,7 @@ def force_F_ordered(A): """ if A.flags['F_CONTIGUOUS']: return A - print "why are your arrays not F order?" + print("why are your arrays not F order?") return np.asfortranarray(A) # def jitchol(A, maxtries=5): @@ -288,7 +288,7 @@ def pca(Y, input_dim): """ if not np.allclose(Y.mean(axis=0), 0.0): - print "Y is not zero mean, centering it locally (GPy.util.linalg.pca)" + print("Y is not zero mean, centering it locally (GPy.util.linalg.pca)") # Y -= Y.mean(axis=0) @@ -423,7 +423,7 @@ def symmetrify(A, upper=False): try: symmetrify_weave(A, upper) except: - print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n" + print("\n Weave compilation failed. 
Falling back to (slower) numpy implementation\n") config.set('weave', 'working', 'False') symmetrify_numpy(A, upper) else: diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index a0a385e0..70804c67 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -207,7 +207,7 @@ class TanhWarpingFunction_d(WarpingFunction): y -= update it += 1 if it == max_iterations: - print "WARNING!!! Maximum number of iterations reached in f_inv " + print("WARNING!!! Maximum number of iterations reached in f_inv ") return y From 8a7123f55a75092927039e17cf35acc84e37d652 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 09:10:27 +0000 Subject: [PATCH 12/99] Fixed Python 2 compatibility --- GPy/util/datasets.py | 1 + 1 file changed, 1 insertion(+) diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 346a9c45..3ba46f52 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -1,3 +1,4 @@ +from __future__ import print_function import csv import os import copy From 70c8f4a410a300546fbc70ac1bebf592dc248624 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 09:16:21 +0000 Subject: [PATCH 13/99] Typo --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2e9dc58a..9111a48c 100644 --- a/README.md +++ b/README.md @@ -11,12 +11,12 @@ A Gaussian processes framework in Python. Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GPy.png) ### Moving to Python 3 -Work is underway to make GPy run on Python 3. We are not there yet! Changes performed so far have retained compatibility with Python 2.6 and abive. +Work is underway to make GPy run on Python 3. We are not there yet! Changes performed so far have retained compatibility with Python 2.6 and above. Work done so far: -* Use 2to3 to fix relative imports -* use 2to3 to convert print from statement to function. 
Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. +* Used 2to3 to fix relative imports +* Used 2to3 to convert print from statement to function. Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. ### Citation From c4fb58176dbaaec2e5b7fce216aac5656d59225d Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 13:33:39 +0000 Subject: [PATCH 14/99] Exception fixes for Python 3 compat --- GPy/core/mapping.py | 4 ++-- GPy/core/model.py | 4 ++-- GPy/core/parameterization/lists_and_dicts.py | 2 +- GPy/core/parameterization/parameter_core.py | 14 +++++++------- GPy/core/parameterization/parameterized.py | 18 +++++++++--------- GPy/core/parameterization/variational.py | 4 ++-- GPy/core/sparse_gp.py | 2 +- GPy/core/symbolic.py | 2 +- 8 files changed, 25 insertions(+), 25 deletions(-) diff --git a/GPy/core/mapping.py b/GPy/core/mapping.py index 163db0c9..c84087cc 100644 --- a/GPy/core/mapping.py +++ b/GPy/core/mapping.py @@ -60,7 +60,7 @@ class Mapping(Parameterized): from ..plotting.matplot_dep import models_plots mapping_plots.plot_mapping(self,*args) else: - raise NameError, "matplotlib package has not been imported." + raise NameError("matplotlib package has not been imported.") class Bijective_mapping(Mapping): """ @@ -111,7 +111,7 @@ class Mapping_check_model(Model): return (self.dL_df*self.mapping.f(self.X)).sum() def _log_likelihood_gradients(self): - raise NotImplementedError, "This needs to be implemented to use the Mapping_check_model class." 
+ raise NotImplementedError("This needs to be implemented to use the Mapping_check_model class.") class Mapping_check_df_dtheta(Mapping_check_model): """This class allows gradient checks for the gradient of a mapping with respect to parameters. """ diff --git a/GPy/core/model.py b/GPy/core/model.py index 348cebf1..65a85589 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -30,7 +30,7 @@ class Model(Parameterized): self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500) def log_likelihood(self): - raise NotImplementedError, "this needs to be implemented to use the model class" + raise NotImplementedError("this needs to be implemented to use the model class") def _log_likelihood_gradients(self): return self.gradient.copy() @@ -119,7 +119,7 @@ class Model(Parameterized): DEPRECATED. """ - raise DeprecationWarning, 'parameters now have default constraints' + raise DeprecationWarning('parameters now have default constraints') def objective_function(self): """ diff --git a/GPy/core/parameterization/lists_and_dicts.py b/GPy/core/parameterization/lists_and_dicts.py index 626603ec..2d774a76 100644 --- a/GPy/core/parameterization/lists_and_dicts.py +++ b/GPy/core/parameterization/lists_and_dicts.py @@ -32,7 +32,7 @@ class ArrayList(list): if el is item: return index index += 1 - raise ValueError, "{} is not in list".format(item) + raise ValueError("{} is not in list".format(item)) pass class ObserverList(object): diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 06991ab0..02cb0a12 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -36,7 +36,7 @@ def adjust_name_for_printing(name): name = name.replace("/", "_l_").replace("@", '_at_') name = name.replace("(", "_of_").replace(")", "") if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None: - raise NameError, "name {} converted to {} cannot be further converted to valid 
python variable name!".format(name2, name) + raise NameError("name {} converted to {} cannot be further converted to valid python variable name!".format(name2, name)) return name return '' @@ -65,13 +65,13 @@ class Parentable(object): Gets called, when the parent changed, so we can adjust our inner attributes according to the new parent. """ - raise NotImplementedError, "shouldnt happen, Parentable objects need to be able to change their parent" + raise NotImplementedError("shouldnt happen, Parentable objects need to be able to change their parent") def _disconnect_parent(self, *args, **kw): """ Disconnect this object from its parent """ - raise NotImplementedError, "Abstract superclass" + raise NotImplementedError("Abstract superclass") @property def _highest_parent_(self): @@ -214,7 +214,7 @@ class Gradcheckable(Pickleable, Parentable): Perform the checkgrad on the model. TODO: this can be done more efficiently, when doing it inside here """ - raise HierarchyError, "This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!" + raise HierarchyError("This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!") class Nameable(Gradcheckable): """ @@ -652,10 +652,10 @@ class OptimizationHandlable(Indexable): self.trigger_update() def _get_params_transformed(self): - raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!" + raise DeprecationWarning("_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!") # def _set_params_transformed(self, p): - raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!" 
+ raise DeprecationWarning("_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!") def _trigger_params_changed(self, trigger_parent=True): """ @@ -701,7 +701,7 @@ class OptimizationHandlable(Indexable): Return the number of parameters of this parameter_handle. Param objects will always return 0. """ - raise NotImplemented, "Abstract, please implement in respective classes" + raise NotImplemented("Abstract, please implement in respective classes") def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True): """ diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index 1a5ff123..62914636 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -131,7 +131,7 @@ class Parameterized(Parameterizable): if param.has_parent(): def visit(parent, self): if parent is self: - raise HierarchyError, "You cannot add a parameter twice into the hierarchy" + raise HierarchyError("You cannot add a parameter twice into the hierarchy") param.traverse_parents(visit, self) param._parent_.unlink_parameter(param) # make sure the size is set @@ -173,7 +173,7 @@ class Parameterized(Parameterizable): self._highest_parent_._connect_fixes() else: - raise HierarchyError, """Parameter exists already, try making a copy""" + raise HierarchyError("""Parameter exists already, try making a copy""") def link_parameters(self, *parameters): @@ -189,9 +189,9 @@ class Parameterized(Parameterizable): """ if not param in self.parameters: try: - raise RuntimeError, "{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name) + raise RuntimeError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name)) except AttributeError: - raise RuntimeError, "{} does not seem to be a parameter, remove parameters 
directly from their respective parents".format(str(param)) + raise RuntimeError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param))) start = sum([p.size for p in self.parameters[:param._parent_index_]]) self._remove_parameter_name(param) @@ -215,9 +215,9 @@ class Parameterized(Parameterizable): self._highest_parent_._notify_parent_change() def add_parameter(self, *args, **kwargs): - raise DeprecationWarning, "add_parameter was renamed to link_parameter to avoid confusion of setting variables, use link_parameter instead" + raise DeprecationWarning("add_parameter was renamed to link_parameter to avoid confusion of setting variables, use link_parameter instead") def remove_parameter(self, *args, **kwargs): - raise DeprecationWarning, "remove_parameter was renamed to unlink_parameter to avoid confusion of setting variables, use unlink_parameter instead" + raise DeprecationWarning("remove_parameter was renamed to unlink_parameter to avoid confusion of setting variables, use unlink_parameter instead") def _connect_parameters(self, ignore_added_names=False): # connect parameterlist to this parameterized object @@ -237,7 +237,7 @@ class Parameterized(Parameterizable): self._param_slices_ = [] for i, p in enumerate(self.parameters): if not p.param_array.flags['C_CONTIGUOUS']: - raise ValueError, "This should not happen! Please write an email to the developers with the code, which reproduces this error. All parameter arrays must be C_CONTIGUOUS" + raise ValueError("This should not happen! Please write an email to the developers with the code, which reproduces this error. 
All parameter arrays must be C_CONTIGUOUS") p._parent_ = self p._parent_index_ = i @@ -279,7 +279,7 @@ class Parameterized(Parameterizable): else: if paramlist is None: paramlist = self.grep_param_names(name) - if len(paramlist) < 1: raise AttributeError, name + if len(paramlist) < 1: raise AttributeError(name) if len(paramlist) == 1: if isinstance(paramlist[-1], Parameterized): paramlist = paramlist[-1].flattened_parameters @@ -295,7 +295,7 @@ class Parameterized(Parameterizable): try: self.param_array[name] = value except: - raise ValueError, "Setting by slice or index only allowed with array-like" + raise ValueError("Setting by slice or index only allowed with array-like") self.trigger_update() else: try: param = self.__getitem__(name, paramlist) diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py index 25efdc92..842183fb 100644 --- a/GPy/core/parameterization/variational.py +++ b/GPy/core/parameterization/variational.py @@ -16,13 +16,13 @@ class VariationalPrior(Parameterized): super(VariationalPrior, self).__init__(name=name, **kw) def KL_divergence(self, variational_posterior): - raise NotImplementedError, "override this for variational inference of latent space" + raise NotImplementedError("override this for variational inference of latent space") def update_gradients_KL(self, variational_posterior): """ updates the gradients for mean and variance **in place** """ - raise NotImplementedError, "override this for variational inference of latent space" + raise NotImplementedError("override this for variational inference of latent space") class NormalPrior(VariationalPrior): def KL_divergence(self, variational_posterior): diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 96e3dbe7..ebaf721f 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -48,7 +48,7 @@ class SparseGP(GP): inference_method = var_dtc.VarDTC(limit=1 if not self.missing_data else Y.shape[1]) else: #inference_method = ?? 
- raise NotImplementedError, "what to do what to do?" + raise NotImplementedError("what to do what to do?") print("defaulting to ", inference_method, "for latent function inference") self.Z = Param('inducing inputs', Z) diff --git a/GPy/core/symbolic.py b/GPy/core/symbolic.py index ed3a9d59..4a9fcb76 100644 --- a/GPy/core/symbolic.py +++ b/GPy/core/symbolic.py @@ -223,7 +223,7 @@ class Symbolic_core(): def code_gradients_cacheable(self, function, variable): if variable not in self.cacheable: - raise RuntimeError, variable + ' must be a cacheable.' + raise RuntimeError(variable + ' must be a cacheable.') lcode = 'gradients_' + variable + ' = np.zeros_like(' + variable + ')\n' lcode += 'self.update_cache(' + ', '.join(self.cacheable) + ')\n' for i, theta in enumerate(self.variables[variable]): From 7c6ff2982fce37988b53b8aa81ebc0958bfffca7 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 13:36:45 +0000 Subject: [PATCH 15/99] Exception fixes for Python 3 compat --- GPy/inference/latent_function_inference/dtc.py | 4 ++-- .../expectation_propagation_dtc.py | 2 +- GPy/inference/latent_function_inference/fitc.py | 2 +- GPy/inference/latent_function_inference/posterior.py | 6 +++--- GPy/inference/latent_function_inference/var_dtc.py | 2 +- GPy/inference/optimization/optimization.py | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py index 5590a079..57a451b2 100644 --- a/GPy/inference/latent_function_inference/dtc.py +++ b/GPy/inference/latent_function_inference/dtc.py @@ -29,7 +29,7 @@ class DTC(LatentFunctionInference): #make sure the noise is not hetero beta = 1./likelihood.gaussian_variance(Y_metadata) if beta.size > 1: - raise NotImplementedError, "no hetero noise with this implementation of DTC" + raise NotImplementedError("no hetero noise with this implementation of DTC") Kmm = kern.K(Z) Knn = kern.Kdiag(X) @@ -97,7 +97,7 @@ class 
vDTC(object): #make sure the noise is not hetero beta = 1./likelihood.gaussian_variance(Y_metadata) if beta.size > 1: - raise NotImplementedError, "no hetero noise with this implementation of DTC" + raise NotImplementedError("no hetero noise with this implementation of DTC") Kmm = kern.K(Z) Knn = kern.Kdiag(X) diff --git a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py index 0f972a84..e25df388 100644 --- a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py +++ b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py @@ -314,7 +314,7 @@ def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, dL_dR = None elif het_noise: if uncertain_inputs: - raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented" + raise NotImplementedError("heteroscedatic derivates with uncertain inputs not implemented") else: #from ...util.linalg import chol_inv #LBi = chol_inv(LB) diff --git a/GPy/inference/latent_function_inference/fitc.py b/GPy/inference/latent_function_inference/fitc.py index a184c6c4..abe53f3d 100644 --- a/GPy/inference/latent_function_inference/fitc.py +++ b/GPy/inference/latent_function_inference/fitc.py @@ -26,7 +26,7 @@ class FITC(LatentFunctionInference): #make sure the noise is not hetero sigma_n = likelihood.gaussian_variance(Y_metadata) if sigma_n.size >1: - raise NotImplementedError, "no hetero noise with this implementation of FITC" + raise NotImplementedError("no hetero noise with this implementation of FITC") Kmm = kern.K(Z) Knn = kern.Kdiag(X) diff --git a/GPy/inference/latent_function_inference/posterior.py b/GPy/inference/latent_function_inference/posterior.py index 34f0b3bb..73d65df6 100644 --- a/GPy/inference/latent_function_inference/posterior.py +++ b/GPy/inference/latent_function_inference/posterior.py @@ -52,7 +52,7 @@ class Posterior(object): or ((mean is not 
None) and (cov is not None)): pass # we have sufficient to compute the posterior else: - raise ValueError, "insufficient information to compute the posterior" + raise ValueError("insufficient information to compute the posterior") self._K_chol = K_chol self._K = K @@ -134,13 +134,13 @@ class Posterior(object): #self._woodbury_chol = jitchol(W) #try computing woodbury chol from cov elif self._covariance is not None: - raise NotImplementedError, "TODO: check code here" + raise NotImplementedError("TODO: check code here") B = self._K - self._covariance tmp, _ = dpotrs(self.K_chol, B) self._woodbury_inv, _ = dpotrs(self.K_chol, tmp.T) _, _, self._woodbury_chol, _ = pdinv(self._woodbury_inv) else: - raise ValueError, "insufficient information to compute posterior" + raise ValueError("insufficient information to compute posterior") return self._woodbury_chol @property diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index db59df14..1be2557b 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -213,7 +213,7 @@ def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, dL_dR = None elif het_noise: if uncertain_inputs: - raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented" + raise NotImplementedError("heteroscedatic derivates with uncertain inputs not implemented") else: #from ...util.linalg import chol_inv #LBi = chol_inv(LB) diff --git a/GPy/inference/optimization/optimization.py b/GPy/inference/optimization/optimization.py index 0d6887e5..600de35d 100644 --- a/GPy/inference/optimization/optimization.py +++ b/GPy/inference/optimization/optimization.py @@ -54,7 +54,7 @@ class Optimizer(): self.time = str(end - start) def opt(self, f_fp=None, f=None, fp=None): - raise NotImplementedError, "this needs to be implemented to use the optimizer class" + raise 
NotImplementedError("this needs to be implemented to use the optimizer class") def plot(self): """ From f961520c4220fc803e5f2416f6f96b92ba5e57cc Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 13:38:13 +0000 Subject: [PATCH 16/99] Exception fixes for Python 3 compat --- GPy/kern/_src/ODE_UY.py | 2 +- GPy/kern/_src/ODE_UYC.py | 2 +- GPy/kern/_src/ODE_st.py | 2 +- GPy/kern/_src/ODE_t.py | 2 +- GPy/kern/_src/add.py | 2 +- GPy/kern/_src/psi_comp/__init__.py | 8 ++++---- GPy/kern/_src/stationary.py | 4 ++-- GPy/kern/_src/symbolic.py | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/GPy/kern/_src/ODE_UY.py b/GPy/kern/_src/ODE_UY.py index b4a2b42d..eef8609b 100644 --- a/GPy/kern/_src/ODE_UY.py +++ b/GPy/kern/_src/ODE_UY.py @@ -114,7 +114,7 @@ class ODE_UY(Kern): elif i==1: Kdiag[s1]+= Vu*Vy*(k1+k2+k3) else: - raise ValueError, "invalid input/output index" + raise ValueError("invalid input/output index") #Kdiag[slices[0][0]]+= self.variance_U #matern32 diag #Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag return Kdiag diff --git a/GPy/kern/_src/ODE_UYC.py b/GPy/kern/_src/ODE_UYC.py index 1722d2e1..4c39a9c9 100644 --- a/GPy/kern/_src/ODE_UYC.py +++ b/GPy/kern/_src/ODE_UYC.py @@ -115,7 +115,7 @@ class ODE_UYC(Kern): elif i==1: Kdiag[s1]+= Vu*Vy*(k1+k2+k3) else: - raise ValueError, "invalid input/output index" + raise ValueError("invalid input/output index") #Kdiag[slices[0][0]]+= self.variance_U #matern32 diag #Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag return Kdiag diff --git a/GPy/kern/_src/ODE_st.py b/GPy/kern/_src/ODE_st.py index 665be230..1c3b661b 100644 --- a/GPy/kern/_src/ODE_st.py +++ b/GPy/kern/_src/ODE_st.py @@ -135,7 +135,7 @@ class ODE_st(Kern): Kdiag[s1]+= b**2*k1 - 2*a*c*k2 + a**2*k3 + c**2*vyt*vyx #Kdiag[s1]+= Vu*Vy*(k1+k2+k3) else: - raise ValueError, "invalid input/output index" + raise ValueError("invalid input/output index") return Kdiag diff --git 
a/GPy/kern/_src/ODE_t.py b/GPy/kern/_src/ODE_t.py index a470cbec..268917ae 100644 --- a/GPy/kern/_src/ODE_t.py +++ b/GPy/kern/_src/ODE_t.py @@ -85,7 +85,7 @@ class ODE_t(Kern): Kdiag[s1]+= k1 + vyt+self.ubias #Kdiag[s1]+= Vu*Vy*(k1+k2+k3) else: - raise ValueError, "invalid input/output index" + raise ValueError("invalid input/output index") return Kdiag diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 4c72a254..0f612f5b 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -111,7 +111,7 @@ class Add(CombinationKernel): psi2 += np.einsum('nm,no->mo',tmp1,tmp2)+np.einsum('nm,no->mo',tmp2,tmp1) #(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :]) else: - raise NotImplementedError, "psi2 cannot be computed for this kernel" + raise NotImplementedError("psi2 cannot be computed for this kernel") return psi2 def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): diff --git a/GPy/kern/_src/psi_comp/__init__.py b/GPy/kern/_src/psi_comp/__init__.py index a277ff02..74aacd75 100644 --- a/GPy/kern/_src/psi_comp/__init__.py +++ b/GPy/kern/_src/psi_comp/__init__.py @@ -17,7 +17,7 @@ class PSICOMP_RBF(Pickleable): elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior): return ssrbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior) else: - raise ValueError, "unknown distriubtion received for psi-statistics" + raise ValueError("unknown distriubtion received for psi-statistics") @Cache_this(limit=2, ignore_args=(0,1,2,3)) def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior): @@ -26,7 +26,7 @@ class PSICOMP_RBF(Pickleable): elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior): return ssrbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior) else: - raise ValueError, "unknown distriubtion received for 
psi-statistics" + raise ValueError("unknown distriubtion received for psi-statistics") def _setup_observers(self): pass @@ -40,7 +40,7 @@ class PSICOMP_Linear(Pickleable): elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior): return sslinear_psi_comp.psicomputations(variance, Z, variational_posterior) else: - raise ValueError, "unknown distriubtion received for psi-statistics" + raise ValueError("unknown distriubtion received for psi-statistics") @Cache_this(limit=2, ignore_args=(0,1,2,3)) def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior): @@ -49,7 +49,7 @@ class PSICOMP_Linear(Pickleable): elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior): return sslinear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior) else: - raise ValueError, "unknown distriubtion received for psi-statistics" + raise ValueError("unknown distriubtion received for psi-statistics") def _setup_observers(self): pass \ No newline at end of file diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index 426296f7..0cd85b38 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -65,10 +65,10 @@ class Stationary(Kern): self.link_parameters(self.variance, self.lengthscale) def K_of_r(self, r): - raise NotImplementedError, "implement the covariance function as a fn of r to use this class" + raise NotImplementedError("implement the covariance function as a fn of r to use this class") def dK_dr(self, r): - raise NotImplementedError, "implement derivative of the covariance function wrt r to use this class" + raise NotImplementedError("implement derivative of the covariance function wrt r to use this class") @Cache_this(limit=5, ignore_args=()) def K(self, X, X2=None): diff --git a/GPy/kern/_src/symbolic.py b/GPy/kern/_src/symbolic.py index 006af9dc..9ca20ea5 100644 --- a/GPy/kern/_src/symbolic.py +++ 
b/GPy/kern/_src/symbolic.py @@ -11,7 +11,7 @@ class Symbolic(Kern, Symbolic_core): def __init__(self, input_dim, k=None, output_dim=1, name='symbolic', parameters=None, active_dims=None, operators=None, func_modules=[]): if k is None: - raise ValueError, "You must provide an argument for the covariance function." + raise ValueError("You must provide an argument for the covariance function.") Kern.__init__(self, input_dim, active_dims, name=name) kdiag = k From 2b8ef1041bf95d7f67ca4fc4d4a3d73cb7e909ff Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 13:43:55 +0000 Subject: [PATCH 17/99] Exception fixes for Python 3 compat --- GPy/likelihoods/likelihood.py | 2 +- GPy/likelihoods/link_functions.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index 33698eb2..813f912f 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -203,7 +203,7 @@ class Likelihood(Parameterized): def _conditional_mean(self, f): """Quadrature calculation of the conditional mean: E(Y_star|f)""" - raise NotImplementedError, "implement this function to make predictions" + raise NotImplementedError("implement this function to make predictions") def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None): """ diff --git a/GPy/likelihoods/link_functions.py b/GPy/likelihoods/link_functions.py index a4ddc760..60e260e7 100644 --- a/GPy/likelihoods/link_functions.py +++ b/GPy/likelihoods/link_functions.py @@ -182,7 +182,7 @@ class Heaviside(GPTransformation): return np.where(f>0, 1, 0) def dtransf_df(self,f): - raise NotImplementedError, "This function is not differentiable!" + raise NotImplementedError("This function is not differentiable!") def d2transf_df2(self,f): - raise NotImplementedError, "This function is not differentiable!" 
+ raise NotImplementedError("This function is not differentiable!") From 74f8caba2bbca9bdc4fa5f63d7ddb26f372f3add Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 14:23:12 +0000 Subject: [PATCH 18/99] Exception fixes for Python 3 compat --- GPy/models/sparse_gp_minibatch.py | 2 +- GPy/plotting/matplot_dep/base_plots.py | 4 ++-- GPy/plotting/matplot_dep/dim_reduction_plots.py | 4 ++-- GPy/plotting/matplot_dep/kernel_plots.py | 6 +++--- GPy/plotting/matplot_dep/mapping_plots.py | 2 +- GPy/plotting/matplot_dep/models_plots.py | 2 +- GPy/plotting/matplot_dep/priors_plots.py | 2 +- GPy/plotting/matplot_dep/visualize.py | 8 ++++---- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/GPy/models/sparse_gp_minibatch.py b/GPy/models/sparse_gp_minibatch.py index a6081e61..d3bbe5fe 100644 --- a/GPy/models/sparse_gp_minibatch.py +++ b/GPy/models/sparse_gp_minibatch.py @@ -50,7 +50,7 @@ class SparseGPMiniBatch(SparseGP): inference_method = var_dtc.VarDTC(limit=1 if not self.missing_data else Y.shape[1]) else: #inference_method = ?? - raise NotImplementedError, "what to do what to do?" + raise NotImplementedError("what to do what to do?") print("defaulting to ", inference_method, "for latent function inference") self.kl_factr = 1. 
diff --git a/GPy/plotting/matplot_dep/base_plots.py b/GPy/plotting/matplot_dep/base_plots.py index b4142342..f25aee49 100644 --- a/GPy/plotting/matplot_dep/base_plots.py +++ b/GPy/plotting/matplot_dep/base_plots.py @@ -133,7 +133,7 @@ def x_frame1D(X,plot_limits=None,resolution=None): elif len(plot_limits)==2: xmin, xmax = plot_limits else: - raise ValueError, "Bad limits for plotting" + raise ValueError("Bad limits for plotting") Xnew = np.linspace(xmin,xmax,resolution or 200)[:,None] return Xnew, xmin, xmax @@ -149,7 +149,7 @@ def x_frame2D(X,plot_limits=None,resolution=None): elif len(plot_limits)==2: xmin, xmax = plot_limits else: - raise ValueError, "Bad limits for plotting" + raise ValueError("Bad limits for plotting") resolution = resolution or 50 xx,yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution] diff --git a/GPy/plotting/matplot_dep/dim_reduction_plots.py b/GPy/plotting/matplot_dep/dim_reduction_plots.py index 982f8fa9..2c243e13 100644 --- a/GPy/plotting/matplot_dep/dim_reduction_plots.py +++ b/GPy/plotting/matplot_dep/dim_reduction_plots.py @@ -27,7 +27,7 @@ def most_significant_input_dimensions(model, which_indices): try: input_1, input_2 = np.argsort(model.input_sensitivity())[::-1][:2] except: - raise ValueError, "cannot automatically determine which dimensions to plot, please pass 'which_indices'" + raise ValueError("cannot automatically determine which dimensions to plot, please pass 'which_indices'") else: input_1, input_2 = which_indices return input_1, input_2 @@ -133,7 +133,7 @@ def plot_latent(model, labels=None, which_indices=None, try: xmin, xmax, ymin, ymax = plot_limits except (TypeError, ValueError) as e: - raise e.__class__, "Wrong plot limits: {} given -> need (xmin, xmax, ymin, ymax)".format(plot_limits) + raise e.__class__("Wrong plot limits: {} given -> need (xmin, xmax, ymin, ymax)".format(plot_limits)) view = ImshowController(ax, plot_function, (xmin, ymin, xmax, ymax), resolution, aspect=aspect, 
interpolation='bilinear', diff --git a/GPy/plotting/matplot_dep/kernel_plots.py b/GPy/plotting/matplot_dep/kernel_plots.py index fc061ca7..aa015009 100644 --- a/GPy/plotting/matplot_dep/kernel_plots.py +++ b/GPy/plotting/matplot_dep/kernel_plots.py @@ -132,7 +132,7 @@ def plot(kernel,x=None, fignum=None, ax=None, title=None, plot_limits=None, reso elif len(plot_limits) == 2: xmin, xmax = plot_limits else: - raise ValueError, "Bad limits for plotting" + raise ValueError("Bad limits for plotting") Xnew = np.linspace(xmin, xmax, resolution or 201)[:, None] Kx = kernel.K(Xnew, x) @@ -154,7 +154,7 @@ def plot(kernel,x=None, fignum=None, ax=None, title=None, plot_limits=None, reso elif len(plot_limits) == 2: xmin, xmax = plot_limits else: - raise ValueError, "Bad limits for plotting" + raise ValueError("Bad limits for plotting") resolution = resolution or 51 xx, yy = np.mgrid[xmin[0]:xmax[0]:1j * resolution, xmin[1]:xmax[1]:1j * resolution] @@ -168,4 +168,4 @@ def plot(kernel,x=None, fignum=None, ax=None, title=None, plot_limits=None, reso ax.set_ylabel("x2") ax.set_title("k(x1,x2 ; %0.1f,%0.1f)" % (x[0, 0], x[0, 1])) else: - raise NotImplementedError, "Cannot plot a kernel with more than two input dimensions" + raise NotImplementedError("Cannot plot a kernel with more than two input dimensions") diff --git a/GPy/plotting/matplot_dep/mapping_plots.py b/GPy/plotting/matplot_dep/mapping_plots.py index 6156687d..53bc1de2 100644 --- a/GPy/plotting/matplot_dep/mapping_plots.py +++ b/GPy/plotting/matplot_dep/mapping_plots.py @@ -81,4 +81,4 @@ def plot_mapping(self, plot_limits=None, which_data='all', which_parts='all', re ax.set_ylim(xmin[1], xmax[1]) else: - raise NotImplementedError, "Cannot define a frame with more than two input dimensions" + raise NotImplementedError("Cannot define a frame with more than two input dimensions") diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index d2d5a8e2..5cdf69fc 100644 --- 
a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -175,7 +175,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', plots['inducing_inputs'] = ax.plot(Zu[:,0], Zu[:,1], 'wo') else: - raise NotImplementedError, "Cannot define a frame with more than two input dimensions" + raise NotImplementedError("Cannot define a frame with more than two input dimensions") return plots def plot_fit_f(model, *args, **kwargs): diff --git a/GPy/plotting/matplot_dep/priors_plots.py b/GPy/plotting/matplot_dep/priors_plots.py index 8f02a03b..39dad631 100644 --- a/GPy/plotting/matplot_dep/priors_plots.py +++ b/GPy/plotting/matplot_dep/priors_plots.py @@ -29,4 +29,4 @@ def plot(prior): pb.contour(xx, yy, zz, linewidths=2) else: - raise NotImplementedError, "Cannot define a frame with more than two input dimensions" + raise NotImplementedError("Cannot define a frame with more than two input dimensions") diff --git a/GPy/plotting/matplot_dep/visualize.py b/GPy/plotting/matplot_dep/visualize.py index 50eb4b82..97f2c88b 100644 --- a/GPy/plotting/matplot_dep/visualize.py +++ b/GPy/plotting/matplot_dep/visualize.py @@ -25,10 +25,10 @@ class data_show: # If no axes are defined, create some. 
def modify(self, vals): - raise NotImplementedError, "this needs to be implemented to use the data_show class" + raise NotImplementedError("this needs to be implemented to use the data_show class") def close(self): - raise NotImplementedError, "this needs to be implemented to use the data_show class" + raise NotImplementedError("this needs to be implemented to use the data_show class") class vpython_show(data_show): """ @@ -403,7 +403,7 @@ class mocap_data_show_vpython(vpython_show): self.modify_vertices() def process_values(self): - raise NotImplementedError, "this needs to be implemented to use the data_show class" + raise NotImplementedError("this needs to be implemented to use the data_show class") class mocap_data_show(matplotlib_show): """Base class for visualizing motion capture data.""" @@ -455,7 +455,7 @@ class mocap_data_show(matplotlib_show): self.axes.figure.canvas.draw() def process_values(self): - raise NotImplementedError, "this needs to be implemented to use the data_show class" + raise NotImplementedError("this needs to be implemented to use the data_show class") def initialize_axes(self, boundary=0.05): """Set up the axes with the right limits and scaling.""" From c6b43d91da24a3339b7dbb197bc8eb3d4a15cd9f Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 14:29:40 +0000 Subject: [PATCH 19/99] Exception fixes for Python 3 compat --- GPy/testing/kernel_tests.py | 2 +- GPy/util/caching.py | 4 ++-- GPy/util/choleskies.py | 2 +- GPy/util/config.py | 2 +- GPy/util/datasets.py | 2 +- GPy/util/linalg.py | 4 ++-- GPy/util/ln_diff_erfs.py | 2 +- GPy/util/mocap.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index c1bb9265..3b09d6e7 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -282,7 +282,7 @@ class KernelGradientTestsContinuous(unittest.TestCase): try: k.K(self.X) except AssertionError: - raise AssertionError, "k.K(X) should 
run on self.D-1 dimension" + raise AssertionError("k.K(X) should run on self.D-1 dimension") def test_Matern52(self): k = GPy.kern.Matern52(self.D) diff --git a/GPy/util/caching.py b/GPy/util/caching.py index 16adc320..b1419aec 100644 --- a/GPy/util/caching.py +++ b/GPy/util/caching.py @@ -148,10 +148,10 @@ class Cacher(object): return Cacher(self.operation, self.limit, self.ignore_args, self.force_kwargs) def __getstate__(self, memo=None): - raise NotImplementedError, "Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation)) + raise NotImplementedError("Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation))) def __setstate__(self, memo=None): - raise NotImplementedError, "Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation)) + raise NotImplementedError("Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation))) @property def __name__(self): diff --git a/GPy/util/choleskies.py b/GPy/util/choleskies.py index cc3a7f75..606229f7 100644 --- a/GPy/util/choleskies.py +++ b/GPy/util/choleskies.py @@ -10,7 +10,7 @@ def safe_root(N): i = np.sqrt(N) j = int(i) if i != j: - raise ValueError, "N is not square!" + raise ValueError("N is not square!") return j def flat_to_triang(flat): diff --git a/GPy/util/config.py b/GPy/util/config.py index 6dad46c8..8496fe36 100644 --- a/GPy/util/config.py +++ b/GPy/util/config.py @@ -20,4 +20,4 @@ user_file = os.path.join(home,'.gpy_user.cfg') config.readfp(open(default_file)) config.read([local_file, user_file]) if not config: - raise ValueError, "No configuration file found at either " + user_file + " or " + local_file + " or " + default_file + "." 
+ raise ValueError("No configuration file found at either " + user_file + " or " + local_file + " or " + default_file + ".") diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 3ba46f52..2648dd8d 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -785,7 +785,7 @@ def hapmap3(data_set='hapmap3'): from sys import stdout import bz2 except ImportError as i: - raise i, "Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset" + raise i("Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset") dir_path = os.path.join(data_path,'hapmap3') hapmap_file_name = 'hapmap3_r2_b36_fwd.consensus.qc.poly' diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index d7ad5d61..04b341f3 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -91,7 +91,7 @@ def jitchol(A, maxtries=5): else: diagA = np.diag(A) if np.any(diagA <= 0.): - raise linalg.LinAlgError, "not pd: non-positive diagonal elements" + raise linalg.LinAlgError("not pd: non-positive diagonal elements") jitter = diagA.mean() * 1e-6 num_tries = 1 while num_tries <= maxtries and np.isfinite(jitter): @@ -105,7 +105,7 @@ def jitchol(A, maxtries=5): import traceback logging.warning('\n'.join(['Added {} rounds of jitter, jitter of {:.10e}'.format(num_tries-1, jitter), ' in '+traceback.format_list(traceback.extract_stack(limit=2)[-2:-1])[0][2:]])) - raise linalg.LinAlgError, "not positive definite, even with jitter." + raise linalg.LinAlgError("not positive definite, even with jitter.") # def dtrtri(L, lower=1): # """ diff --git a/GPy/util/ln_diff_erfs.py b/GPy/util/ln_diff_erfs.py index 582a4585..c1137283 100644 --- a/GPy/util/ln_diff_erfs.py +++ b/GPy/util/ln_diff_erfs.py @@ -35,7 +35,7 @@ def ln_diff_erfs(x1, x2, return_sign=False): elif x2.size==1: v = np.zeros(x1.shape) else: - raise ValueError, "This function does not broadcast unless provided with a scalar." 
+ raise ValueError("This function does not broadcast unless provided with a scalar.") if x1.size == 1: x1 = np.tile(x1, x2.shape) diff --git a/GPy/util/mocap.py b/GPy/util/mocap.py index 58662cf9..bcc3c029 100644 --- a/GPy/util/mocap.py +++ b/GPy/util/mocap.py @@ -174,7 +174,7 @@ class skeleton(tree): return connection def to_xyz(self, channels): - raise NotImplementedError, "this needs to be implemented to use the skeleton class" + raise NotImplementedError("this needs to be implemented to use the skeleton class") def finalize(self): From e5080eb0ad02cbd369a0e16ce3a0acb6200c75d4 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 14:40:58 +0000 Subject: [PATCH 20/99] Added Python 3 progress to README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 9111a48c..dca746bf 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,7 @@ Work done so far: * Used 2to3 to fix relative imports * Used 2to3 to convert print from statement to function. Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. +* Used 2to3 to convert exceptions to Python 3 friendly versions. There are a few oustanding string exceptions to take care of that 2to3 doesn't handle. 
Will need to do these manually ### Citation From 046bd3d9556bab0a9457b89b0e58ee605f344e40 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 26 Feb 2015 16:55:17 +0000 Subject: [PATCH 21/99] Commented out cholupdate since it uses weave and appears not to be used --- GPy/util/linalg.py | 57 +++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index 04b341f3..0c4cdc50 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -489,34 +489,35 @@ def symmetrify_numpy(A, upper=False): else: A[triu] = A.T[triu] -def cholupdate(L, x): - """ - update the LOWER cholesky factor of a pd matrix IN PLACE - - if L is the lower chol. of K, then this function computes L\_ - where L\_ is the lower chol of K + x*x^T - - """ - support_code = """ - #include - """ - code = """ - double r,c,s; - int j,i; - for(j=0; j +# """ +# code = """ +# double r,c,s; +# int j,i; +# for(j=0; j Date: Fri, 27 Feb 2015 07:59:54 +0000 Subject: [PATCH 22/99] Fixed ConfigParser for Python 3 compat --- GPy/util/config.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/GPy/util/config.py b/GPy/util/config.py index 8496fe36..312d6991 100644 --- a/GPy/util/config.py +++ b/GPy/util/config.py @@ -1,9 +1,18 @@ # # This loads the configuration # -import ConfigParser import os -config = ConfigParser.ConfigParser() +try: + #Attempt Python 2 ConfigParser setup + import ConfigParser + config = ConfigParser.ConfigParser() +except ImportError: + #Attempt Python 3 ConfigParser setup + import configparser + config = configparser.ConfigParser() + + + # This is the default configuration file that always needs to be present. 
default_file = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'defaults.cfg')) From 82ea1979720e1f241bb82b3bb862f0fe42bee5fd Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 15:39:10 +0000 Subject: [PATCH 23/99] Put weave import in a try block so it fails gracefullt in Py3 --- GPy/util/linalg.py | 8 +++++++- README.md | 23 ++++++++++++++--------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index 0c4cdc50..9fd44a8d 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -6,7 +6,7 @@ # http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot.py import numpy as np -from scipy import linalg, weave +from scipy import linalg import types import ctypes from ctypes import byref, c_char, c_int, c_double # TODO @@ -16,6 +16,12 @@ import os from .config import config import logging +try: + from scipy import weave +except ImportError: + config.set('weave', 'working', 'False') + + _scipyversion = np.float64((scipy.__version__).split('.')[:2]) _fix_dpotri_scipy_bug = True if np.all(_scipyversion >= np.array([0, 14])): diff --git a/README.md b/README.md index dca746bf..17dfc241 100644 --- a/README.md +++ b/README.md @@ -10,15 +10,6 @@ A Gaussian processes framework in Python. Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GPy.png) -### Moving to Python 3 -Work is underway to make GPy run on Python 3. We are not there yet! Changes performed so far have retained compatibility with Python 2.6 and above. - -Work done so far: - -* Used 2to3 to fix relative imports -* Used 2to3 to convert print from statement to function. Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. -* Used 2to3 to convert exceptions to Python 3 friendly versions. 
There are a few oustanding string exceptions to take care of that 2to3 doesn't handle. Will need to do these manually - ### Citation @Misc{gpy2014, @@ -119,6 +110,20 @@ or from within IPython import GPy; GPy.tests() +### Moving to Python 3 +Work is underway to make GPy run on Python 3. We are not there yet! Changes performed so far have retained compatibility with Python 2.6 and above. + +Work done so far: + +* Used 2to3 to fix relative imports +* Used 2to3 to convert print from statement to function. Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. +* Used 2to3 to convert exceptions to Python 3 friendly versions. There are a few oustanding string exceptions to take care of that 2to3 doesn't handle. Will need to do these manually +* Handled the different imports required for ConfigParser/configparser in Py2/Py3 +* In utils/linalg.py: + * Commented out the function cholupdate(L, x) since it doesn't appear to be used. 
Its definitely not in the tests.s + * Put the import for scipy.weave in a try/except block so that it will gracefully fail in Py3 + + ## Funding Acknowledgements From 34511494273f974275a539c83fb1dede65fb7076 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 15:54:31 +0000 Subject: [PATCH 24/99] Exception raising fix for Python 3 --- GPy/util/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 2648dd8d..8aced11a 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -113,7 +113,7 @@ def download_url(url, store_directory, save_name=None, messages=True, suffix='') os.makedirs(dir_name) try: response = urllib2.urlopen(url+suffix) - except urllib2.URLError, e: + except urllib2.URLError as e: if not hasattr(e, "code"): raise response = e From 7bdb6ee556dcac89cef87d116f61bc46dd192849 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 15:57:57 +0000 Subject: [PATCH 25/99] Fixed cPickle import for Python 3 --- GPy/util/datasets.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 8aced11a..de157364 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -5,7 +5,6 @@ import copy import numpy as np import GPy import scipy.io -import cPickle as pickle import zipfile import tarfile import datetime @@ -20,6 +19,12 @@ try: except ImportError: ipython_available=False +try: + #In Python 2, cPickle is faster. 
It does not exist in Python 3 but the underlying code is always used + #if available + import cPickle as pickle +except ImportError: + import pickle import sys, urllib2 From 9e94830528af69734470dcb943e9ab1a801bc786 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 16:39:31 +0000 Subject: [PATCH 26/99] urllib2 fixes for Py3 compatibility --- GPy/util/datasets.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index de157364..57755ea9 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -10,7 +10,7 @@ import tarfile import datetime import json import re - +import sys from .config import * ipython_available=True @@ -26,7 +26,13 @@ try: except ImportError: import pickle -import sys, urllib2 +#A Python2/3 import handler - urllib2 changed its name in Py3 and was also reorganised +try: + from urllib2 import urlopen + from urllib2 import URLError +except ImportError: + from urllib.request import urlopen + from urllib.error import URLError def reporthook(a,b,c): # ',' at the end of the line is important! @@ -117,8 +123,8 @@ def download_url(url, store_directory, save_name=None, messages=True, suffix='') if not os.path.exists(dir_name): os.makedirs(dir_name) try: - response = urllib2.urlopen(url+suffix) - except urllib2.URLError as e: + response = urlopen(url+suffix) + except URLError as e: if not hasattr(e, "code"): raise response = e @@ -511,7 +517,7 @@ def google_trends(query_terms=['big data', 'machine learning', 'data science'], print("Fetching query:") query = 'http://www.google.com/trends/fetchComponent?q=%s&cid=TIMESERIES_GRAPH_0&export=3' % ",".join(quoted_terms) - data = urllib2.urlopen(query).read() + data = urlopen(query).read() print("Done.") # In the notebook they did some data cleaning: remove Javascript header+footer, and translate new Date(....,..,..) into YYYY-MM-DD. 
header = """// Data table response\ngoogle.visualization.Query.setResponse(""" From a9559acbd04fe6be253509614ece200a73f2063e Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 16:47:26 +0000 Subject: [PATCH 27/99] Removed import urllib2 since it wasn't being used --- GPy/util/mocap.py | 1 - 1 file changed, 1 deletion(-) diff --git a/GPy/util/mocap.py b/GPy/util/mocap.py index bcc3c029..4f6336c5 100644 --- a/GPy/util/mocap.py +++ b/GPy/util/mocap.py @@ -2,7 +2,6 @@ import os import numpy as np import math from GPy.util import datasets as dat -import urllib2 class vertex: def __init__(self, name, id, parents=[], children=[], meta = {}): From 381d28e6c8c1fdbaf82835106151edc170ae4642 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 16:50:34 +0000 Subject: [PATCH 28/99] Updated Py3 work --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 17dfc241..98613ce5 100644 --- a/README.md +++ b/README.md @@ -122,6 +122,7 @@ Work done so far: * In utils/linalg.py: * Commented out the function cholupdate(L, x) since it doesn't appear to be used. 
Its definitely not in the tests.s * Put the import for scipy.weave in a try/except block so that it will gracefully fail in Py3 +* Fixed a couple of urllib2 issues - had to be done mannual since 2to3 didn't help From 6554c32d23457c2757aaf6a60284ca52efc54dd6 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 16:53:54 +0000 Subject: [PATCH 29/99] Changed <> to != for Py3 compatibility --- GPy/util/multioutput.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/util/multioutput.py b/GPy/util/multioutput.py index cc9af29e..2233dbb6 100644 --- a/GPy/util/multioutput.py +++ b/GPy/util/multioutput.py @@ -51,7 +51,7 @@ def ICM(input_dim, num_outputs, kernel, W_rank=1,W=None,kappa=None,name='ICM'): :param W_rank: number tuples of the corregionalization parameters 'W' :type W_rank: integer """ - if kernel.input_dim <> input_dim: + if kernel.input_dim != input_dim: kernel.input_dim = input_dim warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.") From 786feded414e7ad00a562890eec930655c65633f Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 17:35:26 +0000 Subject: [PATCH 30/99] Import fix for Py3 --- .../latent_function_inference/__init__.py | 16 ++-- GPy/util/univariate_Gaussian.py | 73 ++++++++++--------- 2 files changed, 45 insertions(+), 44 deletions(-) diff --git a/GPy/inference/latent_function_inference/__init__.py b/GPy/inference/latent_function_inference/__init__.py index 67f57638..2d52369f 100644 --- a/GPy/inference/latent_function_inference/__init__.py +++ b/GPy/inference/latent_function_inference/__init__.py @@ -61,15 +61,15 @@ class InferenceMethodList(LatentFunctionInference, list): for inf in state: self.append(inf) -from exact_gaussian_inference import ExactGaussianInference -from laplace import Laplace +from .exact_gaussian_inference import ExactGaussianInference +from .laplace import Laplace from GPy.inference.latent_function_inference.var_dtc import VarDTC -from 
expectation_propagation import EP -from expectation_propagation_dtc import EPDTC -from dtc import DTC -from fitc import FITC -from var_dtc_parallel import VarDTC_minibatch -from svgp import SVGP +from .expectation_propagation import EP +from .expectation_propagation_dtc import EPDTC +from .dtc import DTC +from .fitc import FITC +from .var_dtc_parallel import VarDTC_minibatch +from .svgp import SVGP # class FullLatentFunctionData(object): # diff --git a/GPy/util/univariate_Gaussian.py b/GPy/util/univariate_Gaussian.py index 09b2e99c..977eb461 100644 --- a/GPy/util/univariate_Gaussian.py +++ b/GPy/util/univariate_Gaussian.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from scipy import weave +#from scipy import weave def std_norm_pdf(x): """Standard Gaussian density function""" @@ -37,41 +37,42 @@ def std_norm_cdf(x): cdf_x = cdf_x.reshape(x_shape) return cdf_x -def std_norm_cdf_weave(x): - """ - Cumulative standard Gaussian distribution - Based on Abramowitz, M. and Stegun, I. (1970) - - A weave implementation of std_norm_cdf, which is faster. this is unused, - because of the difficulties of a weave dependency. 
(see github issue #94) - - """ - #Generalize for many x - x = np.asarray(x).copy() - cdf_x = np.zeros_like(x) - N = x.size - support_code = "#include " - code = """ - - double sign, t, erf; - for (int i=0; i Date: Fri, 27 Feb 2015 17:39:15 +0000 Subject: [PATCH 31/99] Import fixes for Py3 --- GPy/inference/latent_function_inference/dtc.py | 2 +- .../latent_function_inference/exact_gaussian_inference.py | 2 +- .../latent_function_inference/expectation_propagation.py | 2 +- .../latent_function_inference/expectation_propagation_dtc.py | 2 +- GPy/inference/latent_function_inference/fitc.py | 2 +- GPy/inference/latent_function_inference/laplace.py | 2 +- GPy/inference/latent_function_inference/svgp.py | 2 +- GPy/inference/latent_function_inference/var_dtc.py | 2 +- GPy/inference/latent_function_inference/var_dtc_parallel.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py index 57a451b2..95600a91 100644 --- a/GPy/inference/latent_function_inference/dtc.py +++ b/GPy/inference/latent_function_inference/dtc.py @@ -1,7 +1,7 @@ # Copyright (c) 2012-2014, James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) -from posterior import Posterior +from .posterior import Posterior from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv import numpy as np from . import LatentFunctionInference diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py index 1312d36a..ebf59eaa 100644 --- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py +++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py @@ -1,7 +1,7 @@ # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from posterior import Posterior +from .posterior import Posterior from ...util.linalg import pdinv, dpotrs, tdot from ...util import diag import numpy as np diff --git a/GPy/inference/latent_function_inference/expectation_propagation.py b/GPy/inference/latent_function_inference/expectation_propagation.py index 26144974..4c553145 100644 --- a/GPy/inference/latent_function_inference/expectation_propagation.py +++ b/GPy/inference/latent_function_inference/expectation_propagation.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs -from posterior import Posterior +from .posterior import Posterior from . import LatentFunctionInference log_2_pi = np.log(2*np.pi) diff --git a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py index e25df388..86dcb691 100644 --- a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py +++ b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py @@ -6,7 +6,7 @@ from ...util import diag from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify, DSYR from ...core.parameterization.variational import VariationalPosterior from . 
import LatentFunctionInference -from posterior import Posterior +from .posterior import Posterior log_2_pi = np.log(2*np.pi) class EPDTC(LatentFunctionInference): diff --git a/GPy/inference/latent_function_inference/fitc.py b/GPy/inference/latent_function_inference/fitc.py index abe53f3d..7011aef8 100644 --- a/GPy/inference/latent_function_inference/fitc.py +++ b/GPy/inference/latent_function_inference/fitc.py @@ -1,7 +1,7 @@ # Copyright (c) 2012, James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) -from posterior import Posterior +from .posterior import Posterior from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv from ...util import diag import numpy as np diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py index 05711b0b..5bbce35c 100644 --- a/GPy/inference/latent_function_inference/laplace.py +++ b/GPy/inference/latent_function_inference/laplace.py @@ -12,7 +12,7 @@ import numpy as np from ...util.linalg import mdot, jitchol, dpotrs, dtrtrs, dpotri, symmetrify, pdinv -from posterior import Posterior +from .posterior import Posterior import warnings def warning_on_one_line(message, category, filename, lineno, file=None, line=None): return ' %s:%s: %s:%s\n' % (filename, lineno, category.__name__, message) diff --git a/GPy/inference/latent_function_inference/svgp.py b/GPy/inference/latent_function_inference/svgp.py index 52db242c..9726335f 100644 --- a/GPy/inference/latent_function_inference/svgp.py +++ b/GPy/inference/latent_function_inference/svgp.py @@ -2,7 +2,7 @@ from . 
import LatentFunctionInference from ...util import linalg from ...util import choleskies import numpy as np -from posterior import Posterior +from .posterior import Posterior class SVGP(LatentFunctionInference): diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index a878ed18..97d8dfe3 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -1,7 +1,7 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from posterior import Posterior +from .posterior import Posterior from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify from ...util import diag from ...core.parameterization.variational import VariationalPosterior diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index cac69872..cb117af1 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -1,7 +1,7 @@ # Copyright (c) 2014, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from posterior import Posterior +from .posterior import Posterior from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri,pdinv from ...util import diag from ...core.parameterization.variational import VariationalPosterior From 4b7036bdb2e614afe5f0b65dd5e38e064ef38b7a Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 17:45:20 +0000 Subject: [PATCH 32/99] More input fixes --- GPy/inference/optimization/__init__.py | 4 ++-- GPy/inference/optimization/conjugate_gradient_descent.py | 2 +- GPy/inference/optimization/optimization.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/GPy/inference/optimization/__init__.py b/GPy/inference/optimization/__init__.py index 1a8f043b..909f897b 100644 --- a/GPy/inference/optimization/__init__.py +++ b/GPy/inference/optimization/__init__.py @@ -1,2 +1,2 @@ -from scg import SCG -from optimization import * +from .scg import SCG +from .optimization import * diff --git a/GPy/inference/optimization/conjugate_gradient_descent.py b/GPy/inference/optimization/conjugate_gradient_descent.py index 274de784..fc2d8b61 100644 --- a/GPy/inference/optimization/conjugate_gradient_descent.py +++ b/GPy/inference/optimization/conjugate_gradient_descent.py @@ -1,7 +1,7 @@ # Copyright (c) 2012-2014, Max Zwiessele # Licensed under the BSD 3-clause license (see LICENSE.txt) -from gradient_descent_update_rules import FletcherReeves, \ +from .gradient_descent_update_rules import FletcherReeves, \ PolakRibiere from Queue import Empty from multiprocessing import Value diff --git a/GPy/inference/optimization/optimization.py b/GPy/inference/optimization/optimization.py index 600de35d..2179bf5e 100644 --- a/GPy/inference/optimization/optimization.py +++ b/GPy/inference/optimization/optimization.py @@ -10,7 +10,7 @@ try: rasm_available = True except ImportError: rasm_available = False -from scg import SCG +from .scg import SCG class Optimizer(): 
""" From 7a9203be4d57c58dd3c5d1a9b3f8c02423368763 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 17:50:48 +0000 Subject: [PATCH 33/99] More input fixes --- GPy/inference/mcmc/__init__.py | 2 +- GPy/inference/mcmc/samplers.py | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/GPy/inference/mcmc/__init__.py b/GPy/inference/mcmc/__init__.py index 956448d4..8f185457 100644 --- a/GPy/inference/mcmc/__init__.py +++ b/GPy/inference/mcmc/__init__.py @@ -1 +1 @@ -from hmc import HMC +from .hmc import HMC diff --git a/GPy/inference/mcmc/samplers.py b/GPy/inference/mcmc/samplers.py index ff396a96..6459e8af 100644 --- a/GPy/inference/mcmc/samplers.py +++ b/GPy/inference/mcmc/samplers.py @@ -9,7 +9,13 @@ import sys import re import numdifftools as ndt import pdb -import cPickle + +try: + #In Python 2, cPickle is faster. It does not exist in Python 3 but the underlying code is always used + #if available + import cPickle as pickle +except ImportError: + import pickle class Metropolis_Hastings: From 17f14537a36feea92eb0562bcaf253e97dee94cd Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 17:52:33 +0000 Subject: [PATCH 34/99] Fixed inconsistent tab error --- GPy/core/verbose_optimization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/core/verbose_optimization.py b/GPy/core/verbose_optimization.py index af64d3a8..60d8cba8 100644 --- a/GPy/core/verbose_optimization.py +++ b/GPy/core/verbose_optimization.py @@ -146,5 +146,5 @@ class VerboseOptimization(object): if not self.ipython_notebook: print() print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)) - print('Optimization status: {0:.5g}'.format(self.status)) + print('Optimization status: {0:.5g}'.format(self.status)) print() From e07d554cb5e473143ddb468dcf60f343c08bdd5e Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 17:53:29 +0000 Subject: [PATCH 35/99] Fixed inconsistent tab error --- 
GPy/core/verbose_optimization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/core/verbose_optimization.py b/GPy/core/verbose_optimization.py index 60d8cba8..4b1d0220 100644 --- a/GPy/core/verbose_optimization.py +++ b/GPy/core/verbose_optimization.py @@ -147,4 +147,4 @@ class VerboseOptimization(object): print() print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)) print('Optimization status: {0:.5g}'.format(self.status)) - print() + print() From 19e9c9e7358f8deb14c2947955bc5fc0e3c3a1cf Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 17:55:58 +0000 Subject: [PATCH 36/99] Import fixes for Py3 --- GPy/kern/_src/ODE_UY.py | 4 ++-- GPy/kern/_src/ODE_UYC.py | 4 ++-- GPy/kern/_src/ODE_st.py | 4 ++-- GPy/kern/_src/ODE_t.py | 4 ++-- GPy/kern/_src/add.py | 14 +++++++------- GPy/kern/_src/brownian.py | 2 +- GPy/kern/_src/coregionalize.py | 2 +- GPy/kern/_src/eq_ode2.py | 2 +- GPy/kern/_src/independent_outputs.py | 2 +- GPy/kern/_src/kern.py | 6 +++--- GPy/kern/_src/linear.py | 2 +- GPy/kern/_src/mlp.py | 2 +- GPy/kern/_src/periodic.py | 2 +- GPy/kern/_src/poly.py | 2 +- GPy/kern/_src/prod.py | 2 +- GPy/kern/_src/rbf.py | 6 +++--- GPy/kern/_src/splitKern.py | 2 +- GPy/kern/_src/static.py | 2 +- GPy/kern/_src/stationary.py | 2 +- GPy/kern/_src/symbolic.py | 2 +- GPy/kern/_src/trunclinear.py | 2 +- 21 files changed, 35 insertions(+), 35 deletions(-) diff --git a/GPy/kern/_src/ODE_UY.py b/GPy/kern/_src/ODE_UY.py index eef8609b..9c9b47be 100644 --- a/GPy/kern/_src/ODE_UY.py +++ b/GPy/kern/_src/ODE_UY.py @@ -1,11 +1,11 @@ # Copyright (c) 2013, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp import numpy as np -from independent_outputs import index_to_slices +from .independent_outputs import index_to_slices class ODE_UY(Kern): def __init__(self, input_dim, variance_U=3., variance_Y=1., lengthscale_U=1., lengthscale_Y=1., active_dims=None, name='ode_uy'): diff --git a/GPy/kern/_src/ODE_UYC.py b/GPy/kern/_src/ODE_UYC.py index 4c39a9c9..ff75a328 100644 --- a/GPy/kern/_src/ODE_UYC.py +++ b/GPy/kern/_src/ODE_UYC.py @@ -1,11 +1,11 @@ # Copyright (c) 2013, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp import numpy as np -from independent_outputs import index_to_slices +from .independent_outputs import index_to_slices class ODE_UYC(Kern): def __init__(self, input_dim, variance_U=3., variance_Y=1., lengthscale_U=1., lengthscale_Y=1., ubias =1. ,active_dims=None, name='ode_uyc'): diff --git a/GPy/kern/_src/ODE_st.py b/GPy/kern/_src/ODE_st.py index 1c3b661b..afa46d09 100644 --- a/GPy/kern/_src/ODE_st.py +++ b/GPy/kern/_src/ODE_st.py @@ -1,10 +1,10 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp import numpy as np -from independent_outputs import index_to_slices +from .independent_outputs import index_to_slices class ODE_st(Kern): diff --git a/GPy/kern/_src/ODE_t.py b/GPy/kern/_src/ODE_t.py index 268917ae..80625f51 100644 --- a/GPy/kern/_src/ODE_t.py +++ b/GPy/kern/_src/ODE_t.py @@ -1,8 +1,8 @@ -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp import numpy as np -from independent_outputs import index_to_slices +from .independent_outputs import index_to_slices class ODE_t(Kern): diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 0f612f5b..17c0027a 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -4,7 +4,7 @@ import numpy as np import itertools from ...util.caching import Cache_this -from kern import CombinationKernel +from .kern import CombinationKernel class Add(CombinationKernel): """ @@ -84,10 +84,10 @@ class Add(CombinationKernel): psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts)) #return psi2 # compute the "cross" terms - from static import White, Bias - from rbf import RBF + from .static import White, Bias + from .rbf import RBF #from rbf_inv import RBFInv - from linear import Linear + from .linear import Linear #ffrom fixed import Fixed for p1, p2 in itertools.combinations(self.parts, 2): @@ -115,7 +115,7 @@ class Add(CombinationKernel): return psi2 def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): - from static import White, Bias + from .static import White, Bias for p1 in self.parts: #compute the effective dL_dpsi1. Extra terms appear becaue of the cross terms in psi2! 
eff_dL_dpsi1 = dL_dpsi1.copy() @@ -131,7 +131,7 @@ class Add(CombinationKernel): p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) def gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): - from static import White, Bias + from .static import White, Bias target = np.zeros(Z.shape) for p1 in self.parts: #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2! @@ -149,7 +149,7 @@ class Add(CombinationKernel): return target def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): - from static import White, Bias + from .static import White, Bias target_grads = [np.zeros(v.shape) for v in variational_posterior.parameters] for p1 in self.parameters: #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2! diff --git a/GPy/kern/_src/brownian.py b/GPy/kern/_src/brownian.py index fd79973c..d403fce7 100644 --- a/GPy/kern/_src/brownian.py +++ b/GPy/kern/_src/brownian.py @@ -1,7 +1,7 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp import numpy as np diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py index b6a3aecf..d76dde1f 100644 --- a/GPy/kern/_src/coregionalize.py +++ b/GPy/kern/_src/coregionalize.py @@ -1,7 +1,7 @@ # Copyright (c) 2012, James Hensman and Ricardo Andrade # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern +from .kern import Kern import numpy as np from scipy import weave from ...core.parameterization import Param diff --git a/GPy/kern/_src/eq_ode2.py b/GPy/kern/_src/eq_ode2.py index 59f67b8b..2d42a3e6 100644 --- a/GPy/kern/_src/eq_ode2.py +++ b/GPy/kern/_src/eq_ode2.py @@ -3,7 +3,7 @@ import numpy as np from scipy.special import wofz -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp from ...util.caching import Cache_this diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 21958267..10681d57 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern, CombinationKernel +from .kern import Kern, CombinationKernel import numpy as np import itertools diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 57b2bff5..c4fadd57 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -4,7 +4,7 @@ import sys import numpy as np from ...core.parameterization.parameterized import Parameterized -from kernel_slice_operations import KernCallsViaSlicerMeta +from .kernel_slice_operations import KernCallsViaSlicerMeta from ...util.caching import Cache_this from GPy.core.parameterization.observable_array import ObsAr @@ -178,7 +178,7 @@ class 
Kern(Parameterized): """ assert isinstance(other, Kern), "only kernels can be added to kernels..." - from add import Add + from .add import Add return Add([self, other], name=name) def __mul__(self, other): @@ -210,7 +210,7 @@ class Kern(Parameterized): """ assert isinstance(other, Kern), "only kernels can be multiplied to kernels..." - from prod import Prod + from .prod import Prod #kernels = [] #if isinstance(self, Prod): kernels.extend(self.parameters) #else: kernels.append(self) diff --git a/GPy/kern/_src/linear.py b/GPy/kern/_src/linear.py index 9d1a956b..e3a45c67 100644 --- a/GPy/kern/_src/linear.py +++ b/GPy/kern/_src/linear.py @@ -3,7 +3,7 @@ import numpy as np -from kern import Kern +from .kern import Kern from ...util.linalg import tdot from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp diff --git a/GPy/kern/_src/mlp.py b/GPy/kern/_src/mlp.py index 16e84363..4488ea82 100644 --- a/GPy/kern/_src/mlp.py +++ b/GPy/kern/_src/mlp.py @@ -1,7 +1,7 @@ # Copyright (c) 2013, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp import numpy as np diff --git a/GPy/kern/_src/periodic.py b/GPy/kern/_src/periodic.py index e8e16506..36fcb596 100644 --- a/GPy/kern/_src/periodic.py +++ b/GPy/kern/_src/periodic.py @@ -3,7 +3,7 @@ import numpy as np -from kern import Kern +from .kern import Kern from ...util.linalg import mdot from ...util.decorators import silence_errors from ...core.parameterization.param import Param diff --git a/GPy/kern/_src/poly.py b/GPy/kern/_src/poly.py index b90e8f8f..a5306c2a 100644 --- a/GPy/kern/_src/poly.py +++ b/GPy/kern/_src/poly.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp class Poly(Kern): diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index bff6d841..84bd1e1d 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from kern import CombinationKernel +from .kern import CombinationKernel from ...util.caching import Cache_this import itertools diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py index 0c6a4aef..c6998370 100644 --- a/GPy/kern/_src/rbf.py +++ b/GPy/kern/_src/rbf.py @@ -3,9 +3,9 @@ import numpy as np -from stationary import Stationary -from psi_comp import PSICOMP_RBF -from psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU +from .stationary import Stationary +from .psi_comp import PSICOMP_RBF +from .psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU from ...util.config import * class RBF(Stationary): diff --git a/GPy/kern/_src/splitKern.py b/GPy/kern/_src/splitKern.py index 27e4f76b..18771cb0 100644 --- a/GPy/kern/_src/splitKern.py +++ 
b/GPy/kern/_src/splitKern.py @@ -3,7 +3,7 @@ A new kernel """ import numpy as np -from kern import Kern,CombinationKernel +from .kern import Kern,CombinationKernel from .independent_outputs import index_to_slices import itertools diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py index f4223bf4..77e395fd 100644 --- a/GPy/kern/_src/static.py +++ b/GPy/kern/_src/static.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern +from .kern import Kern import numpy as np from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index 0cd85b38..5052b7b0 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp from ...util.linalg import tdot diff --git a/GPy/kern/_src/symbolic.py b/GPy/kern/_src/symbolic.py index 9ca20ea5..c339893a 100644 --- a/GPy/kern/_src/symbolic.py +++ b/GPy/kern/_src/symbolic.py @@ -1,7 +1,7 @@ # Check Matthew Rocklin's blog post. 
import sympy as sym import numpy as np -from kern import Kern +from .kern import Kern from ...core.symbolic import Symbolic_core diff --git a/GPy/kern/_src/trunclinear.py b/GPy/kern/_src/trunclinear.py index 4ebd51b6..8c48f134 100644 --- a/GPy/kern/_src/trunclinear.py +++ b/GPy/kern/_src/trunclinear.py @@ -3,7 +3,7 @@ import numpy as np -from kern import Kern +from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp from ...util.caching import Cache_this From 7353fd557524fd05ccaca179491cb9ef48597afc Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 18:49:28 +0000 Subject: [PATCH 37/99] More import fixes for Py3 --- GPy/kern/_src/psi_comp/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/GPy/kern/_src/psi_comp/__init__.py b/GPy/kern/_src/psi_comp/__init__.py index 74aacd75..5041da50 100644 --- a/GPy/kern/_src/psi_comp/__init__.py +++ b/GPy/kern/_src/psi_comp/__init__.py @@ -4,10 +4,10 @@ from ....core.parameterization.parameter_core import Pickleable from GPy.util.caching import Cache_this from ....core.parameterization import variational -import rbf_psi_comp -import ssrbf_psi_comp -import sslinear_psi_comp -import linear_psi_comp +from . import rbf_psi_comp +from . import ssrbf_psi_comp +from . import sslinear_psi_comp +from . 
import linear_psi_comp class PSICOMP_RBF(Pickleable): @Cache_this(limit=2, ignore_args=(0,)) From dce76d3226f71ba8a608594c7b6e57217f310d8a Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 18:53:10 +0000 Subject: [PATCH 38/99] Fix weave import for Py3 --- GPy/kern/_src/coregionalize.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py index d76dde1f..1b16fd73 100644 --- a/GPy/kern/_src/coregionalize.py +++ b/GPy/kern/_src/coregionalize.py @@ -3,11 +3,15 @@ from .kern import Kern import numpy as np -from scipy import weave from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp from ...util.config import config # for assesing whether to use weave +try: + from scipy import weave +except ImportError: + config.set('weave', 'working', 'False') + class Coregionalize(Kern): """ Covariance function for intrinsic/linear coregionalization models From 4c3d68b761cfebc682e3692753a544131e8d6161 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 19:00:55 +0000 Subject: [PATCH 39/99] Fixed tab/space indentation issue --- GPy/core/parameterization/priors.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py index edc83c38..298ca2d2 100644 --- a/GPy/core/parameterization/priors.py +++ b/GPy/core/parameterization/priors.py @@ -549,7 +549,7 @@ class DGPLVM(Prior): M_i = np.zeros((self.classnum, self.dim)) for i in cls: # Mean of each class - class_i = cls[i] + class_i = cls[i] M_i[i] = np.mean(class_i, axis=0) return M_i @@ -663,7 +663,7 @@ class DGPLVM(Prior): # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1)) #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1) #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0] - Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0] + 
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0] return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw)) # This function calculates derivative of the log of prior function @@ -684,7 +684,7 @@ class DGPLVM(Prior): # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1)) #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1) #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0] - Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0] + Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0] Sb_inv_N_trans = np.transpose(Sb_inv_N) Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans Sw_trans = np.transpose(Sw) @@ -742,7 +742,7 @@ class DGPLVM_T(Prior): self.datanum = lbl.shape[0] self.x_shape = x_shape self.dim = x_shape[1] - self.vec = vec + self.vec = vec def get_class_label(self, y): @@ -768,7 +768,7 @@ class DGPLVM_T(Prior): M_i = np.zeros((self.classnum, self.dim)) for i in cls: # Mean of each class - class_i = np.multiply(cls[i],vec) + class_i = np.multiply(cls[i],vec) M_i[i] = np.mean(class_i, axis=0) return M_i @@ -883,7 +883,7 @@ class DGPLVM_T(Prior): #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1) #print 'SB_inv: ', Sb_inv_N #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0] - Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0] + Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0] return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw)) # This function calculates derivative of the log of prior function @@ -905,7 +905,7 @@ class DGPLVM_T(Prior): #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1) #print 'SB_inv: ',Sb_inv_N #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0] - Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0] + Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0] Sb_inv_N_trans = np.transpose(Sb_inv_N) Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans Sw_trans = np.transpose(Sw) From 09c93e62d05e8482e3be863731c1e24679f34742 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 
19:03:45 +0000 Subject: [PATCH 40/99] Print fixes for Python 3 --- GPy/testing/examples_tests.py | 26 ++++---- GPy/testing/index_operations_tests.py | 4 +- GPy/testing/kernel_tests.py | 26 ++++---- GPy/testing/likelihood_tests.py | 96 +++++++++++++-------------- GPy/testing/model_tests.py | 16 ++--- GPy/testing/mpi_tests.py | 2 +- GPy/testing/parameterized_tests.py | 8 +-- GPy/testing/prior_tests.py | 2 +- 8 files changed, 90 insertions(+), 90 deletions(-) diff --git a/GPy/testing/examples_tests.py b/GPy/testing/examples_tests.py index be26fff6..48a18119 100644 --- a/GPy/testing/examples_tests.py +++ b/GPy/testing/examples_tests.py @@ -46,20 +46,20 @@ def test_models(): for loader, module_name, is_pkg in pkgutil.iter_modules([examples_path]): # Load examples module_examples = loader.find_module(module_name).load_module(module_name) - print "MODULE", module_examples - print "Before" - print inspect.getmembers(module_examples, predicate=inspect.isfunction) + print("MODULE", module_examples) + print("Before") + print(inspect.getmembers(module_examples, predicate=inspect.isfunction)) functions = [ func for func in inspect.getmembers(module_examples, predicate=inspect.isfunction) if func[0].startswith('_') is False ][::-1] - print "After" - print functions + print("After") + print(functions) for example in functions: if example[0] in ['epomeo_gpx']: #These are the edge cases that we might want to handle specially if example[0] == 'epomeo_gpx' and not GPy.util.datasets.gpxpy_available: - print "Skipping as gpxpy is not available to parse GPS" + print("Skipping as gpxpy is not available to parse GPS") continue - print "Testing example: ", example[0] + print("Testing example: ", example[0]) # Generate model try: @@ -69,7 +69,7 @@ def test_models(): except Exception as e: failing_models[example[0]] = "Cannot make model: \n{e}".format(e=e) else: - print models + print(models) model_checkgrads.description = 'test_checkgrads_%s' % example[0] try: for model in models: @@ -89,17 
+89,17 @@ def test_models(): #yield model_checkgrads, model #yield model_instance, model - print "Finished checking module {m}".format(m=module_name) + print("Finished checking module {m}".format(m=module_name)) if len(failing_models.keys()) > 0: - print "Failing models: " - print failing_models + print("Failing models: ") + print(failing_models) if len(failing_models.keys()) > 0: - print failing_models + print(failing_models) raise Exception(failing_models) if __name__ == "__main__": - print "Running unit tests, please be (very) patient..." + print("Running unit tests, please be (very) patient...") # unittest.main() test_models() diff --git a/GPy/testing/index_operations_tests.py b/GPy/testing/index_operations_tests.py index e5c2011a..e2895cd2 100644 --- a/GPy/testing/index_operations_tests.py +++ b/GPy/testing/index_operations_tests.py @@ -127,8 +127,8 @@ class Test(unittest.TestCase): self.assertEqual(self.view.size, 5) def test_print(self): - print self.param_index - print self.view + print(self.param_index) + print(self.view) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.test_index_view'] diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 3b09d6e7..771028f0 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -37,7 +37,7 @@ class Kern_check_model(GPy.core.Model): def is_positive_semi_definite(self): v = np.linalg.eig(self.kernel.K(self.X))[0] if any(v.real<=-1e-10): - print v.real.min() + print(v.real.min()) return False else: return True @@ -126,7 +126,7 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb if result and verbose: print("Check passed.") if not result: - print("Positive definite check failed for " + kern.name + " covariance function.") + print(("Positive definite check failed for " + kern.name + " covariance function.")) pass_checks = False assert(result) return False @@ -137,7 +137,7 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, 
output_ind=None, verb if result and verbose: print("Check passed.") if not result: - print("Gradient of K(X, X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:") + print(("Gradient of K(X, X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:")) Kern_check_dK_dtheta(kern, X=X, X2=None).checkgrad(verbose=True) pass_checks = False assert(result) @@ -149,7 +149,7 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb if result and verbose: print("Check passed.") if not result: - print("Gradient of K(X, X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:") + print(("Gradient of K(X, X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:")) Kern_check_dK_dtheta(kern, X=X, X2=X2).checkgrad(verbose=True) pass_checks = False assert(result) @@ -162,11 +162,11 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb except NotImplementedError: result=True if verbose: - print("update_gradients_diag not implemented for " + kern.name) + print(("update_gradients_diag not implemented for " + kern.name)) if result and verbose: print("Check passed.") if not result: - print("Gradient of Kdiag(X) wrt theta failed for " + kern.name + " covariance function. Gradient values as follows:") + print(("Gradient of Kdiag(X) wrt theta failed for " + kern.name + " covariance function. 
Gradient values as follows:")) Kern_check_dKdiag_dtheta(kern, X=X).checkgrad(verbose=True) pass_checks = False assert(result) @@ -182,11 +182,11 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb except NotImplementedError: result=True if verbose: - print("gradients_X not implemented for " + kern.name) + print(("gradients_X not implemented for " + kern.name)) if result and verbose: print("Check passed.") if not result: - print("Gradient of K(X, X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:") + print(("Gradient of K(X, X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:")) testmodel.checkgrad(verbose=True) import ipdb;ipdb.set_trace() assert(result) @@ -203,11 +203,11 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb except NotImplementedError: result=True if verbose: - print("gradients_X not implemented for " + kern.name) + print(("gradients_X not implemented for " + kern.name)) if result and verbose: print("Check passed.") if not result: - print("Gradient of K(X, X2) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:") + print(("Gradient of K(X, X2) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:")) testmodel.checkgrad(verbose=True) assert(result) pass_checks = False @@ -223,11 +223,11 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb except NotImplementedError: result=True if verbose: - print("gradients_X not implemented for " + kern.name) + print(("gradients_X not implemented for " + kern.name)) if result and verbose: print("Check passed.") if not result: - print("Gradient of Kdiag(X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:") + print(("Gradient of Kdiag(X) wrt X failed for " + kern.name + " covariance function. 
Gradient values as follows:")) Kern_check_dKdiag_dX(kern, X=X).checkgrad(verbose=True) pass_checks = False assert(result) @@ -404,7 +404,7 @@ class Coregionalize_weave_test(unittest.TestCase): if __name__ == "__main__": - print "Running unit tests, please be (very) patient..." + print("Running unit tests, please be (very) patient...") unittest.main() # np.random.seed(0) # N0 = 3 diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py index 95929098..5feeffa4 100644 --- a/GPy/testing/likelihood_tests.py +++ b/GPy/testing/likelihood_tests.py @@ -44,8 +44,8 @@ def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, The number of parameters and N is the number of data Need to take a slice out from f and a slice out of df """ - print "\n{} likelihood: {} vs {}".format(func.im_self.__class__.__name__, - func.__name__, dfunc.__name__) + print("\n{} likelihood: {} vs {}".format(func.im_self.__class__.__name__, + func.__name__, dfunc.__name__)) partial_f = dparam_partial(func, *args) partial_df = dparam_partial(dfunc, *args) gradchecking = True @@ -57,7 +57,7 @@ def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, for fixed_val in range(dfnum): #dlik and dlik_dvar gives back 1 value for each f_ind = min(fnum, fixed_val+1) - 1 - print "fnum: {} dfnum: {} f_ind: {} fixed_val: {}".format(fnum, dfnum, f_ind, fixed_val) + print("fnum: {} dfnum: {} f_ind: {} fixed_val: {}".format(fnum, dfnum, f_ind, fixed_val)) #Make grad checker with this param moving, note that set_params is NOT being called #The parameter is being set directly with __setattr__ #Check only the parameter and function value we wish to check at a time @@ -70,12 +70,12 @@ def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, if grad.grep_param_names(constrain_param): constraint(constrain_param, grad) else: - print "parameter didn't exist" - print constrain_param, " ", constraint + print("parameter didn't exist") + 
print(constrain_param, " ", constraint) if randomize: grad.randomize() if verbose: - print grad + print(grad) grad.checkgrad(verbose=1) if not grad.checkgrad(verbose=True): gradchecking = False @@ -350,8 +350,8 @@ class TestNoiseModels(object): ############# @with_setup(setUp, tearDown) def t_logpdf(self, model, Y, f): - print "\n{}".format(inspect.stack()[0][3]) - print model + print("\n{}".format(inspect.stack()[0][3])) + print(model) #print model._get_params() np.testing.assert_almost_equal( model.pdf(f.copy(), Y.copy()).prod(), @@ -360,33 +360,33 @@ class TestNoiseModels(object): @with_setup(setUp, tearDown) def t_dlogpdf_df(self, model, Y, f): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) self.description = "\n{}".format(inspect.stack()[0][3]) logpdf = functools.partial(model.logpdf, y=Y) dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y) grad = GradientChecker(logpdf, dlogpdf_df, f.copy(), 'g') grad.randomize() - print model + print(model) assert grad.checkgrad(verbose=1) @with_setup(setUp, tearDown) def t_d2logpdf_df2(self, model, Y, f): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y) d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y) grad = GradientChecker(dlogpdf_df, d2logpdf_df2, f.copy(), 'g') grad.randomize() - print model + print(model) assert grad.checkgrad(verbose=1) @with_setup(setUp, tearDown) def t_d3logpdf_df3(self, model, Y, f): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y) d3logpdf_df3 = functools.partial(model.d3logpdf_df3, y=Y) grad = GradientChecker(d2logpdf_df2, d3logpdf_df3, f.copy(), 'g') grad.randomize() - print model + print(model) assert grad.checkgrad(verbose=1) ############## @@ -394,8 +394,8 @@ class TestNoiseModels(object): ############## @with_setup(setUp, tearDown) def 
t_dlogpdf_dparams(self, model, Y, f, params, params_names, param_constraints): - print "\n{}".format(inspect.stack()[0][3]) - print model + print("\n{}".format(inspect.stack()[0][3])) + print(model) assert ( dparam_checkgrad(model.logpdf, model.dlogpdf_dtheta, params, params_names, args=(f, Y), constraints=param_constraints, @@ -404,8 +404,8 @@ class TestNoiseModels(object): @with_setup(setUp, tearDown) def t_dlogpdf_df_dparams(self, model, Y, f, params, params_names, param_constraints): - print "\n{}".format(inspect.stack()[0][3]) - print model + print("\n{}".format(inspect.stack()[0][3])) + print(model) assert ( dparam_checkgrad(model.dlogpdf_df, model.dlogpdf_df_dtheta, params, params_names, args=(f, Y), constraints=param_constraints, @@ -414,8 +414,8 @@ class TestNoiseModels(object): @with_setup(setUp, tearDown) def t_d2logpdf2_df2_dparams(self, model, Y, f, params, params_names, param_constraints): - print "\n{}".format(inspect.stack()[0][3]) - print model + print("\n{}".format(inspect.stack()[0][3])) + print(model) assert ( dparam_checkgrad(model.d2logpdf_df2, model.d2logpdf_df2_dtheta, params, params_names, args=(f, Y), constraints=param_constraints, @@ -427,7 +427,7 @@ class TestNoiseModels(object): ################ @with_setup(setUp, tearDown) def t_dlogpdf_dlink(self, model, Y, f, link_f_constraints): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) logpdf = functools.partial(model.logpdf_link, y=Y) dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y) grad = GradientChecker(logpdf, dlogpdf_dlink, f.copy(), 'g') @@ -437,13 +437,13 @@ class TestNoiseModels(object): constraint('g', grad) grad.randomize() - print grad - print model + print(grad) + print(model) assert grad.checkgrad(verbose=1) @with_setup(setUp, tearDown) def t_d2logpdf_dlink2(self, model, Y, f, link_f_constraints): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) dlogpdf_dlink = 
functools.partial(model.dlogpdf_dlink, y=Y) d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y) grad = GradientChecker(dlogpdf_dlink, d2logpdf_dlink2, f.copy(), 'g') @@ -453,13 +453,13 @@ class TestNoiseModels(object): constraint('g', grad) grad.randomize() - print grad - print model + print(grad) + print(model) assert grad.checkgrad(verbose=1) @with_setup(setUp, tearDown) def t_d3logpdf_dlink3(self, model, Y, f, link_f_constraints): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y) d3logpdf_dlink3 = functools.partial(model.d3logpdf_dlink3, y=Y) grad = GradientChecker(d2logpdf_dlink2, d3logpdf_dlink3, f.copy(), 'g') @@ -469,8 +469,8 @@ class TestNoiseModels(object): constraint('g', grad) grad.randomize() - print grad - print model + print(grad) + print(model) assert grad.checkgrad(verbose=1) ################# @@ -478,8 +478,8 @@ class TestNoiseModels(object): ################# @with_setup(setUp, tearDown) def t_dlogpdf_link_dparams(self, model, Y, f, params, param_names, param_constraints): - print "\n{}".format(inspect.stack()[0][3]) - print model + print("\n{}".format(inspect.stack()[0][3])) + print(model) assert ( dparam_checkgrad(model.logpdf_link, model.dlogpdf_link_dtheta, params, param_names, args=(f, Y), constraints=param_constraints, @@ -488,8 +488,8 @@ class TestNoiseModels(object): @with_setup(setUp, tearDown) def t_dlogpdf_dlink_dparams(self, model, Y, f, params, param_names, param_constraints): - print "\n{}".format(inspect.stack()[0][3]) - print model + print("\n{}".format(inspect.stack()[0][3])) + print(model) assert ( dparam_checkgrad(model.dlogpdf_dlink, model.dlogpdf_dlink_dtheta, params, param_names, args=(f, Y), constraints=param_constraints, @@ -498,8 +498,8 @@ class TestNoiseModels(object): @with_setup(setUp, tearDown) def t_d2logpdf2_dlink2_dparams(self, model, Y, f, params, param_names, param_constraints): - print 
"\n{}".format(inspect.stack()[0][3]) - print model + print("\n{}".format(inspect.stack()[0][3])) + print(model) assert ( dparam_checkgrad(model.d2logpdf_dlink2, model.d2logpdf_dlink2_dtheta, params, param_names, args=(f, Y), constraints=param_constraints, @@ -511,7 +511,7 @@ class TestNoiseModels(object): ################ @with_setup(setUp, tearDown) def t_laplace_fit_rbf_white(self, model, X, Y, f, step, param_vals, param_names, constraints): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) #Normalize Y = Y/Y.max() white_var = 1e-6 @@ -524,7 +524,7 @@ class TestNoiseModels(object): for constrain_param, constraint in constraints: constraint(constrain_param, m) - print m + print(m) m.randomize() #Set params @@ -533,7 +533,7 @@ class TestNoiseModels(object): m[name] = param_vals[param_num] #m.optimize(max_iters=8) - print m + print(m) #if not m.checkgrad(step=step): #m.checkgrad(verbose=1, step=step) #NOTE this test appears to be stochastic for some likelihoods (student t?) 
@@ -546,7 +546,7 @@ class TestNoiseModels(object): ########### @with_setup(setUp, tearDown) def t_ep_fit_rbf_white(self, model, X, Y, f, step, param_vals, param_names, constraints): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) #Normalize Y = Y/Y.max() white_var = 1e-6 @@ -561,7 +561,7 @@ class TestNoiseModels(object): constraints[param_num](name, m) m.randomize() - print m + print(m) assert m.checkgrad(verbose=1, step=step) @@ -598,7 +598,7 @@ class LaplaceTests(unittest.TestCase): self.X = None def test_gaussian_d2logpdf_df2_2(self): - print "\n{}".format(inspect.stack()[0][3]) + print("\n{}".format(inspect.stack()[0][3])) self.Y = None self.N = 2 @@ -648,16 +648,16 @@ class LaplaceTests(unittest.TestCase): m2.randomize() if debug: - print m1 - print m2 + print(m1) + print(m2) optimizer = 'scg' - print "Gaussian" + print("Gaussian") m1.optimize(optimizer, messages=debug) - print "Laplace Gaussian" + print("Laplace Gaussian") m2.optimize(optimizer, messages=debug) if debug: - print m1 - print m2 + print(m1) + print(m2) m2[:] = m1[:] @@ -706,5 +706,5 @@ class LaplaceTests(unittest.TestCase): self.assertTrue(m2.checkgrad(verbose=True)) if __name__ == "__main__": - print "Running unit tests" + print("Running unit tests") unittest.main() diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 559014f7..f9ff6402 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -153,19 +153,19 @@ class MiscTests(unittest.TestCase): def test_big_model(self): m = GPy.examples.dimensionality_reduction.mrd_simulation(optimize=0, plot=0, plot_sim=0) m.X.fix() - print m + print(m) m.unfix() m.checkgrad() - print m + print(m) m.fix() - print m + print(m) m.inducing_inputs.unfix() - print m + print(m) m.checkgrad() m.unfix() m.checkgrad() m.checkgrad() - print m + print(m) def test_model_set_params(self): m = GPy.models.GPRegression(self.X, self.Y) @@ -176,7 +176,7 @@ class MiscTests(unittest.TestCase): 
m['.*var'] -= .1 np.testing.assert_equal(m.kern.lengthscale, lengthscale) m.optimize() - print m + print(m) def test_model_updates(self): Y1 = np.random.normal(0, 1, (40, 13)) @@ -201,7 +201,7 @@ class MiscTests(unittest.TestCase): Y = np.sin(X) + np.random.randn(20, 1) * 0.05 m = GPy.models.GPRegression(X, Y) m.optimize() - print m + print(m) class GradientTests(np.testing.TestCase): def setUp(self): @@ -523,5 +523,5 @@ class GradientTests(np.testing.TestCase): if __name__ == "__main__": - print "Running unit tests, please be (very) patient..." + print("Running unit tests, please be (very) patient...") unittest.main() diff --git a/GPy/testing/mpi_tests.py b/GPy/testing/mpi_tests.py index 5c489032..28a23288 100644 --- a/GPy/testing/mpi_tests.py +++ b/GPy/testing/mpi_tests.py @@ -84,7 +84,7 @@ except: if __name__ == "__main__": - print "Running unit tests, please be (very) patient..." + print("Running unit tests, please be (very) patient...") try: import mpi4py unittest.main() diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index 7c4f4ce2..431d535b 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -240,7 +240,7 @@ class ParameterizedTest(unittest.TestCase): self.p2.constrain_positive() m = TestLikelihood() - print m + print(m) val = m.p1.values.copy() self.assert_(m.p1.is_fixed) self.assert_(m.constraints[GPy.constraints.Logexp()].tolist(), [1]) @@ -248,9 +248,9 @@ class ParameterizedTest(unittest.TestCase): self.assertEqual(m.p1, val) def test_printing(self): - print self.test1 - print self.param - print self.test1[''] + print(self.test1) + print(self.param) + print(self.test1['']) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.test_add_parameter'] diff --git a/GPy/testing/prior_tests.py b/GPy/testing/prior_tests.py index 6a61fbb5..ca03ad93 100644 --- a/GPy/testing/prior_tests.py +++ b/GPy/testing/prior_tests.py @@ -110,5 +110,5 @@ class PriorTests(unittest.TestCase): if 
__name__ == "__main__": - print "Running unit tests, please be (very) patient..." + print("Running unit tests, please be (very) patient...") unittest.main() From 300bdb960bd9099e94580cbc2a739011b703d626 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 19:04:25 +0000 Subject: [PATCH 41/99] Print fixes for Python 3 --- GPy/plotting/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/plotting/__init__.py b/GPy/plotting/__init__.py index 652bc628..9dd84441 100644 --- a/GPy/plotting/__init__.py +++ b/GPy/plotting/__init__.py @@ -4,4 +4,4 @@ try: from . import matplot_dep except (ImportError, NameError): - print 'Fail to load GPy.plotting.matplot_dep.' \ No newline at end of file + print('Fail to load GPy.plotting.matplot_dep.') \ No newline at end of file From b4ad1b2d733f40a1584abd28c9dc67e8743016e4 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 19:32:38 +0000 Subject: [PATCH 42/99] Python3 compatibility fixes --- GPy/core/parameterization/index_operations.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index e5273e55..01a13c8b 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -66,7 +66,11 @@ class ParameterIndexOperations(object): self.add(t, i) def iteritems(self): - return self._properties.iteritems() + try: + return self._properties.iteritems() + except AttributeError: + #Changed this from iteritems to items for Py3 compatibility. It didn't break the test suite. 
+ return self._properties.items() def items(self): return self._properties.items() @@ -101,7 +105,11 @@ class ParameterIndexOperations(object): return reduce(lambda a,b: a+b.size, self.iterindices(), 0) def iterindices(self): - return self._properties.itervalues() + try: + return self._properties.itervalues() + except AttributeError: + #Changed this from itervalues to values for Py3 compatibility. It didn't break the test suite. + return self._properties.values() def indices(self): return self._properties.values() From 58225c018828cc6afa01f7f968eb085701e6ea1e Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 27 Feb 2015 19:36:56 +0000 Subject: [PATCH 43/99] import reduce from functools for py3 compatibility --- GPy/core/parameterization/parameter_core.py | 1 + 1 file changed, 1 insertion(+) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 02cb0a12..b38d9678 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -18,6 +18,7 @@ import numpy as np import re import logging from .updateable import Updateable +from functools import reduce class HierarchyError(Exception): """ From 1c6cfe2d81437d264ee72871ebf732dfafe7ceb3 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 11:54:11 +0000 Subject: [PATCH 44/99] Changed metaclass syntax to be Py3 compatible. 
This breaks Py2 compatibility --- GPy/core/parameterization/parameterized.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index 62914636..db945016 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -27,7 +27,7 @@ class ParametersChangedMeta(type): self.parameters_changed() return self -class Parameterized(Parameterizable): +class Parameterized(Parameterizable,metaclass=ParametersChangedMeta): """ Parameterized class @@ -73,6 +73,7 @@ class Parameterized(Parameterizable): # Metaclass for parameters changed after init. # This makes sure, that parameters changed will always be called after __init__ # **Never** call parameters_changed() yourself + #This is ignored in Python 3 -- you need to put the meta class in the __metaclass__ = ParametersChangedMeta #=========================================================================== def __init__(self, name=None, parameters=[], *a, **kw): From 6b1e20027a8f17e4be97394e9c608686628c677e Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 12:20:41 +0000 Subject: [PATCH 45/99] reduce fix for Python 3 --- GPy/util/caching.py | 1 + 1 file changed, 1 insertion(+) diff --git a/GPy/util/caching.py b/GPy/util/caching.py index b1419aec..196ce343 100644 --- a/GPy/util/caching.py +++ b/GPy/util/caching.py @@ -2,6 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) from ..core.parameterization.observable import Observable import collections, weakref +from functools import reduce class Cacher(object): def __init__(self, operation, limit=5, ignore_args=(), force_kwargs=()): From 8c552c2509a89bdc198e70a90c8db1d52dc5d78b Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 12:39:39 +0000 Subject: [PATCH 46/99] Fixed string encoding for Python 3 --- GPy/util/linalg.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff 
--git a/GPy/util/linalg.py b/GPy/util/linalg.py index 9fd44a8d..88ecf011 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -353,11 +353,11 @@ def tdot_blas(mat, out=None): # of C order. However, I tried that and had errors with large matrices: # http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot_broken.py mat = np.asfortranarray(mat) - TRANS = c_char('n') + TRANS = c_char('n'.encode('ascii')) N = c_int(mat.shape[0]) K = c_int(mat.shape[1]) LDA = c_int(mat.shape[0]) - UPLO = c_char('l') + UPLO = c_char('l'.encode('ascii')) ALPHA = c_double(1.0) A = mat.ctypes.data_as(ctypes.c_void_p) BETA = c_double(0.0) From 79f4b26f4d6bec363c2cbe3857ed6844193c7501 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 12:41:23 +0000 Subject: [PATCH 47/99] Fixed integer division for Python 3 compat --- GPy/util/linalg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index 88ecf011..7f1a28f3 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -362,7 +362,7 @@ def tdot_blas(mat, out=None): A = mat.ctypes.data_as(ctypes.c_void_p) BETA = c_double(0.0) C = out.ctypes.data_as(ctypes.c_void_p) - LDC = c_int(np.max(out.strides) / 8) + LDC = c_int(np.max(out.strides) // 8) dsyrk(byref(UPLO), byref(TRANS), byref(N), byref(K), byref(ALPHA), A, byref(LDA), byref(BETA), C, byref(LDC)) From b4a3253e26926125896e9208d05e9cc04f316884 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 18:22:44 +0000 Subject: [PATCH 48/99] Ensure that object.__new__ never gets called with arguments --- GPy/core/parameterization/priors.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py index 298ca2d2..432e2473 100644 --- a/GPy/core/parameterization/priors.py +++ b/GPy/core/parameterization/priors.py @@ -15,8 +15,12 @@ class Prior(object): _instance = None def __new__(cls, *args, **kwargs): if not 
cls._instance or cls._instance.__class__ is not cls: - cls._instance = super(Prior, cls).__new__(cls, *args, **kwargs) - return cls._instance + newfunc = super(Prior, cls).__new__ + if newfunc is object.__new__: + cls._instance = newfunc(cls) + else: + cls._instance = newfunc(cls, *args, **kwargs) + return cls._instance def pdf(self, x): return np.exp(self.lnpdf(x)) @@ -52,7 +56,11 @@ class Gaussian(Prior): for instance in cls._instances: if instance().mu == mu and instance().sigma == sigma: return instance() - o = super(Prior, cls).__new__(cls, mu, sigma) + newfunc = super(Prior, cls).__new__ + if newfunc is object.__new__: + o = newfunc(cls) + else: + o = newfunc(cls, mu, sigma) cls._instances.append(weakref.ref(o)) return cls._instances[-1]() From a6e28205e11df95348d148b22af9550f5381eee2 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 18:36:52 +0000 Subject: [PATCH 49/99] Ensure that object.__new__ never gets called with arguments --- GPy/core/parameterization/priors.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py index 432e2473..6c7f655f 100644 --- a/GPy/core/parameterization/priors.py +++ b/GPy/core/parameterization/priors.py @@ -148,7 +148,11 @@ class LogGaussian(Gaussian): for instance in cls._instances: if instance().mu == mu and instance().sigma == sigma: return instance() - o = super(Prior, cls).__new__(cls, mu, sigma) + newfunc = super(Prior, cls).__new__ + if newfunc is object.__new__: + o = newfunc(cls) + else: + o = newfunc(cls, mu, sigma) cls._instances.append(weakref.ref(o)) return cls._instances[-1]() From 1c6796e73d6c0bf2a56c9d679e5f01cd38aec7f7 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 18:40:25 +0000 Subject: [PATCH 50/99] import reduce from functools for Py3 compat --- GPy/core/parameterization/index_operations.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index 01a13c8b..1e97f488 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -4,6 +4,7 @@ import numpy from numpy.lib.function_base import vectorize from .lists_and_dicts import IntArrayDict +from functools import reduce def extract_properties_to_index(index, props): prop_index = dict() From 358488cf5d05e64ddc07c38c7e125aab01548220 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 18:50:58 +0000 Subject: [PATCH 51/99] Ensure that object.__new__ never gets called with arguments --- GPy/core/parameterization/priors.py | 6 +++++- GPy/core/parameterization/transformations.py | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py index 6c7f655f..a4bbecb3 100644 --- a/GPy/core/parameterization/priors.py +++ b/GPy/core/parameterization/priors.py @@ -270,7 +270,11 @@ class Gamma(Prior): for instance in cls._instances: if instance().a == a and instance().b == b: return instance() - o = super(Prior, cls).__new__(cls, a, b) + newfunc = super(Prior, cls).__new__ + if newfunc is object.__new__: + o = newfunc(cls) + else: + o = newfunc(cls, a, b) cls._instances.append(weakref.ref(o)) return cls._instances[-1]() diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py index 05051c92..7e15cee9 100644 --- a/GPy/core/parameterization/transformations.py +++ b/GPy/core/parameterization/transformations.py @@ -468,7 +468,11 @@ class Logistic(Transformation): for instance in cls._instances: if instance().lower == lower and instance().upper == upper: return instance() - o = super(Transformation, cls).__new__(cls, lower, upper, *args, **kwargs) + newfunc = super(Transformation, cls).__new__ + if newfunc is object.__new__: + o = newfunc(cls) + else: + o = newfunc(cls, lower, 
upper, *args, **kwargs) cls._instances.append(weakref.ref(o)) return cls._instances[-1]() def __init__(self, lower, upper): From e0d0f2e633c18dd1e6ddc324957624105c1128d3 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 18:59:14 +0000 Subject: [PATCH 52/99] from functools import reduce for Py3 compat --- GPy/kern/_src/add.py | 1 + GPy/kern/_src/kern.py | 1 + GPy/testing/parameterized_tests.py | 1 + GPy/testing/pickle_tests.py | 1 + 4 files changed, 4 insertions(+) diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 17c0027a..82c84c52 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -5,6 +5,7 @@ import numpy as np import itertools from ...util.caching import Cache_this from .kern import CombinationKernel +from functools import reduce class Add(CombinationKernel): """ diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index c4fadd57..6ccd315b 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -7,6 +7,7 @@ from ...core.parameterization.parameterized import Parameterized from .kernel_slice_operations import KernCallsViaSlicerMeta from ...util.caching import Cache_this from GPy.core.parameterization.observable_array import ObsAr +from functools import reduce diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index 431d535b..1ab0fd32 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -12,6 +12,7 @@ from GPy.core.parameterization.transformations import NegativeLogexp, Logistic from GPy.core.parameterization.parameterized import Parameterized from GPy.core.parameterization.param import Param from GPy.core.parameterization.index_operations import ParameterIndexOperations +from functools import reduce class ArrayCoreTest(unittest.TestCase): def setUp(self): diff --git a/GPy/testing/pickle_tests.py b/GPy/testing/pickle_tests.py index c79e9914..777d0d6b 100644 --- a/GPy/testing/pickle_tests.py +++ b/GPy/testing/pickle_tests.py @@ -19,6 
+19,7 @@ from GPy.kern._src.static import Bias, White from GPy.examples.dimensionality_reduction import mrd_simulation from GPy.core.parameterization.variational import NormalPosterior from GPy.models.gp_regression import GPRegression +from functools import reduce def toy_model(): X = np.linspace(0,1,50)[:, None] From 40f5f4b865d961e2a9449d976371ebc0d556f94f Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 19:08:01 +0000 Subject: [PATCH 53/99] has_key has been removed from Python 3 --- GPy/core/parameterization/parameter_core.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index b38d9678..7af40860 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -139,9 +139,9 @@ class Pickleable(object): which = self which.traverse_parents(parents.append) # collect parents for p in parents: - if not memo.has_key(id(p)):memo[id(p)] = None # set all parents to be None, so they will not be copied - if not memo.has_key(id(self.gradient)):memo[id(self.gradient)] = None # reset the gradient - if not memo.has_key(id(self._fixes_)):memo[id(self._fixes_)] = None # fixes have to be reset, as this is now highest parent + if not id(p) in memo :memo[id(p)] = None # set all parents to be None, so they will not be copied + if not id(self.gradient) in memo:memo[id(self.gradient)] = None # reset the gradient + if not id(self._fixes_) in memo :memo[id(self._fixes_)] = None # fixes have to be reset, as this is now highest parent copy = copy.deepcopy(self, memo) # and start the copy copy._parent_index_ = None copy._trigger_params_changed() From 8d66b7b4f03217368258e67581b881b0bc0d8a78 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 28 Feb 2015 19:15:46 +0000 Subject: [PATCH 54/99] Need to explicitly turn a range object to a list in for these tests --- GPy/testing/parameterized_tests.py | 10 
+++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index 1ab0fd32..f3e0863f 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -121,15 +121,15 @@ class ParameterizedTest(unittest.TestCase): def test_default_constraints(self): self.assertIs(self.rbf.variance.constraints._param_index_ops, self.rbf.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) - self.assertListEqual(self.rbf.constraints.indices()[0].tolist(), range(2)) + self.assertListEqual(self.rbf.constraints.indices()[0].tolist(), list(range(2))) from GPy.core.parameterization.transformations import Logexp kern = self.test1.kern self.test1.unlink_parameter(kern) - self.assertListEqual(kern.constraints[Logexp()].tolist(), range(3)) + self.assertListEqual(kern.constraints[Logexp()].tolist(), list(range(3))) def test_constraints(self): self.rbf.constrain(GPy.transformations.Square(), False) - self.assertListEqual(self.test1.constraints[GPy.transformations.Square()].tolist(), range(self.param.size, self.param.size+self.rbf.size)) + self.assertListEqual(self.test1.constraints[GPy.transformations.Square()].tolist(), list(range(self.param.size, self.param.size+self.rbf.size))) self.assertListEqual(self.test1.constraints[GPy.transformations.Logexp()].tolist(), [self.param.size+self.rbf.size]) self.test1.kern.unlink_parameter(self.rbf) @@ -182,8 +182,8 @@ class ParameterizedTest(unittest.TestCase): def test_add_parameter_in_hierarchy(self): self.test1.kern.rbf.link_parameter(Param("NEW", np.random.rand(2), NegativeLogexp()), 1) - self.assertListEqual(self.test1.constraints[NegativeLogexp()].tolist(), range(self.param.size+1, self.param.size+1 + 2)) - self.assertListEqual(self.test1.constraints[GPy.transformations.Logistic(0,1)].tolist(), range(self.param.size)) + 
self.assertListEqual(self.test1.constraints[NegativeLogexp()].tolist(), list(range(self.param.size+1, self.param.size+1 + 2))) + self.assertListEqual(self.test1.constraints[GPy.transformations.Logistic(0,1)].tolist(), list(range(self.param.size))) self.assertListEqual(self.test1.constraints[GPy.transformations.Logexp(0,1)].tolist(), np.r_[50, 53:55].tolist()) def test_regular_expression_misc(self): From 560950466d63eaa7b78d8a8215bdb8f5e228b818 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sun, 1 Mar 2015 09:24:22 +0000 Subject: [PATCH 55/99] itertools fixes from 2to3 --- GPy/core/model.py | 2 +- GPy/core/parameterization/param.py | 14 +++++++------- GPy/core/parameterization/parameterized.py | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 08a4ea25..9521733c 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -364,7 +364,7 @@ class Model(Parameterized): gradient = self._grads(x).copy() np.where(gradient == 0, 1e-312, gradient) ret = True - for nind, xind in itertools.izip(param_index, transformed_index): + for nind, xind in zip(param_index, transformed_index): xx = x.copy() xx[xind] += step f1 = self._objective(xx) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index fbbb59ed..c7e76c98 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -279,7 +279,7 @@ class Param(Parameterizable, ObsAr): .tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;} .tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;} .tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;} -"""] + [''] + [header] + ["".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or 
''), i=i) for i, x, c, t, p in itertools.izip(indices, vals, constr_matrix, ties, prirs)] + ["
{i}{x}{c}{p}{t}
"]) +"""] + [''] + [header] + ["".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in zip(indices, vals, constr_matrix, ties, prirs)] + ["
{i}{x}{c}{p}{t}
"]) def __str__(self, constr_matrix=None, indices=None, prirs=None, ties=None, lc=None, lx=None, li=None, lp=None, lt=None, only_name=False): filter_ = self._current_slice_ @@ -300,7 +300,7 @@ class Param(Parameterizable, ObsAr): if only_name: header = header_format.format(lc, lx, li, lt, lp, ' ', x=self.hierarchy_name(), c=sep*lc, i=sep*li, t=sep*lt, p=sep*lp) # nice header for printing else: header = header_format.format(lc, lx, li, lt, lp, ' ', x=self.hierarchy_name(), c=__constraints_name__, i=__index_name__, t=__tie_name__, p=__priors_name__) # nice header for printing if not ties: ties = itertools.cycle(['']) - return "\n".join([header] + [" {i!s:^{3}s} | {x: >{1}.{2}g} | {c:^{0}s} | {p:^{5}s} | {t:^{4}s} ".format(lc, lx, __precision__, li, lt, lp, x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in itertools.izip(indices, vals, constr_matrix, ties, prirs)]) # return all the constraints with right indices + return "\n".join([header] + [" {i!s:^{3}s} | {x: >{1}.{2}g} | {c:^{0}s} | {p:^{5}s} | {t:^{4}s} ".format(lc, lx, __precision__, li, lt, lp, x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in zip(indices, vals, constr_matrix, ties, prirs)]) # return all the constraints with right indices # except: return super(Param, self).__str__() class ParamConcatenation(object): @@ -429,14 +429,14 @@ class ParamConcatenation(object): params = self.params constr_matrices, ties_matrices, prior_matrices = zip(*map(f, params)) indices = [p._indices() for p in params] - lc = max([p._max_len_names(cm, __constraints_name__) for p, cm in itertools.izip(params, constr_matrices)]) + lc = max([p._max_len_names(cm, __constraints_name__) for p, cm in zip(params, constr_matrices)]) lx = max([p._max_len_values() for p in params]) - li = max([p._max_len_index(i) for p, i in itertools.izip(params, indices)]) - lt = max([p._max_len_names(tm, __tie_name__) for p, tm in itertools.izip(params, 
ties_matrices)]) - lp = max([p._max_len_names(pm, __constraints_name__) for p, pm in itertools.izip(params, prior_matrices)]) + li = max([p._max_len_index(i) for p, i in zip(params, indices)]) + lt = max([p._max_len_names(tm, __tie_name__) for p, tm in zip(params, ties_matrices)]) + lp = max([p._max_len_names(pm, __constraints_name__) for p, pm in zip(params, prior_matrices)]) strings = [] start = True - for p, cm, i, tm, pm in itertools.izip(params,constr_matrices,indices,ties_matrices,prior_matrices): + for p, cm, i, tm, pm in zip(params,constr_matrices,indices,ties_matrices,prior_matrices): strings.append(p.__str__(constr_matrix=cm, indices=i, prirs=pm, ties=tm, lc=lc, lx=lx, li=li, lp=lp, lt=lt, only_name=(1-start))) start = False return "\n".join(strings) diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index db945016..27ecbc1c 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -269,7 +269,7 @@ class Parameterized(Parameterizable,metaclass=ParametersChangedMeta): """ if not isinstance(regexp, _pattern_type): regexp = compile(regexp) found_params = [] - for n, p in itertools.izip(self.parameter_names(False, False, True), self.flattened_parameters): + for n, p in zip(self.parameter_names(False, False, True), self.flattened_parameters): if regexp.match(n) is not None: found_params.append(p) return found_params @@ -380,7 +380,7 @@ class Parameterized(Parameterizable,metaclass=ParametersChangedMeta): pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]]) format_spec = "{{name:<{0}s}}{{desc:>{1}s}}{{const:^{2}s}}{{pri:^{3}s}}{{t:^{4}s}}".format(nl, sl, cl, pl, tl) to_print = [] - for n, d, c, t, p in itertools.izip(names, desc, constrs, ts, prirs): + for n, d, c, t, p in zip(names, desc, constrs, ts, prirs): to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p)) sep = '-' * (nl + sl + cl + + pl + tl + 8 * 2 + 3) if header: @@ -415,7 
+415,7 @@ class Parameterized(Parameterizable,metaclass=ParametersChangedMeta): pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]]) format_spec = " \033[1m{{name:<{0}s}}\033[0;0m | {{desc:>{1}s}} | {{const:^{2}s}} | {{pri:^{3}s}} | {{t:^{4}s}}".format(nl, sl, cl, pl, tl) to_print = [] - for n, d, c, t, p in itertools.izip(names, desc, constrs, ts, prirs): + for n, d, c, t, p in zip(names, desc, constrs, ts, prirs): to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p)) sep = '-' * (nl + sl + cl + + pl + tl + 8 * 2 + 3) if header: From a0dc90596c348bb75459a8175baf4f6916362208 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sun, 1 Mar 2015 10:17:21 +0000 Subject: [PATCH 56/99] Commented out weave functions for Py3 support --- GPy/util/choleskies.py | 108 ++++++++++++++++++++--------------------- 1 file changed, 52 insertions(+), 56 deletions(-) diff --git a/GPy/util/choleskies.py b/GPy/util/choleskies.py index 606229f7..c8c8227f 100644 --- a/GPy/util/choleskies.py +++ b/GPy/util/choleskies.py @@ -2,10 +2,9 @@ # Licensed under the GNU GPL version 3.0 import numpy as np -from scipy import weave +#from scipy import weave from . import linalg - def safe_root(N): i = np.sqrt(N) j = int(i) @@ -13,58 +12,58 @@ def safe_root(N): raise ValueError("N is not square!") return j -def flat_to_triang(flat): - """take a matrix N x D and return a M X M x D array where +#def flat_to_triang(flat): +# """take a matrix N x D and return a M X M x D array where +# +# N = M(M+1)/2 +# +# the lower triangluar portion of the d'th slice of the result is filled by the d'th column of flat. 
+# """ +# N, D = flat.shape +# M = (-1 + safe_root(8*N+1))/2 +# ret = np.zeros((M, M, D)) +# flat = np.ascontiguousarray(flat) +# +# code = """ +# int count = 0; +# for(int m=0; m Date: Sun, 1 Mar 2015 10:18:27 +0000 Subject: [PATCH 57/99] Commented out weave functions for Py3 support --- GPy/kern/_src/stationary.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index 5052b7b0..0465a556 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -8,10 +8,15 @@ from ...core.parameterization.transformations import Logexp from ...util.linalg import tdot from ... import util import numpy as np -from scipy import integrate, weave +from scipy import integrate from ...util.config import config # for assesing whether to use weave from ...util.caching import Cache_this +try: + from scipy import weave +except ImportError: + config.set('weave', 'working', 'False') + class Stationary(Kern): """ Stationary kernels (covariance functions). @@ -167,9 +172,9 @@ class Stationary(Kern): except: print("\n Weave compilation failed. 
Falling back to (slower) numpy implementation\n") config.set('weave', 'working', 'False') - self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)]) + self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in range(self.input_dim)]) else: - self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)]) + self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in range(self.input_dim)]) else: r = self._scaled_dist(X, X2) self.lengthscale.gradient = -np.sum(dL_dr*r)/self.lengthscale @@ -234,7 +239,7 @@ class Stationary(Kern): #the lower memory way with a loop ret = np.empty(X.shape, dtype=np.float64) - for q in xrange(self.input_dim): + for q in range(self.input_dim): np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q]) ret /= self.lengthscale**2 From 153a110a1dbf1f4a9b03b66085c4eb3f50d5d88b Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sun, 1 Mar 2015 10:25:52 +0000 Subject: [PATCH 58/99] Updated README.md to reflect recent Py3 work --- README.md | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 98613ce5..a28bc827 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,16 @@ A Gaussian processes framework in Python. Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GPy.png) +### Moving to Python 3 +Work is underway to make GPy run on Python 3. + +* Python 2.x compatibility is currently broken in this fork +* Running the 'dict' fixer from 2to3 caused more problems than it fixed! Trying to figure out why. 
+* The test suite runs but with fewer tests for some reason -- not sure why yet. +* Many tests in the suite fail! Don't even think about using this fork for production use +* All weave functions not covered by the test suite are simply commented out. Can add equivalents later as test functions become available +* Examples that required optimised versions of functions for speed reasons would be valued + ### Citation @Misc{gpy2014, @@ -109,24 +119,7 @@ Run nosetests from the root directory of the repository: or from within IPython import GPy; GPy.tests() - -### Moving to Python 3 -Work is underway to make GPy run on Python 3. We are not there yet! Changes performed so far have retained compatibility with Python 2.6 and above. - -Work done so far: - -* Used 2to3 to fix relative imports -* Used 2to3 to convert print from statement to function. Some advanced uses of print meant that this could not be done in a way that retained compatibility with old versions of Python. The oldest version of Python that is supported by this version is 2.6 due to the required future imports. -* Used 2to3 to convert exceptions to Python 3 friendly versions. There are a few oustanding string exceptions to take care of that 2to3 doesn't handle. Will need to do these manually -* Handled the different imports required for ConfigParser/configparser in Py2/Py3 -* In utils/linalg.py: - * Commented out the function cholupdate(L, x) since it doesn't appear to be used. 
Its definitely not in the tests.s - Put the import for scipy.weave in a try/except block so that it will gracefully fail in Py3 -Fixed a couple of urllib2 issues - had to be done mannual since 2to3 didn't help - - - - + ## Funding Acknowledgements From 6aca7c2765ef4e81d93e929510d12778a5ed5331 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Mon, 2 Mar 2015 19:39:50 +0000 Subject: [PATCH 59/99] Changed references to iteritems() to items() for Py3 compat --- GPy/core/parameterization/index_operations.py | 56 ++++++++++++------- GPy/core/parameterization/param.py | 12 +++- GPy/core/parameterization/parameter_core.py | 34 ++++++++--- 3 files changed, 70 insertions(+), 32 deletions(-) diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index 1e97f488..4050dc55 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -63,16 +63,15 @@ class ParameterIndexOperations(object): def __init__(self, constraints=None): self._properties = IntArrayDict() if constraints is not None: - for t, i in constraints.iteritems(): + #python 3 fix + #for t, i in constraints.iteritems(): + for t, i in constraints.items(): self.add(t, i) - def iteritems(self): - try: - return self._properties.iteritems() - except AttributeError: - #Changed this from iteritems to items for Py3 compatibility. It didn't break the test suite. 
- return self._properties.items() - + #iteritems has gone in python 3 + #def iteritems(self): + # return self._properties.iteritems() + def items(self): return self._properties.items() @@ -159,14 +158,18 @@ class ParameterIndexOperations(object): return numpy.array([]).astype(int) def update(self, parameter_index_view, offset=0): - for i, v in parameter_index_view.iteritems(): + #py3 fix + #for i, v in parameter_index_view.iteritems(): + for i, v in parameter_index_view.items(): self.add(i, v+offset) def copy(self): return self.__deepcopy__(None) def __deepcopy__(self, memo): - return ParameterIndexOperations(dict(self.iteritems())) + #py3 fix + #return ParameterIndexOperations(dict(self.iteritems())) + return ParameterIndexOperations(dict(self.items())) def __getitem__(self, prop): return self._properties[prop] @@ -204,22 +207,25 @@ class ParameterIndexOperationsView(object): def _filter_index(self, ind): return ind[(ind >= self._offset) * (ind < (self._offset + self._size))] - self._offset - - def iteritems(self): - for i, ind in self._param_index_ops.iteritems(): + #iteritems has gone in python 3. 
It has been renamed items() + def items(self): + for i, ind in self._param_index_ops.items(): ind2 = self._filter_index(ind) if ind2.size > 0: yield i, ind2 - - def items(self): - return [[i,v] for i,v in self.iteritems()] + + #Python 3 items() is now implemented as per py2 iteritems + #def items(self): + # return [[i,v] for i,v in self.iteritems()] def properties(self): return [i for i in self.iterproperties()] def iterproperties(self): - for i, _ in self.iteritems(): + #py3 fix + #for i, _ in self.iteritems(): + for i, _ in self.items(): yield i @@ -239,7 +245,9 @@ class ParameterIndexOperationsView(object): def iterindices(self): - for _, ind in self.iteritems(): + #py3 fix + #for _, ind in self.iteritems(): + for _, ind in self.items(): yield ind @@ -295,10 +303,14 @@ class ParameterIndexOperationsView(object): def __str__(self, *args, **kwargs): import pprint - return pprint.pformat(dict(self.iteritems())) + #py3 fixes + #return pprint.pformat(dict(self.iteritems())) + return pprint.pformat(dict(self.items())) def update(self, parameter_index_view, offset=0): - for i, v in parameter_index_view.iteritems(): + #py3 fixes + #for i, v in parameter_index_view.iteritems(): + for i, v in parameter_index_view.items(): self.add(i, v+offset) @@ -306,6 +318,8 @@ class ParameterIndexOperationsView(object): return self.__deepcopy__(None) def __deepcopy__(self, memo): - return ParameterIndexOperations(dict(self.iteritems())) + #py3 fix + #return ParameterIndexOperations(dict(self.iteritems())) + return ParameterIndexOperations(dict(self.items())) pass diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index c7e76c98..2d8c4d78 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -207,10 +207,14 @@ class Param(Parameterizable, ObsAr): return 0 @property def _constraints_str(self): - return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", 
self.constraints.iteritems()))] + #py3 fix + #return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.iteritems()))] + return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.items()))] @property def _priors_str(self): - return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.iteritems()))] + #py3 fix + #return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.iteritems()))] + return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.items()))] @property def _ties_str(self): return [''] @@ -336,7 +340,9 @@ class ParamConcatenation(object): level += 1 parent = parent._parent_ import operator - self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1))) + #py3 fix + #self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1))) + self.parents = map(lambda x: x[0], sorted(parents.tems(), key=operator.itemgetter(1))) #=========================================================================== # Get/set items, enable broadcasting #=========================================================================== diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 7af40860..c72661e4 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -164,7 +164,9 @@ class Pickleable(object): '_Cacher_wrap__cachers', # never pickle cachers ] dc = dict() - for k,v in self.__dict__.iteritems(): + #py3 fix + #for k,v in self.__dict__.iteritems(): + for k,v in self.__dict__.items(): if k not in ignore_list: dc[k] = v return dc @@ -427,7 +429,9 @@ class Indexable(Nameable, Updateable): """evaluate the prior""" if self.priors.size > 0: x = 
self.param_array - return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()), 0) + #py3 fix + #return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()), 0) + return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.items()), 0) return 0. def _log_prior_gradients(self): @@ -435,7 +439,9 @@ class Indexable(Nameable, Updateable): if self.priors.size > 0: x = self.param_array ret = np.zeros(x.size) - [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()] + #py3 fix + #[np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()] + [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()] return ret return 0. @@ -613,7 +619,9 @@ class OptimizationHandlable(Indexable): if not self._optimizer_copy_transformed: self._optimizer_copy_.flat = self.param_array.flat - [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] + #py3 fix + #[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__] + [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__] if self.has_parent() and (self.constraints[__fixed__].size != 0 or self._has_ties()): fixes = np.ones(self.size).astype(bool) fixes[self.constraints[__fixed__]] = FIXED @@ -642,11 +650,15 @@ class OptimizationHandlable(Indexable): if f is None: self.param_array.flat = p [np.put(self.param_array, ind, c.f(self.param_array.flat[ind])) - for c, ind in self.constraints.iteritems() if c != __fixed__] + #py3 fix + #for c, ind in self.constraints.iteritems() if c != __fixed__] + for c, ind in self.constraints.items() if c != __fixed__] else: self.param_array.flat[f] = p [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]])) - for c, ind in 
self.constraints.iteritems() if c != __fixed__] + #py3 fix + #for c, ind in self.constraints.iteritems() if c != __fixed__] + for c, ind in self.constraints.items() if c != __fixed__] #self._highest_parent_.tie.propagate_val() self._optimizer_copy_transformed = False @@ -681,7 +693,9 @@ class OptimizationHandlable(Indexable): constraint to it. """ self._highest_parent_.tie.collate_gradient() - [np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__] + #py3 fix + #[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__] + [np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__] if self._has_fixes(): return g[self._fixes_] return g @@ -691,6 +705,8 @@ class OptimizationHandlable(Indexable): constraint to it. """ self._highest_parent_.tie.collate_gradient() + #py3 fix + #[np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__] [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__] if self._has_fixes(): return g[self._fixes_] return g @@ -751,7 +767,9 @@ class OptimizationHandlable(Indexable): self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...) 
# now draw from prior where possible x = self.param_array.copy() - [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None] + #Py3 fix + #[np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None] + [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.items() if not p is None] unfixlist = np.ones((self.size,),dtype=np.bool) unfixlist[self.constraints[__fixed__]] = False self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist] From 46fc08a448a28c8690f41fa2ef8b5bfd8f7ebd05 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Mon, 2 Mar 2015 19:43:26 +0000 Subject: [PATCH 60/99] cPickle fix for Py3 --- GPy/core/parameterization/parameter_core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index c72661e4..1e101a9d 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -110,7 +110,10 @@ class Pickleable(object): it properly. :param protocol: pickling protocol to use, python-pickle for details. 
""" - import cPickle as pickle + try: #Py2 + import cPickle as pickle + except ImportError: #Py3 + import pickle if isinstance(f, str): with open(f, 'wb') as f: pickle.dump(self, f, protocol) From 82722305c35c89c1dbec75348bad3b79ea07e951 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Mon, 2 Mar 2015 19:50:41 +0000 Subject: [PATCH 61/99] Changed refereences to iteritems() to items() for Py3 compat --- GPy/core/parameterization/param.py | 2 +- GPy/core/parameterization/parameter_core.py | 2 +- GPy/testing/pickle_tests.py | 17 +++++++++++++---- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index 2d8c4d78..09369efa 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -342,7 +342,7 @@ class ParamConcatenation(object): import operator #py3 fix #self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1))) - self.parents = map(lambda x: x[0], sorted(parents.tems(), key=operator.itemgetter(1))) + self.parents = map(lambda x: x[0], sorted(parents.items(), key=operator.itemgetter(1))) #=========================================================================== # Get/set items, enable broadcasting #=========================================================================== diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 1e101a9d..bfe325a3 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -710,7 +710,7 @@ class OptimizationHandlable(Indexable): self._highest_parent_.tie.collate_gradient() #py3 fix #[np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__] - [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__] + [np.put(g, i, 
c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__] if self._has_fixes(): return g[self._fixes_] return g diff --git a/GPy/testing/pickle_tests.py b/GPy/testing/pickle_tests.py index 777d0d6b..251b7cce 100644 --- a/GPy/testing/pickle_tests.py +++ b/GPy/testing/pickle_tests.py @@ -29,7 +29,9 @@ def toy_model(): class ListDictTestCase(unittest.TestCase): def assertListDictEquals(self, d1, d2, msg=None): - for k,v in d1.iteritems(): + #py3 fix + #for k,v in d1.iteritems(): + for k,v in d1.items(): self.assertListEqual(list(v), list(d2[k]), msg) def assertArrayListEquals(self, l1, l2): for a1, a2 in itertools.izip(l1,l2): @@ -39,8 +41,13 @@ class Test(ListDictTestCase): def test_parameter_index_operations(self): pio = ParameterIndexOperations(dict(test1=np.array([4,3,1,6,4]), test2=np.r_[2:130])) piov = ParameterIndexOperationsView(pio, 20, 250) - self.assertListDictEquals(dict(piov.items()), dict(piov.copy().iteritems())) - self.assertListDictEquals(dict(pio.iteritems()), dict(pio.copy().items())) + #py3 fix + #self.assertListDictEquals(dict(piov.items()), dict(piov.copy().iteritems())) + self.assertListDictEquals(dict(piov.items()), dict(piov.copy().items())) + + #py3 fix + #self.assertListDictEquals(dict(pio.iteritems()), dict(pio.copy().items())) + self.assertListDictEquals(dict(pio.items()), dict(pio.copy().items())) self.assertArrayListEquals(pio.copy().indices(), pio.indices()) self.assertArrayListEquals(piov.copy().indices(), piov.indices()) @@ -55,7 +62,9 @@ class Test(ListDictTestCase): pickle.dump(piov, f) f.seek(0) pio2 = pickle.load(f) - self.assertListDictEquals(dict(piov.items()), dict(pio2.iteritems())) + #py3 fix + #self.assertListDictEquals(dict(piov.items()), dict(pio2.iteritems())) + self.assertListDictEquals(dict(piov.items()), dict(pio2.items())) def test_param(self): param = Param('test', np.arange(4*2).reshape(4,2)) From 5607bd9a193ce45de825d0c88d787aae6abc59b7 Mon Sep 17 00:00:00 2001 
From: Mike Croucher Date: Mon, 2 Mar 2015 20:13:21 +0000 Subject: [PATCH 62/99] Various Py3 related import fixes --- GPy/core/model.py | 1 + GPy/core/parameterization/param.py | 1 + GPy/models/mrd.py | 4 ++-- GPy/testing/pickle_tests.py | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 9521733c..097f7b5a 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -13,6 +13,7 @@ import itertools import sys from .verbose_optimization import VerboseOptimization # import numdifftools as ndt +from functools import reduce class Model(Parameterized): _fail_count = 0 # Count of failed optimization steps (see objective) diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py index 09369efa..1838f2bf 100644 --- a/GPy/core/parameterization/param.py +++ b/GPy/core/parameterization/param.py @@ -6,6 +6,7 @@ import numpy np = numpy from .parameter_core import Parameterizable, adjust_name_for_printing, Pickleable from .observable_array import ObsAr +from functools import reduce ###### printing __constraints_name__ = "Constraint" diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index f3e643c9..f56873fa 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -82,7 +82,7 @@ class MRD(BayesianGPLVMMiniBatch): assert len(self.names) == len(self.Ylist), "one name per dataset, or None if Ylist is a dict" if inference_method is None: - self.inference_method = InferenceMethodList([VarDTC() for _ in xrange(len(self.Ylist))]) + self.inference_method = InferenceMethodList([VarDTC() for _ in range(len(self.Ylist))]) else: assert isinstance(inference_method, InferenceMethodList), "please provide one inference method per Y in the list and provide it as InferenceMethodList, inference_method given: {}".format(inference_method) self.inference_method = inference_method @@ -338,4 +338,4 @@ class MRD(BayesianGPLVMMiniBatch): super(MRD, self).__setstate__(state) self.kern = self.bgplvms[0].kern self.likelihood 
= self.bgplvms[0].likelihood - self.parameters_changed() \ No newline at end of file + self.parameters_changed() diff --git a/GPy/testing/pickle_tests.py b/GPy/testing/pickle_tests.py index 251b7cce..fd1bf93c 100644 --- a/GPy/testing/pickle_tests.py +++ b/GPy/testing/pickle_tests.py @@ -34,7 +34,7 @@ class ListDictTestCase(unittest.TestCase): for k,v in d1.items(): self.assertListEqual(list(v), list(d2[k]), msg) def assertArrayListEquals(self, l1, l2): - for a1, a2 in itertools.izip(l1,l2): + for a1, a2 in zip(l1,l2): np.testing.assert_array_equal(a1, a2) class Test(ListDictTestCase): From 3faf345969b8aff9e859d707de3557004421327f Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Mon, 2 Mar 2015 20:22:04 +0000 Subject: [PATCH 63/99] Import fixes for Py3 --- GPy/kern/_src/periodic.py | 1 + GPy/kern/_src/prod.py | 1 + GPy/util/pca.py | 5 +++-- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/GPy/kern/_src/periodic.py b/GPy/kern/_src/periodic.py index 36fcb596..23818007 100644 --- a/GPy/kern/_src/periodic.py +++ b/GPy/kern/_src/periodic.py @@ -8,6 +8,7 @@ from ...util.linalg import mdot from ...util.decorators import silence_errors from ...core.parameterization.param import Param from ...core.parameterization.transformations import Logexp +from functools import reduce class Periodic(Kern): def __init__(self, input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name): diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index 84bd1e1d..27a15aab 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -5,6 +5,7 @@ import numpy as np from .kern import CombinationKernel from ...util.caching import Cache_this import itertools +from functools import reduce class Prod(CombinationKernel): """ diff --git a/GPy/util/pca.py b/GPy/util/pca.py index f87b9807..7168a28f 100644 --- a/GPy/util/pca.py +++ b/GPy/util/pca.py @@ -13,6 +13,7 @@ except: from numpy.linalg.linalg import LinAlgError from operator import setitem import 
itertools +from functools import reduce class PCA(object): """ @@ -47,7 +48,7 @@ class PCA(object): X_ = numpy.ma.masked_array(X, inan) self.mu = X_.mean(0).base self.sigma = X_.std(0).base - reduce(lambda y,x: setitem(x[0], x[1], x[2]), itertools.izip(X.T, inan.T, self.mu), None) + reduce(lambda y,x: setitem(x[0], x[1], x[2]), zip(X.T, inan.T, self.mu), None) X = X - self.mu X = X / numpy.where(self.sigma == 0, 1e-30, self.sigma) return X @@ -138,4 +139,4 @@ class PCA(object): pylab.tight_layout() except: pass - return plots \ No newline at end of file + return plots From 7eff1d984f2019ba56a799234c961b2354ed85b0 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Mon, 2 Mar 2015 20:35:19 +0000 Subject: [PATCH 64/99] Fixed 'dict changed size' errors --- GPy/core/parameterization/index_operations.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index 4050dc55..3d6ce64d 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -87,7 +87,7 @@ class ParameterIndexOperations(object): ind[toshift] += size def shift_left(self, start, size): - for v, ind in self.items(): + for v, ind in list(self.items()): todelete = (ind>=start) * (ind 0: yield i, ind2 From 57dd29a0f9826d3ecc360fc6119efbeb05bf8d39 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Mon, 2 Mar 2015 20:36:10 +0000 Subject: [PATCH 65/99] Updated README now that dict issues are fixed --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index a28bc827..e488fd50 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,6 @@ Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GP Work is underway to make GPy run on Python 3. * Python 2.x compatibility is currently broken in this fork -* Running the 'dict' fixer from 2to3 caused more problems than it fixed! Trying to figure out why. 
* The test suite runs but with fewer tests for some reason -- not sure why yet. * Many tests in the suite fail! Don't even think about using this fork for production use * All weave functions not covered by the test suite are simply commented out. Can add equivalents later as test functions become available * Examples that required optimised versions of functions for speed reasons would be valued From a4c8bb58074f3de548f0e20f4492586aeda2af1b Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Tue, 3 Mar 2015 09:31:52 +0000 Subject: [PATCH 66/99] Changed references to iteritems() to items() for Py3 compat --- GPy/testing/index_operations_tests.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/GPy/testing/index_operations_tests.py b/GPy/testing/index_operations_tests.py index e2895cd2..a97f1beb 100644 --- a/GPy/testing/index_operations_tests.py +++ b/GPy/testing/index_operations_tests.py @@ -121,7 +121,9 @@ class Test(unittest.TestCase): self.assertListEqual(removed.tolist(), [0, 2]) def test_misc(self): - for k,v in self.param_index.copy()._properties.iteritems(): + #py3 fix + #for k,v in self.param_index.copy()._properties.iteritems(): + for k,v in self.param_index.copy()._properties.items(): self.assertListEqual(self.param_index[k].tolist(), v.tolist()) self.assertEqual(self.param_index.size, 8) self.assertEqual(self.view.size, 5) From fc43f6d3137f55e1290f6b93f5d44afd0c6f9035 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Tue, 3 Mar 2015 09:53:27 +0000 Subject: [PATCH 67/99] Map fix for Python 3 --- GPy/core/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 097f7b5a..6f6f0ee8 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -341,7 +341,7 @@ class Model(Parameterized): cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))]) cols = np.array(cols) + 5 header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))] - header_string = map(lambda x: '|'.join(x), [header_string]) + header_string = list(map(lambda 
x: '|'.join(x), [header_string])) separator = '-' * len(header_string[0]) print('\n'.join([header_string[0], separator])) if target_param is None: From 317706dfd07a8b4017ce851e602444c26860742d Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Tue, 3 Mar 2015 10:39:19 +0000 Subject: [PATCH 68/99] Removed debugger set up command --- GPy/testing/kernel_tests.py | 1 - 1 file changed, 1 deletion(-) diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 771028f0..b51034d9 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -188,7 +188,6 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb if not result: print(("Gradient of K(X, X) wrt X failed for " + kern.name + " covariance function. Gradient values as follows:")) testmodel.checkgrad(verbose=True) - import ipdb;ipdb.set_trace() assert(result) pass_checks = False return False From 189647032a0d300468123f88687e4a397027c068 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Tue, 3 Mar 2015 11:05:59 +0000 Subject: [PATCH 69/99] iterkeys fix for Python 3 --- GPy/core/parameterization/index_operations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index 3d6ce64d..e4803f37 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -79,7 +79,7 @@ class ParameterIndexOperations(object): return self._properties.keys() def iterproperties(self): - return self._properties.iterkeys() + return iter(self._properties) def shift_right(self, start, size): for ind in self.iterindices(): From fff110ca1983989a2b4523d8928f9bc57d838469 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Tue, 3 Mar 2015 17:51:54 +0000 Subject: [PATCH 70/99] im_self->__self__ fix for python 3 --- GPy/kern/_src/independent_outputs.py | 4 +++- GPy/testing/likelihood_tests.py | 6 +++--- 
GPy/testing/parameterized_tests.py | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 10681d57..2a0c2a33 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -94,7 +94,9 @@ class IndependentOutputs(CombinationKernel): else: slices2 = index_to_slices(X2[:,self.index_dim]) [[[collate_grads(kern, i, dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for i,(kern,slices_i,slices_j) in enumerate(zip(kerns,slices,slices2))] - if self.single_kern: kern.gradient = target + + if self.single_kern: + kern.gradient = target else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))] def gradients_X(self,dL_dK, X, X2=None): diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py index 5feeffa4..3fe01c46 100644 --- a/GPy/testing/likelihood_tests.py +++ b/GPy/testing/likelihood_tests.py @@ -29,7 +29,7 @@ def dparam_partial(inst_func, *args): def param_func(param_val, param_name, inst_func, args): #inst_func.im_self._set_params(param) #inst_func.im_self.add_parameter(Param(param_name, param_val)) - inst_func.im_self[param_name] = param_val + inst_func.__self__[param_name] = param_val return inst_func(*args) return functools.partial(param_func, inst_func=inst_func, args=args) @@ -44,7 +44,7 @@ def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, The number of parameters and N is the number of data Need to take a slice out from f and a slice out of df """ - print("\n{} likelihood: {} vs {}".format(func.im_self.__class__.__name__, + print("\n{} likelihood: {} vs {}".format(func.__self__.__class__.__name__, func.__name__, dfunc.__name__)) partial_f = dparam_partial(func, *args) partial_df = dparam_partial(dfunc, *args) @@ -278,7 +278,7 @@ class TestNoiseModels(object): #} } - for name, attributes in noise_models.iteritems(): + for name, 
attributes in noise_models.items(): model = attributes["model"] if "grad_params" in attributes: params = attributes["grad_params"] diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index f3e0863f..0fb129ff 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -108,7 +108,7 @@ class ParameterizedTest(unittest.TestCase): self.assertListEqual(self.white._fixes_.tolist(), [FIXED]) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) - self.assertListEqual(self.test1.constraints[Logexp()].tolist(), range(self.param.size, self.param.size+self.rbf.size)) + self.assertListEqual(self.test1.constraints[Logexp()].tolist(), list(range(self.param.size, self.param.size+self.rbf.size))) def test_remove_parameter_param_array_grad_array(self): val = self.test1.kern.param_array.copy() From 4642f5ac2b4a044d78ab4e55ff15107b892945f2 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Tue, 3 Mar 2015 17:54:05 +0000 Subject: [PATCH 71/99] types.TupleType -> tuple fix for python 3 --- GPy/util/linalg.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index 7f1a28f3..ec66cc09 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -214,12 +214,12 @@ def mdot(*args): def _mdot_r(a, b): """Recursive helper for mdot""" - if type(a) == types.TupleType: + if type(a) == tuple: if len(a) > 1: a = mdot(*a) else: a = a[0] - if type(b) == types.TupleType: + if type(b) == tuple: if len(b) > 1: b = mdot(*b) else: @@ -362,7 +362,7 @@ def tdot_blas(mat, out=None): A = mat.ctypes.data_as(ctypes.c_void_p) BETA = c_double(0.0) C = out.ctypes.data_as(ctypes.c_void_p) - LDC = c_int(np.max(out.strides) // 8) + LDC = c_int(np.max(out.strides) / 8) dsyrk(byref(UPLO), byref(TRANS), byref(N), byref(K), byref(ALPHA), A, byref(LDA), byref(BETA), C, byref(LDC)) From 
35aec1c6d0a5ee4749972dba2e4bbc06a06fa53b Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Tue, 3 Mar 2015 20:47:09 +0000 Subject: [PATCH 72/99] Various Python 3 fixes --- GPy/core/parameterization/parameter_core.py | 2 +- GPy/util/linalg.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index bfe325a3..195a80c1 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -581,7 +581,7 @@ class Indexable(Nameable, Updateable): if len(transforms) == 0: transforms = which.properties() removed = np.empty((0,), dtype=int) - for t in transforms: + for t in list(transforms): unconstrained = which.remove(t, self._raveled_index()) removed = np.union1d(removed, unconstrained) if t is __fixed__: diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index ec66cc09..2813a30a 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -362,7 +362,7 @@ def tdot_blas(mat, out=None): A = mat.ctypes.data_as(ctypes.c_void_p) BETA = c_double(0.0) C = out.ctypes.data_as(ctypes.c_void_p) - LDC = c_int(np.max(out.strides) / 8) + LDC = c_int(np.max(out.strides) // 8) dsyrk(byref(UPLO), byref(TRANS), byref(N), byref(K), byref(ALPHA), A, byref(LDA), byref(BETA), C, byref(LDC)) @@ -389,7 +389,7 @@ def DSYR_blas(A, x, alpha=1.): """ N = c_int(A.shape[0]) LDA = c_int(A.shape[0]) - UPLO = c_char('l') + UPLO = c_char('l'.encode('ascii')) ALPHA = c_double(alpha) A_ = A.ctypes.data_as(ctypes.c_void_p) x_ = x.ctypes.data_as(ctypes.c_void_p) From 273beca272f41835431fad699b1f68104e1df749 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Wed, 4 Mar 2015 03:08:15 +0000 Subject: [PATCH 73/99] Python 3 metaclass fix --- GPy/kern/_src/kern.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 6ccd315b..2e8ebcb0 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ 
-11,10 +11,11 @@ from functools import reduce -class Kern(Parameterized): +class Kern(Parameterized,metaclass=KernCallsViaSlicerMeta): #=========================================================================== # This adds input slice support. The rather ugly code for slicing can be # found in kernel_slice_operations + # __mataclass__ is ignored in Python 3 - needs to be put in the function definiton __metaclass__ = KernCallsViaSlicerMeta #=========================================================================== _support_GPU=False From 6d2393ae907393ba69b4ab1556e6dc67778bca5e Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Wed, 4 Mar 2015 03:22:44 +0000 Subject: [PATCH 74/99] Various Py3 fixes --- GPy/core/parameterization/priors.py | 18 +++++++++--------- GPy/inference/optimization/stochastics.py | 2 +- GPy/kern/_src/add.py | 2 +- GPy/models/sparse_gp_minibatch.py | 6 +++--- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py index a4bbecb3..38cb0d19 100644 --- a/GPy/core/parameterization/priors.py +++ b/GPy/core/parameterization/priors.py @@ -414,7 +414,7 @@ class DGPLVM_KFDA(Prior): def compute_cls(self, x): cls = {} # Appending each data point to its proper class - for j in xrange(self.datanum): + for j in range(self.datanum): class_label = self.get_class_label(self.lbl[j]) if class_label not in cls: cls[class_label] = [] @@ -553,7 +553,7 @@ class DGPLVM(Prior): def compute_cls(self, x): cls = {} # Appending each data point to its proper class - for j in xrange(self.datanum): + for j in range(self.datanum): class_label = self.get_class_label(self.lbl[j]) if class_label not in cls: cls[class_label] = [] @@ -572,7 +572,7 @@ class DGPLVM(Prior): # Adding data points as tuple to the dictionary so that we can access indices def compute_indices(self, x): data_idx = {} - for j in xrange(self.datanum): + for j in range(self.datanum): class_label = 
self.get_class_label(self.lbl[j]) if class_label not in data_idx: data_idx[class_label] = [] @@ -591,7 +591,7 @@ class DGPLVM(Prior): else: lst_idx = [] # Here we put indices of each class in to the list called lst_idx_all - for m in xrange(len(data_idx[i])): + for m in range(len(data_idx[i])): lst_idx.append(data_idx[i][m][0]) lst_idx_all.append(lst_idx) return lst_idx_all @@ -627,7 +627,7 @@ class DGPLVM(Prior): # pdb.set_trace() # Calculating Bi B_i[i] = (M_i[i] - M_0).reshape(1, self.dim) - for k in xrange(self.datanum): + for k in range(self.datanum): for i in data_idx: N_i = float(len(data_idx[i])) if k in lst_idx_all[i]: @@ -772,7 +772,7 @@ class DGPLVM_T(Prior): def compute_cls(self, x): cls = {} # Appending each data point to its proper class - for j in xrange(self.datanum): + for j in range(self.datanum): class_label = self.get_class_label(self.lbl[j]) if class_label not in cls: cls[class_label] = [] @@ -791,7 +791,7 @@ class DGPLVM_T(Prior): # Adding data points as tuple to the dictionary so that we can access indices def compute_indices(self, x): data_idx = {} - for j in xrange(self.datanum): + for j in range(self.datanum): class_label = self.get_class_label(self.lbl[j]) if class_label not in data_idx: data_idx[class_label] = [] @@ -810,7 +810,7 @@ class DGPLVM_T(Prior): else: lst_idx = [] # Here we put indices of each class in to the list called lst_idx_all - for m in xrange(len(data_idx[i])): + for m in range(len(data_idx[i])): lst_idx.append(data_idx[i][m][0]) lst_idx_all.append(lst_idx) return lst_idx_all @@ -846,7 +846,7 @@ class DGPLVM_T(Prior): # pdb.set_trace() # Calculating Bi B_i[i] = (M_i[i] - M_0).reshape(1, self.dim) - for k in xrange(self.datanum): + for k in range(self.datanum): for i in data_idx: N_i = float(len(data_idx[i])) if k in lst_idx_all[i]: diff --git a/GPy/inference/optimization/stochastics.py b/GPy/inference/optimization/stochastics.py index dc71d539..f1532bc5 100644 --- a/GPy/inference/optimization/stochastics.py +++ 
b/GPy/inference/optimization/stochastics.py @@ -30,7 +30,7 @@ class SparseGPMissing(StochasticStorage): Thus, we can just make sure the loop goes over self.d every time. """ - self.d = xrange(model.Y_normalized.shape[1]) + self.d = range(model.Y_normalized.shape[1]) class SparseGPStochastics(StochasticStorage): """ diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py index 82c84c52..77f0d76e 100644 --- a/GPy/kern/_src/add.py +++ b/GPy/kern/_src/add.py @@ -165,7 +165,7 @@ class Add(CombinationKernel): else: eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.psi1(Z, variational_posterior) * 2. grads = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior) - [np.add(target_grads[i],grads[i],target_grads[i]) for i in xrange(len(grads))] + [np.add(target_grads[i],grads[i],target_grads[i]) for i in range(len(grads))] return target_grads def add(self, other): diff --git a/GPy/models/sparse_gp_minibatch.py b/GPy/models/sparse_gp_minibatch.py index d3bbe5fe..10c54d49 100644 --- a/GPy/models/sparse_gp_minibatch.py +++ b/GPy/models/sparse_gp_minibatch.py @@ -82,7 +82,7 @@ class SparseGPMiniBatch(SparseGP): m_f = lambda i: "Precomputing Y for missing data: {: >7.2%}".format(float(i+1)/overall) message = m_f(-1) print(message, end=' ') - for d in xrange(overall): + for d in range(overall): self.Ylist.append(self.Y_normalized[self.ninan[:, d], d][:, None]) print(' '*(len(message)+1) + '\r', end=' ') message = m_f(d) @@ -182,11 +182,11 @@ class SparseGPMiniBatch(SparseGP): full_values[key][value_indices[key]] += current_values[key] """ for key in current_values.keys(): - if value_indices is not None and value_indices.has_key(key): + if value_indices is not None and key in value_indices: index = value_indices[key] else: index = slice(None) - if full_values.has_key(key): + if key in full_values: full_values[key][index] += current_values[key] else: full_values[key] = current_values[key] From 3e25098710f345064f306ea87ba8184aa6f98df2 Mon Sep 17 00:00:00 2001 
From: Mike Croucher Date: Fri, 6 Mar 2015 13:06:42 +0000 Subject: [PATCH 75/99] Fixed leaky comprehension behaviour for Py3 --- GPy/models/mrd.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index f56873fa..0028078f 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -74,6 +74,8 @@ class MRD(BayesianGPLVMMiniBatch): self.logger.debug("creating observable arrays") self.Ylist = [ObsAr(Y) for Y in Ylist] + #The next line is a fix for Python 3. It replicates the python 2 behaviour from the above comprehension + Y = Ylist[-1] if Ynames is None: self.logger.debug("creating Ynames") From f627c0b1cd66cd9689562d90eb4fac8a3c505f87 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 6 Mar 2015 14:48:19 +0000 Subject: [PATCH 76/99] 2to3 itertools fixer --- GPy/models/mrd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index 0028078f..f6e8c408 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -139,7 +139,7 @@ class MRD(BayesianGPLVMMiniBatch): self.bgplvms = [] - for i, n, k, l, Y, im, bs in itertools.izip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize): + for i, n, k, l, Y, im, bs in zip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize): assert Y.shape[0] == self.num_data, "All datasets need to share the number of datapoints, and those have to correspond to one another" md = np.isnan(Y).any() spgp = BayesianGPLVMMiniBatch(Y, input_dim, X, X_variance, @@ -166,7 +166,7 @@ class MRD(BayesianGPLVMMiniBatch): self._log_marginal_likelihood = 0 self.Z.gradient[:] = 0. self.X.gradient[:] = 0. 
- for b, i in itertools.izip(self.bgplvms, self.inference_method): + for b, i in zip(self.bgplvms, self.inference_method): self._log_marginal_likelihood += b._log_marginal_likelihood self.logger.info('working on im <{}>'.format(hex(id(i)))) @@ -197,7 +197,7 @@ class MRD(BayesianGPLVMMiniBatch): elif init in "PCA_single": X = np.zeros((Ylist[0].shape[0], self.input_dim)) fracs = [] - for qs, Y in itertools.izip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist): + for qs, Y in zip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist): x,frcs = initialize_latent('PCA', len(qs), Y) X[:, qs] = x fracs.append(frcs) From 028fa93d3664cf4c6792909c8cb7b6a4f92507ea Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 6 Mar 2015 17:07:35 +0000 Subject: [PATCH 77/99] kern fix. All tests now pass --- GPy/kern/_src/independent_outputs.py | 8 ++++---- GPy/models/mrd.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index 2a0c2a33..aa9dca80 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -94,10 +94,10 @@ class IndependentOutputs(CombinationKernel): else: slices2 = index_to_slices(X2[:,self.index_dim]) [[[collate_grads(kern, i, dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for i,(kern,slices_i,slices_j) in enumerate(zip(kerns,slices,slices2))] - if self.single_kern: - kern.gradient = target - else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))] + self.kern.gradient = target + else: + [kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))] def gradients_X(self,dL_dK, X, X2=None): target = np.zeros(X.shape) @@ -144,7 +144,7 @@ class IndependentOutputs(CombinationKernel): if self.single_kern: target[:] += kern.gradient else: target[i][:] += kern.gradient [[collate_grads(kern, i, dL_dKdiag[s], X[s,:]) for s 
in slices_i] for i, (kern, slices_i) in enumerate(zip(kerns, slices))] - if self.single_kern: kern.gradient = target + if self.single_kern: self.kern.gradient = target else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))] class Hierarchical(CombinationKernel): diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index f6e8c408..be01b769 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -329,9 +329,9 @@ class MRD(BayesianGPLVMMiniBatch): def __getstate__(self): state = super(MRD, self).__getstate__() - if state.has_key('kern'): + if 'kern' in state: del state['kern'] - if state.has_key('likelihood'): + if 'likelihood' in state: del state['likelihood'] return state From 5eeb2f18e92b5edbf32f037cc387569f82c4d04c Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 7 Mar 2015 07:35:55 +0000 Subject: [PATCH 78/99] Updated README.md for recent Py3 work --- README.md | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index e488fd50..2268f2cb 100644 --- a/README.md +++ b/README.md @@ -10,14 +10,24 @@ A Gaussian processes framework in Python. Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GPy.png) -### Moving to Python 3 +### Python 3 Compatibility Work is underway to make GPy run on Python 3. * Python 2.x compatibility is currently broken in this fork -* The test suite runs but with fewer tests for some reason -- not sure why yet. -* Many tests in the suite fail! Don't even think about using this fork for production use -* All weave functions not covered by the test suite are simply commented out. Can add equivalents later as test functions become available -* Examples that required optimised versions of functions for speed reasons would be valued +* All tests in the testsuite now run on Python3. 
To see this for yourself, in Ubuntu 14.04, you can do + + git clone https://github.com/mikecroucher/GPy.git + cd GPy + git checkout devel + nosetests3 GPy/testing + +nosetests3 is Ubuntu's way of referring to the Python 3 version of nosetests. You install it with + + sudo apt-get install python3-nose + +* Test coverage is less than 100% so it is expected that there is still more work to be done. We need more tests and examples to try out. +* All weave functions not covered by the test suite are *simply commented out*. Can add equivalents later as test functions become available +* A set of benchmarks would be useful! ### Citation From cf1c382acde91048f5bb4cd9496b62d5ddc6aa2c Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 7 Mar 2015 07:49:59 +0000 Subject: [PATCH 79/99] xrange fixes for Python 3 --- GPy/core/parameterization/ties_and_remappings.py | 6 +++--- GPy/examples/dimensionality_reduction.py | 4 ++-- GPy/inference/latent_function_inference/posterior.py | 6 +++--- .../latent_function_inference/var_dtc_parallel.py | 2 +- GPy/inference/mcmc/hmc.py | 6 +++--- GPy/kern/_src/coregionalize.py | 2 +- GPy/kern/_src/splitKern.py | 4 ++-- GPy/models/ss_gplvm.py | 2 +- GPy/models/ss_mrd.py | 10 +++++----- GPy/plotting/matplot_dep/img_plots.py | 4 ++-- GPy/plotting/matplot_dep/maps.py | 2 +- GPy/plotting/matplot_dep/visualize.py | 2 +- GPy/util/choleskies.py | 4 ++-- GPy/util/parallel.py | 4 ++-- 14 files changed, 29 insertions(+), 29 deletions(-) diff --git a/GPy/core/parameterization/ties_and_remappings.py b/GPy/core/parameterization/ties_and_remappings.py index bafa8a98..527bc47c 100644 --- a/GPy/core/parameterization/ties_and_remappings.py +++ b/GPy/core/parameterization/ties_and_remappings.py @@ -185,7 +185,7 @@ class Tie(Parameterized): def _check_change(self): changed = False if self.tied_param is not None: - for i in xrange(self.tied_param.size): + for i in range(self.tied_param.size): b0 = self.label_buf==self.label_buf[self.buf_idx[i]] b = 
self._highest_parent_.param_array[b0]!=self.tied_param[i] if b.sum()==0: @@ -212,11 +212,11 @@ class Tie(Parameterized): if self.tied_param is not None: self.tied_param.gradient = 0. [np.put(self.tied_param.gradient, i, self._highest_parent_.gradient[self.label_buf==self.label_buf[self.buf_idx[i]]].sum()) - for i in xrange(self.tied_param.size)] + for i in range(self.tied_param.size)] def propagate_val(self): if self.tied_param is not None: - for i in xrange(self.tied_param.size): + for i in range(self.tied_param.size): self._highest_parent_.param_array[self.label_buf==self.label_buf[self.buf_idx[i]]] = self.tied_param[i] diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index fe1fa1e5..46107a71 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -653,7 +653,7 @@ def ssgplvm_simulation_linear(): def sample_X(Q, pi): x = np.empty(Q) dies = np.random.rand(Q) - for q in xrange(Q): + for q in range(Q): if dies[q] < pi: x[q] = np.random.randn() else: @@ -663,7 +663,7 @@ def ssgplvm_simulation_linear(): Y = np.empty((N, D)) X = np.empty((N, Q)) # Generate data from random sampled weight matrices - for n in xrange(N): + for n in range(N): X[n] = sample_X(Q, pi) w = np.random.randn(D, Q) Y[n] = np.dot(w, X[n]) diff --git a/GPy/inference/latent_function_inference/posterior.py b/GPy/inference/latent_function_inference/posterior.py index 73d65df6..ea608cce 100644 --- a/GPy/inference/latent_function_inference/posterior.py +++ b/GPy/inference/latent_function_inference/posterior.py @@ -107,7 +107,7 @@ class Posterior(object): if self._precision is None: cov = np.atleast_3d(self.covariance) self._precision = np.zeros(cov.shape) # if one covariance per dimension - for p in xrange(cov.shape[-1]): + for p in range(cov.shape[-1]): self._precision[:,:,p] = pdinv(cov[:,:,p])[0] return self._precision @@ -125,7 +125,7 @@ class Posterior(object): if self._woodbury_inv is not None: winv = 
np.atleast_3d(self._woodbury_inv) self._woodbury_chol = np.zeros(winv.shape) - for p in xrange(winv.shape[-1]): + for p in range(winv.shape[-1]): self._woodbury_chol[:,:,p] = pdinv(winv[:,:,p])[2] #Li = jitchol(self._woodbury_inv) #self._woodbury_chol, _ = dtrtri(Li) @@ -160,7 +160,7 @@ class Posterior(object): elif self._covariance is not None: B = np.atleast_3d(self._K) - np.atleast_3d(self._covariance) self._woodbury_inv = np.empty_like(B) - for i in xrange(B.shape[-1]): + for i in range(B.shape[-1]): tmp, _ = dpotrs(self.K_chol, B[:,:,i]) self._woodbury_inv[:,:,i], _ = dpotrs(self.K_chol, tmp.T) return self._woodbury_inv diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index cb117af1..6f98668f 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -92,7 +92,7 @@ class VarDTC_minibatch(LatentFunctionInference): psi0_full = 0. YRY_full = 0. 
- for n_start in xrange(0,num_data,batchsize): + for n_start in range(0,num_data,batchsize): n_end = min(batchsize+n_start, num_data) if batchsize==num_data: Y_slice = Y diff --git a/GPy/inference/mcmc/hmc.py b/GPy/inference/mcmc/hmc.py index ec6399b6..fcc72591 100644 --- a/GPy/inference/mcmc/hmc.py +++ b/GPy/inference/mcmc/hmc.py @@ -39,7 +39,7 @@ class HMC: :rtype: numpy.ndarray """ params = np.empty((num_samples,self.p.size)) - for i in xrange(num_samples): + for i in range(num_samples): self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size),self.M) H_old = self._computeH() theta_old = self.model.optimizer_array.copy() @@ -59,7 +59,7 @@ class HMC: return params def _update(self, hmc_iters): - for i in xrange(hmc_iters): + for i in range(hmc_iters): self.p[:] += -self.stepsize/2.*self.model._transform_gradients(self.model.objective_function_gradients()) self.model.optimizer_array = self.model.optimizer_array + self.stepsize*np.dot(self.Minv, self.p) self.p[:] += -self.stepsize/2.*self.model._transform_gradients(self.model.objective_function_gradients()) @@ -82,7 +82,7 @@ class HMC_shortcut: def sample(self, m_iters=1000, hmc_iters=20): params = np.empty((m_iters,self.p.size)) - for i in xrange(m_iters): + for i in range(m_iters): # sample a stepsize from the uniform distribution stepsize = np.exp(np.random.rand()*(self.stepsize_range[1]-self.stepsize_range[0])+self.stepsize_range[0]) self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size),self.M) diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py index 1b16fd73..5b91de1c 100644 --- a/GPy/kern/_src/coregionalize.py +++ b/GPy/kern/_src/coregionalize.py @@ -166,7 +166,7 @@ class Coregionalize(Kern): def update_gradients_diag(self, dL_dKdiag, X): index = np.asarray(X, dtype=np.int).flatten() - dL_dKdiag_small = np.array([dL_dKdiag[index==i].sum() for i in xrange(self.output_dim)]) + dL_dKdiag_small = np.array([dL_dKdiag[index==i].sum() for i in range(self.output_dim)]) 
self.W.gradient = 2.*self.W*dL_dKdiag_small[:, None] self.kappa.gradient = dL_dKdiag_small diff --git a/GPy/kern/_src/splitKern.py b/GPy/kern/_src/splitKern.py index 18771cb0..051e492b 100644 --- a/GPy/kern/_src/splitKern.py +++ b/GPy/kern/_src/splitKern.py @@ -104,7 +104,7 @@ class SplitKern(CombinationKernel): assert len(slices2)<=2, 'The Split kernel only support two different indices' target = np.zeros((X.shape[0], X2.shape[0])) # diagonal blocks - [[target.__setitem__((s,s2), self.kern.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[i], slices2[i])] for i in xrange(min(len(slices),len(slices2)))] + [[target.__setitem__((s,s2), self.kern.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[i], slices2[i])] for i in range(min(len(slices),len(slices2)))] if len(slices)>1: [target.__setitem__((s,s2), self.kern_cross.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[1], slices2[0])] if len(slices2)>1: @@ -135,7 +135,7 @@ class SplitKern(CombinationKernel): else: assert dL_dK.shape==(X.shape[0],X2.shape[0]) slices2 = index_to_slices(X2[:,self.index_dim]) - [[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s,s2 in itertools.product(slices[i], slices2[i])] for i in xrange(min(len(slices),len(slices2)))] + [[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s,s2 in itertools.product(slices[i], slices2[i])] for i in range(min(len(slices),len(slices2)))] if len(slices)>1: [collate_grads(dL_dK[s,s2], X[s], X2[s2], True) for s,s2 in itertools.product(slices[1], slices2[0])] if len(slices2)>1: diff --git a/GPy/models/ss_gplvm.py b/GPy/models/ss_gplvm.py index a61ad2a0..b8e1c72d 100644 --- a/GPy/models/ss_gplvm.py +++ b/GPy/models/ss_gplvm.py @@ -71,7 +71,7 @@ class SSGPLVM(SparseGP_MPI): self.link_parameter(self.X, index=0) if self.group_spike: - [self.X.gamma[:,i].tie('tieGamma'+str(i)) for i in xrange(self.X.gamma.shape[1])] # Tie columns together + [self.X.gamma[:,i].tie('tieGamma'+str(i)) for i in range(self.X.gamma.shape[1])] # Tie columns together def 
set_X_gradients(self, X, X_grad): """Set the gradients of the posterior distribution of X in its specific form.""" diff --git a/GPy/models/ss_mrd.py b/GPy/models/ss_mrd.py index 036ac095..bd2efce0 100644 --- a/GPy/models/ss_mrd.py +++ b/GPy/models/ss_mrd.py @@ -19,10 +19,10 @@ class SSMRD(Model): name='model_'+str(i)) for i,y in enumerate(Ylist)] self.add_parameters(*(self.models)) - [[[self.models[m].X.mean[i,j:j+1].tie('mean_'+str(i)+'_'+str(j)) for m in xrange(len(self.models))] for j in xrange(self.models[0].X.mean.shape[1])] - for i in xrange(self.models[0].X.mean.shape[0])] - [[[self.models[m].X.variance[i,j:j+1].tie('var_'+str(i)+'_'+str(j)) for m in xrange(len(self.models))] for j in xrange(self.models[0].X.variance.shape[1])] - for i in xrange(self.models[0].X.variance.shape[0])] + [[[self.models[m].X.mean[i,j:j+1].tie('mean_'+str(i)+'_'+str(j)) for m in range(len(self.models))] for j in range(self.models[0].X.mean.shape[1])] + for i in range(self.models[0].X.mean.shape[0])] + [[[self.models[m].X.variance[i,j:j+1].tie('var_'+str(i)+'_'+str(j)) for m in range(len(self.models))] for j in range(self.models[0].X.variance.shape[1])] + for i in range(self.models[0].X.variance.shape[0])] self.updates = True @@ -31,4 +31,4 @@ class SSMRD(Model): self._log_marginal_likelihood = sum([m._log_marginal_likelihood for m in self.models]) def log_likelihood(self): - return self._log_marginal_likelihood \ No newline at end of file + return self._log_marginal_likelihood diff --git a/GPy/plotting/matplot_dep/img_plots.py b/GPy/plotting/matplot_dep/img_plots.py index 453a904d..5346545d 100644 --- a/GPy/plotting/matplot_dep/img_plots.py +++ b/GPy/plotting/matplot_dep/img_plots.py @@ -50,8 +50,8 @@ def plot_2D_images(figure, arr, symmetric=False, pad=None, zoom=None, mode=None, buf = np.ones((y_size*fig_nrows+pad*(fig_nrows-1), x_size*fig_ncols+pad*(fig_ncols-1), 3),dtype=arr.dtype) - for y in xrange(fig_nrows): - for x in xrange(fig_ncols): + for y in range(fig_nrows): + for x 
in range(fig_ncols): if y*fig_ncols+x Date: Sat, 7 Mar 2015 08:42:05 +0000 Subject: [PATCH 80/99] Fix README.md formatting --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 2268f2cb..8c6c4397 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ Work is underway to make GPy run on Python 3. * Python 2.x compatibility is currently broken in this fork * All tests in the testsuite now run on Python3. To see this for yourself, in Ubuntu 14.04, you can do + git clone https://github.com/mikecroucher/GPy.git cd GPy git checkout devel From f6b71629e70644e640744dfd856cbd12316bf4d6 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Sat, 7 Mar 2015 08:43:13 +0000 Subject: [PATCH 81/99] Fix README.md formatting --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 8c6c4397..60dcbe24 100644 --- a/README.md +++ b/README.md @@ -14,8 +14,9 @@ Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GP Work is underway to make GPy run on Python 3. * Python 2.x compatibility is currently broken in this fork -* All tests in the testsuite now run on Python3. To see this for yourself, in Ubuntu 14.04, you can do +* All tests in the testsuite now run on Python3. 
+To see this for yourself, in Ubuntu 14.04, you can do git clone https://github.com/mikecroucher/GPy.git cd GPy From e5587cf234684d095516a5881f4d829deceb2cd2 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Fri, 13 Mar 2015 14:43:49 +0000 Subject: [PATCH 82/99] Used 'six' to support Py3 and Py2 simultaneously --- GPy/core/parameterization/parameterized.py | 10 ++++++---- GPy/kern/_src/kern.py | 11 ++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index 27ecbc1c..691bf4a7 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -1,7 +1,7 @@ # Copyright (c) 2014, Max Zwiessele, James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) - +import six # For metaclass support in Python 2 and 3 simultaneously import numpy; np = numpy import itertools from re import compile, _pattern_type @@ -27,7 +27,8 @@ class ParametersChangedMeta(type): self.parameters_changed() return self -class Parameterized(Parameterizable,metaclass=ParametersChangedMeta): +@six.add_metaclass(ParametersChangedMeta) +class Parameterized(Parameterizable): """ Parameterized class @@ -73,8 +74,9 @@ class Parameterized(Parameterizable,metaclass=ParametersChangedMeta): # Metaclass for parameters changed after init. # This makes sure, that parameters changed will always be called after __init__ # **Never** call parameters_changed() yourself - #This is ignored in Python 3 -- you need to put the meta class in the - __metaclass__ = ParametersChangedMeta + #This is ignored in Python 3 -- you need to put the meta class in the function definition. 
+ #__metaclass__ = ParametersChangedMeta + #The six module is used to support both Python 2 and 3 simultaneously #=========================================================================== def __init__(self, name=None, parameters=[], *a, **kw): super(Parameterized, self).__init__(name=name, *a, **kw) diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 2e8ebcb0..e63ddad4 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -8,15 +8,16 @@ from .kernel_slice_operations import KernCallsViaSlicerMeta from ...util.caching import Cache_this from GPy.core.parameterization.observable_array import ObsAr from functools import reduce +import six - - -class Kern(Parameterized,metaclass=KernCallsViaSlicerMeta): +@six.add_metaclass(KernCallsViaSlicerMeta) +class Kern(Parameterized): #=========================================================================== # This adds input slice support. The rather ugly code for slicing can be # found in kernel_slice_operations - # __mataclass__ is ignored in Python 3 - needs to be put in the function definiton - __metaclass__ = KernCallsViaSlicerMeta + # __meataclass__ is ignored in Python 3 - needs to be put in the function definiton + #__metaclass__ = KernCallsViaSlicerMeta + #Here, we use the Python module six to support Py3 and Py2 simultaneously #=========================================================================== _support_GPU=False def __init__(self, input_dim, active_dims, name, useGPU=False, *a, **kw): From 27c65003d25bcf79825c8847fe173254e225ed44 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Wed, 1 Apr 2015 13:23:06 +0100 Subject: [PATCH 83/99] Working in Py2 but broken in Py3 --- GPy/util/choleskies.py | 104 ++++++++++++++++++++--------------------- GPy/util/misc.py | 2 +- 2 files changed, 53 insertions(+), 53 deletions(-) diff --git a/GPy/util/choleskies.py b/GPy/util/choleskies.py index 7e068933..b64beae1 100644 --- a/GPy/util/choleskies.py +++ b/GPy/util/choleskies.py @@ -2,7 +2,7 @@ # 
Licensed under the GNU GPL version 3.0 import numpy as np -#from scipy import weave +from scipy import weave from . import linalg def safe_root(N): @@ -12,58 +12,58 @@ def safe_root(N): raise ValueError("N is not square!") return j -#def flat_to_triang(flat): -# """take a matrix N x D and return a M X M x D array where -# -# N = M(M+1)/2 -# -# the lower triangluar portion of the d'th slice of the result is filled by the d'th column of flat. -# """ -# N, D = flat.shape -# M = (-1 + safe_root(8*N+1))/2 -# ret = np.zeros((M, M, D)) -# flat = np.ascontiguousarray(flat) -# -# code = """ -# int count = 0; -# for(int m=0; m 1 and df_dg.shape[-1] > 1: - import ipdb; ipdb.set_trace() # XXX BREAKPOINT + #import ipdb; ipdb.set_trace() # XXX BREAKPOINT raise NotImplementedError('Not implemented for matricies yet') return df_dg * dg_dx From 985b2ea70c2fb7358e7101309c4472733f269834 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Wed, 1 Apr 2015 15:42:49 +0100 Subject: [PATCH 84/99] Added (SLOW) Pure Python implementations of flat_to_triang and triang_to_flat --- .../var_dtc_parallel.py | 2 +- GPy/testing/mapping_tests.py | 7 +-- GPy/util/choleskies.py | 47 +++++++++++++++++-- GPy/util/linalg.py | 4 +- 4 files changed, 48 insertions(+), 12 deletions(-) diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index c546a4a1..4b884d4c 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -170,7 +170,7 @@ class VarDTC_minibatch(LatentFunctionInference): Kmm = kern.K(Z).copy() diag.add(Kmm, self.const_jitter) if not np.isfinite(Kmm).all(): - print Kmm + print(Kmm) Lm = jitchol(Kmm) LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right') diff --git a/GPy/testing/mapping_tests.py b/GPy/testing/mapping_tests.py index 2e32dad3..2ff0e2d8 100644 --- a/GPy/testing/mapping_tests.py +++ 
b/GPy/testing/mapping_tests.py @@ -26,11 +26,6 @@ class MappingGradChecker(GPy.core.Model): self.mapping.update_gradients(self.dL_dY, self.X) - - - - - class MappingTests(unittest.TestCase): def test_kernelmapping(self): @@ -68,5 +63,5 @@ class MappingTests(unittest.TestCase): if __name__ == "__main__": - print "Running unit tests, please be (very) patient..." + print("Running unit tests, please be (very) patient...") unittest.main() diff --git a/GPy/util/choleskies.py b/GPy/util/choleskies.py index b64beae1..37ac7211 100644 --- a/GPy/util/choleskies.py +++ b/GPy/util/choleskies.py @@ -2,8 +2,13 @@ # Licensed under the GNU GPL version 3.0 import numpy as np -from scipy import weave from . import linalg +from .config import config + +try: + from scipy import weave +except ImportError: + config.set('weave', 'working', 'False') def safe_root(N): i = np.sqrt(N) @@ -12,12 +17,13 @@ def safe_root(N): raise ValueError("N is not square!") return j -def flat_to_triang(flat): +def _flat_to_triang_weave(flat): """take a matrix N x D and return a M X M x D array where N = M(M+1)/2 the lower triangluar portion of the d'th slice of the result is filled by the d'th column of flat. 
+ This is the weave implementation """ N, D = flat.shape M = (-1 + safe_root(8*N+1))/2 @@ -41,7 +47,24 @@ def flat_to_triang(flat): weave.inline(code, ['flat', 'ret', 'D', 'M']) return ret -def triang_to_flat(L): +def _flat_to_triang_pure(flat_mat): + N, D = flat_mat.shape + M = (-1 + safe_root(8*N+1))//2 + ret = np.zeros((M, M, D)) + count = 0 + for m in range(M): + for mm in range(m+1): + for d in range(D): + ret.flat[d + m*D*M + mm*D] = flat_mat.flat[count]; + count = count+1 + return ret + +if config.getboolean('weave', 'working'): + flat_to_triang = _flat_to_triang_weave +else: + flat_to_triang = _flat_to_triang_pure + +def _triang_to_flat_weave(L): M, _, D = L.shape L = np.ascontiguousarray(L) # should do nothing if L was created by flat_to_triang @@ -65,6 +88,24 @@ def triang_to_flat(L): weave.inline(code, ['flat', 'L', 'D', 'M']) return flat +def _triang_to_flat_pure(L): + M, _, D = L.shape + + N = M*(M+1)//2 + flat = np.empty((N, D)) + count = 0; + for m in range(M): + for mm in range(m+1): + for d in range(D): + flat.flat[count] = L.flat[d + m*D*M + mm*D]; + count = count +1 + return flat + +if config.getboolean('weave', 'working'): + triang_to_flat = _triang_to_flat_weave +else: + triang_to_flat = _triang_to_flat_pure + def triang_to_cov(L): return np.dstack([np.dot(L[:,:,i], L[:,:,i].T) for i in range(L.shape[-1])]) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index ec83810f..8ac5418f 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -102,14 +102,14 @@ def jitchol(A, maxtries=5): num_tries = 1 while num_tries <= maxtries and np.isfinite(jitter): try: - print jitter + print(jitter) L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True) return L except: jitter *= 10 finally: num_tries += 1 - raise linalg.LinAlgError, "not positive definite, even with jitter." 
+ raise linalg.LinAlgError("not positive definite, even with jitter.") import traceback try: raise except: From 620a7842b3f908cdab26391529e0e32591978f99 Mon Sep 17 00:00:00 2001 From: Mike Croucher Date: Thu, 2 Apr 2015 07:35:24 +0100 Subject: [PATCH 85/99] Fix printing error --- GPy/core/verbose_optimization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/core/verbose_optimization.py b/GPy/core/verbose_optimization.py index a2c1598e..a5fb019e 100644 --- a/GPy/core/verbose_optimization.py +++ b/GPy/core/verbose_optimization.py @@ -151,7 +151,7 @@ class VerboseOptimization(object): if not self.ipython_notebook: print() print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)) - print('Optimization status: {0:.5g}'.format(self.status)) + print('Optimization status: {0}'.format(self.status)) print() elif self.clear: self.hor_align.close() From e658637c18acdb59ee5f4ceacce106e2b38cb6b4 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Wed, 8 Apr 2015 10:57:20 +0100 Subject: [PATCH 86/99] Added Y_metadata to log_predictive_density --- GPy/likelihoods/likelihood.py | 41 +++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index 1295245c..4f3f2e37 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -70,7 +70,7 @@ class Likelihood(Parameterized): """ raise NotImplementedError - def log_predictive_density(self, y_test, mu_star, var_star): + def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None): """ Calculation of the log predictive density @@ -87,13 +87,46 @@ class Likelihood(Parameterized): assert y_test.shape==mu_star.shape assert y_test.shape==var_star.shape assert y_test.shape[1] == 1 - def integral_generator(y, m, v): + + flat_y_test = y_test.flatten() + flat_mu_star = mu_star.flatten() + flat_var_star = var_star.flatten() + + if Y_metadata is not None: + #Need to zip 
individual elements of Y_metadata aswell + Y_metadata_flat = {} + if Y_metadata is not None: + for key, val in Y_metadata.items(): + Y_metadata_flat[key] = np.atleast_1d(val).reshape(-1,1) + + zipped_values = [] + + for i in range(y_test.shape[0]): + y_m = {} + for key, val in Y_metadata_flat.items(): + if np.isscalar(val) or val.shape[0] == 1: + y_m[key] = val + else: + #Won't broadcast yet + y_m[key] = val[i] + zipped_values.append((flat_y_test[i], flat_mu_star[i], flat_var_star[i], y_m)) + else: + #Otherwise just pass along None's + zipped_values = zip(flat_y_test, flat_mu_star, flat_var_star, [None]*y_test.shape[0]) + + def integral_generator(y, m, v, y_m): """Generate a function which can be integrated to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*""" def f(f_star): - return self.pdf(f_star, y)*np.exp(-(1./(2*v))*np.square(m-f_star)) + #exponent = np.exp(-(1./(2*v))*np.square(m-f_star)) + #from GPy.util.misc import safe_exp + #exponent = safe_exp(exponent) + #return self.pdf(f_star, y, y_m)*exponent + + #More stable in the log space + return np.exp(self.logpdf(f_star, y, y_m) -(1./(2*v))*np.square(m-f_star)) return f - scaled_p_ystar, accuracy = zip(*[quad(integral_generator(y, m, v), -np.inf, np.inf) for y, m, v in zip(y_test.flatten(), mu_star.flatten(), var_star.flatten())]) + scaled_p_ystar, accuracy = zip(*[quad(integral_generator(y, m, v, y_m), -np.inf, np.inf) for y, m, v, y_m in zipped_values]) scaled_p_ystar = np.array(scaled_p_ystar).reshape(-1,1) p_ystar = scaled_p_ystar/np.sqrt(2*np.pi*var_star) return np.log(p_ystar) From 1e30ffd73038168e6e793c4315aefc74c129ada3 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Thu, 9 Apr 2015 15:42:02 +0100 Subject: [PATCH 87/99] speed ups for normal cdf --- GPy/inference/optimization/optimization.py | 4 ++ GPy/likelihoods/bernoulli.py | 15 +++-- GPy/likelihoods/likelihood.py | 12 +++- GPy/likelihoods/link_functions.py | 25 +++----- GPy/testing/model_tests.py | 2 +- GPy/util/misc.py | 4 +- 
GPy/util/univariate_Gaussian.py | 72 ++-------------------- 7 files changed, 38 insertions(+), 96 deletions(-) diff --git a/GPy/inference/optimization/optimization.py b/GPy/inference/optimization/optimization.py index aa9be793..5aa2ed03 100644 --- a/GPy/inference/optimization/optimization.py +++ b/GPy/inference/optimization/optimization.py @@ -140,6 +140,10 @@ class opt_lbfgsb(Optimizer): self.funct_eval = opt_result[2]['funcalls'] self.status = rcstrings[opt_result[2]['warnflag']] + #a more helpful error message is available in opt_result in the Error case + if opt_result[2]['warnflag']==2: + self.status = 'Error' + opt_result[2]['task'] + class opt_simplex(Optimizer): def __init__(self, *args, **kwargs): Optimizer.__init__(self, *args, **kwargs) diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py index f5690aa4..2febda96 100644 --- a/GPy/likelihoods/bernoulli.py +++ b/GPy/likelihoods/bernoulli.py @@ -2,10 +2,10 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf +from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf + import link_functions from likelihood import Likelihood -from scipy import stats class Bernoulli(Likelihood): """ @@ -81,19 +81,18 @@ class Bernoulli(Likelihood): if isinstance(self.gp_link, link_functions.Probit): if gh_points is None: - gh_x, gh_w = np.polynomial.hermite.hermgauss(20) + gh_x, gh_w = self._gh_points() else: gh_x, gh_w = gh_points - from scipy import stats shape = m.shape m,v,Y = m.flatten(), v.flatten(), Y.flatten() Ysign = np.where(Y==1,1,-1) X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + (m*Ysign)[:,None] - p = stats.norm.cdf(X) + p = std_norm_cdf(X) p = np.clip(p, 1e-9, 1.-1e-9) # for numerical stability - N = stats.norm.pdf(X) + N = std_norm_pdf(X) F = np.log(p).dot(gh_w) NoverP = N/p dF_dm = (NoverP*Ysign[:,None]).dot(gh_w) @@ -106,10 +105,10 @@ class Bernoulli(Likelihood): def predictive_mean(self, mu, 
variance, Y_metadata=None): if isinstance(self.gp_link, link_functions.Probit): - return stats.norm.cdf(mu/np.sqrt(1+variance)) + return std_norm_cdf(mu/np.sqrt(1+variance)) elif isinstance(self.gp_link, link_functions.Heaviside): - return stats.norm.cdf(mu/np.sqrt(variance)) + return std_norm_cdf(mu/np.sqrt(variance)) else: raise NotImplementedError diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index 4f3f2e37..9f2f3e7a 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt) +# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np @@ -165,6 +165,13 @@ class Likelihood(Parameterized): return z, mean, variance + #only compute gh points if required + __gh_points = None + def _gh_points(self): + if self.__gh_points is None: + self.__gh_points = np.polynomial.hermite.hermgauss(20) + return self.__gh_points + def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None): """ Use Gauss-Hermite Quadrature to compute @@ -177,10 +184,9 @@ class Likelihood(Parameterized): if no gh_points are passed, we construct them using defualt options """ - #May be broken if gh_points is None: - gh_x, gh_w = np.polynomial.hermite.hermgauss(20) + gh_x, gh_w = self._gh_points() else: gh_x, gh_w = gh_points diff --git a/GPy/likelihoods/link_functions.py b/GPy/likelihoods/link_functions.py index a4ddc760..6b297f92 100644 --- a/GPy/likelihoods/link_functions.py +++ b/GPy/likelihoods/link_functions.py @@ -1,10 +1,9 @@ -# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt) +# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from scipy import stats +from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf import scipy as sp -from GPy.util.univariate_Gaussian import 
std_norm_pdf,std_norm_cdf,inv_std_norm_cdf _exp_lim_val = np.finfo(np.float64).max _lim_val = np.log(_exp_lim_val) @@ -64,13 +63,12 @@ class Identity(GPTransformation): def d3transf_df3(self,f): return np.zeros_like(f) - class Probit(GPTransformation): """ .. math:: g(f) = \\Phi^{-1} (mu) - + """ def transf(self,f): return std_norm_cdf(f) @@ -79,13 +77,10 @@ class Probit(GPTransformation): return std_norm_pdf(f) def d2transf_df2(self,f): - #FIXME return -f * std_norm_pdf(f) def d3transf_df3(self,f): - #FIXME - f2 = f**2 - return -(1/(np.sqrt(2*np.pi)))*np.exp(-0.5*(f2))*(1-f2) + return (np.square(f)-1.)*std_norm_pdf(f) class Cloglog(GPTransformation): @@ -98,7 +93,7 @@ class Cloglog(GPTransformation): or f = \log (-\log(1-p)) - + """ def transf(self,f): return 1-np.exp(-np.exp(f)) @@ -123,16 +118,16 @@ class Log(GPTransformation): """ def transf(self,f): - return np.exp(np.clip(f, -_lim_val, _lim_val)) + return np.exp(np.clip(f, -np.inf, _lim_val)) def dtransf_df(self,f): - return np.exp(np.clip(f, -_lim_val, _lim_val)) + return np.exp(np.clip(f, -np.inf, _lim_val)) def d2transf_df2(self,f): - return np.exp(np.clip(f, -_lim_val, _lim_val)) + return np.exp(np.clip(f, -np.inf, _lim_val)) def d3transf_df3(self,f): - return np.exp(np.clip(f, -_lim_val, _lim_val)) + return np.exp(np.clip(f, -np.inf, _lim_val)) class Log_ex_1(GPTransformation): """ @@ -174,7 +169,7 @@ class Heaviside(GPTransformation): .. 
math:: - g(f) = I_{x \\in A} + g(f) = I_{x \\geq 0} """ def transf(self,f): diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index 559014f7..5950de08 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -476,7 +476,7 @@ class GradientTests(np.testing.TestCase): likelihood = GPy.likelihoods.MixedNoise(likelihoods_list=likelihoods_list) m = GPy.core.SparseGP(X, Y, X[np.random.choice(num_obs, 10)], kern, likelihood, - GPy.inference.latent_function_inference.VarDTC(), + inference_method=GPy.inference.latent_function_inference.VarDTC(), Y_metadata=Y_metadata) self.assertTrue(m.checkgrad()) diff --git a/GPy/util/misc.py b/GPy/util/misc.py index 99bd62b3..84bf4dc1 100644 --- a/GPy/util/misc.py +++ b/GPy/util/misc.py @@ -23,7 +23,7 @@ def chain_1(df_dg, dg_dx): """ if np.all(dg_dx==1.): return df_dg - if len(df_dg) > 1 and df_dg.shape[-1] > 1: + if len(df_dg) > 1 and len(df_dg.shape)>1 and df_dg.shape[-1] > 1: import ipdb; ipdb.set_trace() # XXX BREAKPOINT raise NotImplementedError('Not implemented for matricies yet') return df_dg * dg_dx @@ -37,7 +37,7 @@ def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2): """ if np.all(dg_dx==1.) 
and np.all(d2g_dx2 == 0): return d2f_dg2 - if len(d2f_dg2) > 1 and d2f_dg2.shape[-1] > 1: + if len(d2f_dg2) > 1 and len(d2f_dg2.shape)>1 and d2f_dg2.shape[-1] > 1: raise NotImplementedError('Not implemented for matricies yet') #dg_dx_2 = np.clip(dg_dx, 1e-12, _lim_val_square)**2 dg_dx_2 = dg_dx**2 diff --git a/GPy/util/univariate_Gaussian.py b/GPy/util/univariate_Gaussian.py index 09b2e99c..79864f86 100644 --- a/GPy/util/univariate_Gaussian.py +++ b/GPy/util/univariate_Gaussian.py @@ -1,77 +1,15 @@ # Copyright (c) 2012, 2013 Ricardo Andrade +# Copyright (c) 2015 James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np from scipy import weave +from scipy.special import ndtr as std_norm_cdf +#define a standard normal pdf +_sqrt_2pi = np.sqrt(2*np.pi) def std_norm_pdf(x): - """Standard Gaussian density function""" - return 1./np.sqrt(2.*np.pi)*np.exp(-.5*x**2) - -def std_norm_cdf(x): - """ - Cumulative standard Gaussian distribution - Based on Abramowitz, M. and Stegun, I. (1970) - """ - x_shape = np.asarray(x).shape - - if len(x_shape) == 0 or x_shape[0] == 1: - sign = np.sign(x) - x *= sign - x /= np.sqrt(2.) - t = 1.0/(1.0 + 0.3275911*x) - erf = 1. - np.exp(-x**2)*t*(0.254829592 + t*(-0.284496736 + t*(1.421413741 + t*(-1.453152027 + t*(1.061405429))))) - cdf_x = 0.5*(1.0 + sign*erf) - return cdf_x - else: - x = np.atleast_1d(x).copy() - cdf_x = np.zeros_like(x) - sign = np.ones_like(x) - neg_x_ind = x<0 - sign[neg_x_ind] = -1.0 - x[neg_x_ind] = -x[neg_x_ind] - x /= np.sqrt(2.) - t = 1.0/(1.0 + 0.3275911*x) - erf = 1. - np.exp(-x**2)*t*(0.254829592 + t*(-0.284496736 + t*(1.421413741 + t*(-1.453152027 + t*(1.061405429))))) - cdf_x = 0.5*(1.0 + sign*erf) - cdf_x = cdf_x.reshape(x_shape) - return cdf_x - -def std_norm_cdf_weave(x): - """ - Cumulative standard Gaussian distribution - Based on Abramowitz, M. and Stegun, I. (1970) - - A weave implementation of std_norm_cdf, which is faster. 
this is unused, - because of the difficulties of a weave dependency. (see github issue #94) - - """ - #Generalize for many x - x = np.asarray(x).copy() - cdf_x = np.zeros_like(x) - N = x.size - support_code = "#include " - code = """ - - double sign, t, erf; - for (int i=0; i Date: Fri, 10 Apr 2015 09:58:11 +0100 Subject: [PATCH 88/99] Added numerical clipping --- GPy/util/misc.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/GPy/util/misc.py b/GPy/util/misc.py index 66b7b3b9..37e19b9f 100644 --- a/GPy/util/misc.py +++ b/GPy/util/misc.py @@ -2,13 +2,14 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np +from scipy.special import cbrt from .config import * _lim_val = np.finfo(np.float64).max _lim_val_exp = np.log(_lim_val) _lim_val_square = np.sqrt(_lim_val) -_lim_val_cube = np.power(_lim_val, -3) +_lim_val_cube = cbrt(_lim_val) def safe_exp(f): clip_f = np.clip(f, -np.inf, _lim_val_exp) @@ -39,8 +40,8 @@ def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2): return d2f_dg2 if len(d2f_dg2) > 1 and d2f_dg2.shape[-1] > 1: raise NotImplementedError('Not implemented for matricies yet') - #dg_dx_2 = np.clip(dg_dx, 1e-12, _lim_val_square)**2 - dg_dx_2 = dg_dx**2 + dg_dx_2 = np.clip(dg_dx, -np.inf, _lim_val_square)**2 + #dg_dx_2 = dg_dx**2 return d2f_dg2*(dg_dx_2) + df_dg*d2g_dx2 def chain_3(d3f_dg3, dg_dx, d2f_dg2, d2g_dx2, df_dg, d3g_dx3): @@ -55,8 +56,8 @@ def chain_3(d3f_dg3, dg_dx, d2f_dg2, d2g_dx2, df_dg, d3g_dx3): if ( (len(d2f_dg2) > 1 and d2f_dg2.shape[-1] > 1) or (len(d3f_dg3) > 1 and d3f_dg3.shape[-1] > 1)): raise NotImplementedError('Not implemented for matricies yet') - #dg_dx_3 = np.clip(dg_dx, 1e-12, _lim_val_cube)**3 - dg_dx_3 = dg_dx**3 + dg_dx_3 = np.clip(dg_dx, -np.inf, _lim_val_cube)**3 + #dg_dx_3 = dg_dx**3 return d3f_dg3*(dg_dx_3) + 3*d2f_dg2*dg_dx*d2g_dx2 + df_dg*d3g_dx3 def opt_wrapper(m, **kwargs): From ef2b11c799070fec46e20aa8d63aa323a6be6d8a Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 
10 Apr 2015 10:40:18 +0100 Subject: [PATCH 89/99] Minor commenting changes --- GPy/core/gp.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index bbd3939b..1d2c9e2d 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -296,7 +296,7 @@ class GP(Model): :type size: int. :param full_cov: whether to return the full covariance matrix, or just the diagonal. :type full_cov: bool. - :returns: Ysim: set of simulations + :returns: fsim: set of simulations :rtype: np.ndarray (N x samples) """ m, v = self._raw_predict(X, full_cov=full_cov) @@ -304,11 +304,11 @@ class GP(Model): m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v) v = v.reshape(m.size,-1) if len(v.shape)==3 else v if not full_cov: - Ysim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T + fsim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T else: - Ysim = np.random.multivariate_normal(m.flatten(), v, size).T + fsim = np.random.multivariate_normal(m.flatten(), v, size).T - return Ysim + return fsim def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None): """ @@ -324,7 +324,7 @@ class GP(Model): :type noise_model: integer. :returns: Ysim: set of simulations, a Numpy array (N x samples). """ - Ysim = self.posterior_samples_f(X, size, full_cov=full_cov) + fsim = self.posterior_samples_f(X, size, full_cov=full_cov) Ysim = self.likelihood.samples(Ysim, Y_metadata) return Ysim From 034d141d6353ab22d5d07ece83dd69cab0de93ae Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 10 Apr 2015 10:43:00 +0100 Subject: [PATCH 90/99] Fix typo --- GPy/core/gp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 1d2c9e2d..dc1519e1 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -325,7 +325,7 @@ class GP(Model): :returns: Ysim: set of simulations, a Numpy array (N x samples). 
""" fsim = self.posterior_samples_f(X, size, full_cov=full_cov) - Ysim = self.likelihood.samples(Ysim, Y_metadata) + Ysim = self.likelihood.samples(fsim, Y_metadata) return Ysim From 5c9587404d27af60e5be0df5f630d1d4e02fd064 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 10 Apr 2015 14:58:02 +0100 Subject: [PATCH 91/99] Added some numerical stability to link functions with tests for link functions --- GPy/likelihoods/link_functions.py | 59 +++++++----- GPy/testing/link_function_tests.py | 143 +++++++++++++++++++++++++++++ GPy/util/misc.py | 22 ++++- 3 files changed, 197 insertions(+), 27 deletions(-) create mode 100644 GPy/testing/link_function_tests.py diff --git a/GPy/likelihoods/link_functions.py b/GPy/likelihoods/link_functions.py index 60e260e7..03495c7e 100644 --- a/GPy/likelihoods/link_functions.py +++ b/GPy/likelihoods/link_functions.py @@ -5,9 +5,8 @@ import numpy as np from scipy import stats import scipy as sp from GPy.util.univariate_Gaussian import std_norm_pdf,std_norm_cdf,inv_std_norm_cdf - -_exp_lim_val = np.finfo(np.float64).max -_lim_val = np.log(_exp_lim_val) +from scipy.special import cbrt +from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times class GPTransformation(object): """ @@ -70,7 +69,7 @@ class Probit(GPTransformation): .. math:: g(f) = \\Phi^{-1} (mu) - + """ def transf(self,f): return std_norm_cdf(f) @@ -84,7 +83,7 @@ class Probit(GPTransformation): def d3transf_df3(self,f): #FIXME - f2 = f**2 + f2 = safe_square(f) return -(1/(np.sqrt(2*np.pi)))*np.exp(-0.5*(f2))*(1-f2) @@ -98,22 +97,26 @@ class Cloglog(GPTransformation): or f = \log (-\log(1-p)) - + """ def transf(self,f): - return 1-np.exp(-np.exp(f)) + ef = safe_exp(f) + return 1-np.exp(-ef) def dtransf_df(self,f): - return np.exp(f-np.exp(f)) + ef = safe_exp(f) + return np.exp(f-ef) def d2transf_df2(self,f): - ef = np.exp(f) + ef = safe_exp(f) return -np.exp(f-ef)*(ef-1.) 
def d3transf_df3(self,f): - ef = np.exp(f) - return np.exp(f-ef)*(1.-3*ef + ef**2) - + ef = safe_exp(f) + ef2 = safe_square(ef) + three_times_ef = safe_three_times(ef) + r_val = np.exp(f-ef)*(1.-three_times_ef + ef2) + return r_val class Log(GPTransformation): """ @@ -123,16 +126,16 @@ class Log(GPTransformation): """ def transf(self,f): - return np.exp(np.clip(f, -_lim_val, _lim_val)) + return safe_exp(f) def dtransf_df(self,f): - return np.exp(np.clip(f, -_lim_val, _lim_val)) + return safe_exp(f) def d2transf_df2(self,f): - return np.exp(np.clip(f, -_lim_val, _lim_val)) + return safe_exp(f) def d3transf_df3(self,f): - return np.exp(np.clip(f, -_lim_val, _lim_val)) + return safe_exp(f) class Log_ex_1(GPTransformation): """ @@ -142,17 +145,20 @@ class Log_ex_1(GPTransformation): """ def transf(self,f): - return np.log(1.+np.exp(f)) + return np.log1p(safe_exp(f)) def dtransf_df(self,f): - return np.exp(f)/(1.+np.exp(f)) + ef = safe_exp(f) + return ef/(1.+ef) def d2transf_df2(self,f): - aux = np.exp(f)/(1.+np.exp(f)) + ef = safe_exp(f) + aux = ef/(1.+ef) return aux*(1.-aux) def d3transf_df3(self,f): - aux = np.exp(f)/(1.+np.exp(f)) + ef = safe_exp(f) + aux = ef/(1.+ef) daux_df = aux*(1.-aux) return daux_df - (2.*aux*daux_df) @@ -160,14 +166,17 @@ class Reciprocal(GPTransformation): def transf(self,f): return 1./f - def dtransf_df(self,f): - return -1./(f**2) + def dtransf_df(self, f): + f2 = safe_square(f) + return -1./f2 - def d2transf_df2(self,f): - return 2./(f**3) + def d2transf_df2(self, f): + f3 = safe_cube(f) + return 2./f3 def d3transf_df3(self,f): - return -6./(f**4) + f4 = safe_quad(f) + return -6./f4 class Heaviside(GPTransformation): """ diff --git a/GPy/testing/link_function_tests.py b/GPy/testing/link_function_tests.py new file mode 100644 index 00000000..fb8fba99 --- /dev/null +++ b/GPy/testing/link_function_tests.py @@ -0,0 +1,143 @@ +import numpy as np +import scipy as sp +from scipy.special import cbrt +from GPy.models import GradientChecker 
+_lim_val = np.finfo(np.float64).max +_lim_val_exp = np.log(_lim_val) +_lim_val_square = np.sqrt(_lim_val) +_lim_val_cube = cbrt(_lim_val) +from GPy.likelihoods.link_functions import Identity, Probit, Cloglog, Log, Log_ex_1, Reciprocal, Heaviside + +class LinkFunctionTests(np.testing.TestCase): + def setUp(self): + self.small_f = np.array([[-1e-4]]) + self.zero_f = np.array([[1e-4]]) + self.mid_f = np.array([[5.0]]) + self.large_f = np.array([[1e4]]) + self.f_lower_lim = np.array(-np.inf) + self.f_upper_lim = np.array(np.inf) + + def check_gradient(self, link_func, lim_of_inf, test_lim=False): + grad = GradientChecker(link_func.transf, link_func.dtransf_df, x0=self.mid_f) + self.assertTrue(grad.checkgrad(verbose=True)) + grad2 = GradientChecker(link_func.dtransf_df, link_func.d2transf_df2, x0=self.mid_f) + self.assertTrue(grad2.checkgrad(verbose=True)) + grad3 = GradientChecker(link_func.d2transf_df2, link_func.d3transf_df3, x0=self.mid_f) + self.assertTrue(grad3.checkgrad(verbose=True)) + + grad = GradientChecker(link_func.transf, link_func.dtransf_df, x0=self.small_f) + self.assertTrue(grad.checkgrad(verbose=True)) + grad2 = GradientChecker(link_func.dtransf_df, link_func.d2transf_df2, x0=self.small_f) + self.assertTrue(grad2.checkgrad(verbose=True)) + grad3 = GradientChecker(link_func.d2transf_df2, link_func.d3transf_df3, x0=self.small_f) + self.assertTrue(grad3.checkgrad(verbose=True)) + + grad = GradientChecker(link_func.transf, link_func.dtransf_df, x0=self.zero_f) + self.assertTrue(grad.checkgrad(verbose=True)) + grad2 = GradientChecker(link_func.dtransf_df, link_func.d2transf_df2, x0=self.zero_f) + self.assertTrue(grad2.checkgrad(verbose=True)) + grad3 = GradientChecker(link_func.d2transf_df2, link_func.d3transf_df3, x0=self.zero_f) + self.assertTrue(grad3.checkgrad(verbose=True)) + + #Do a limit test if the large f value is too large + large_f = np.clip(self.large_f, -np.inf, lim_of_inf-1e-3) + grad = GradientChecker(link_func.transf, link_func.dtransf_df, 
x0=large_f) + self.assertTrue(grad.checkgrad(verbose=True)) + grad2 = GradientChecker(link_func.dtransf_df, link_func.d2transf_df2, x0=large_f) + self.assertTrue(grad2.checkgrad(verbose=True)) + grad3 = GradientChecker(link_func.d2transf_df2, link_func.d3transf_df3, x0=large_f) + self.assertTrue(grad3.checkgrad(verbose=True)) + + if test_lim: + print "Testing limits" + #Remove some otherwise we are too close to the limit for gradcheck to work effectively + lim_of_inf = lim_of_inf - 1e-4 + grad = GradientChecker(link_func.transf, link_func.dtransf_df, x0=lim_of_inf) + self.assertTrue(grad.checkgrad(verbose=True)) + grad2 = GradientChecker(link_func.dtransf_df, link_func.d2transf_df2, x0=lim_of_inf) + self.assertTrue(grad2.checkgrad(verbose=True)) + grad3 = GradientChecker(link_func.d2transf_df2, link_func.d3transf_df3, x0=lim_of_inf) + self.assertTrue(grad3.checkgrad(verbose=True)) + + def check_overflow(self, link_func, lim_of_inf): + #Check that it does something sensible beyond this limit, + #note this is not checking the value is correct, just that it isn't nan + beyond_lim_of_inf = lim_of_inf + 100.0 + self.assertFalse(np.isinf(link_func.transf(beyond_lim_of_inf))) + self.assertFalse(np.isinf(link_func.dtransf_df(beyond_lim_of_inf))) + self.assertFalse(np.isinf(link_func.d2transf_df2(beyond_lim_of_inf))) + + self.assertFalse(np.isnan(link_func.transf(beyond_lim_of_inf))) + self.assertFalse(np.isnan(link_func.dtransf_df(beyond_lim_of_inf))) + self.assertFalse(np.isnan(link_func.d2transf_df2(beyond_lim_of_inf))) + + def test_log_overflow(self): + link = Log() + lim_of_inf = _lim_val_exp + + np.testing.assert_almost_equal(np.exp(self.mid_f), link.transf(self.mid_f)) + assert np.isinf(np.exp(np.log(self.f_upper_lim))) + #Check the clipping works + np.testing.assert_almost_equal(link.transf(self.f_lower_lim), 0, decimal=5) + #Need to look at most significant figures here rather than the decimals + np.testing.assert_approx_equal(link.transf(self.f_upper_lim), 
_lim_val, significant=5) + self.check_overflow(link, lim_of_inf) + + #Check that it would otherwise fail + beyond_lim_of_inf = lim_of_inf + 10.0 + old_err_state = np.seterr(over='ignore') + self.assertTrue(np.isinf(np.exp(beyond_lim_of_inf))) + np.seterr(**old_err_state) + + def test_log_ex_1_overflow(self): + link = Log_ex_1() + lim_of_inf = _lim_val_exp + + np.testing.assert_almost_equal(np.log1p(np.exp(self.mid_f)), link.transf(self.mid_f)) + assert np.isinf(np.log1p(np.exp(np.log(self.f_upper_lim)))) + #Check the clipping works + np.testing.assert_almost_equal(link.transf(self.f_lower_lim), 0, decimal=5) + #Need to look at most significant figures here rather than the decimals + np.testing.assert_approx_equal(link.transf(self.f_upper_lim), np.log1p(_lim_val), significant=5) + self.check_overflow(link, lim_of_inf) + + #Check that it would otherwise fail + beyond_lim_of_inf = lim_of_inf + 10.0 + old_err_state = np.seterr(over='ignore') + self.assertTrue(np.isinf(np.log1p(np.exp(beyond_lim_of_inf)))) + np.seterr(**old_err_state) + + + def test_log_gradients(self): + # transf dtransf_df d2transf_df2 d3transf_df3 + link = Log() + lim_of_inf = _lim_val_exp + self.check_gradient(link, lim_of_inf, test_lim=True) + + def test_identity_gradients(self): + link = Identity() + lim_of_inf = _lim_val + #FIXME: Should be able to think of a way to test the limits of this + self.check_gradient(link, lim_of_inf, test_lim=False) + + def test_probit_gradients(self): + link = Probit() + lim_of_inf = _lim_val + self.check_gradient(link, lim_of_inf, test_lim=True) + + def test_Cloglog_gradients(self): + link = Cloglog() + lim_of_inf = _lim_val_exp + self.check_gradient(link, lim_of_inf, test_lim=True) + + def test_Log_ex_1_gradients(self): + link = Log_ex_1() + lim_of_inf = _lim_val_exp + self.check_gradient(link, lim_of_inf, test_lim=True) + self.check_overflow(link, lim_of_inf) + + def test_reciprocal_gradients(self): + link = Reciprocal() + lim_of_inf = _lim_val + #Does not work 
with much smaller values, and values closer to zero than 1e-5 + self.check_gradient(link, lim_of_inf, test_lim=True) diff --git a/GPy/util/misc.py b/GPy/util/misc.py index 37e19b9f..3b88da48 100644 --- a/GPy/util/misc.py +++ b/GPy/util/misc.py @@ -6,15 +6,33 @@ from scipy.special import cbrt from .config import * _lim_val = np.finfo(np.float64).max - _lim_val_exp = np.log(_lim_val) _lim_val_square = np.sqrt(_lim_val) -_lim_val_cube = cbrt(_lim_val) +#_lim_val_cube = cbrt(_lim_val) +_lim_val_cube = np.nextafter(_lim_val**(1/3.0), -np.inf) +_lim_val_quad = np.nextafter(_lim_val**(1/4.0), -np.inf) +_lim_val_three_times = np.nextafter(_lim_val/3.0, -np.inf) def safe_exp(f): clip_f = np.clip(f, -np.inf, _lim_val_exp) return np.exp(clip_f) +def safe_square(f): + f = np.clip(f, -np.inf, _lim_val_square) + return f**2 + +def safe_cube(f): + f = np.clip(f, -np.inf, _lim_val_cube) + return f**3 + +def safe_quad(f): + f = np.clip(f, -np.inf, _lim_val_quad) + return f**4 + +def safe_three_times(f): + f = np.clip(f, -np.inf, _lim_val_three_times) + return 3*f + def chain_1(df_dg, dg_dx): """ Generic chaining function for first derivative From 8f34bed6d76f47c79324a22ffb3b4f59aa20508e Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 10 Apr 2015 15:03:07 +0100 Subject: [PATCH 92/99] Fix for model gradients --- GPy/core/model.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 4108e72c..937d30e5 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -256,7 +256,7 @@ class Model(Parameterized): else: optimizer = optimization.get_optimizer(optimizer) opt = optimizer(start, model=self, max_iters=max_iters, **kwargs) - + with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook) as vo: opt.run(f_fp=self._objective_grads, f=self._objective, fp=self._grads) vo.finish(opt) @@ -371,7 +371,12 @@ class Model(Parameterized): f1 = self._objective(xx) xx[xind] -= 2.*step 
f2 = self._objective(xx) - df_ratio = np.abs((f1 - f2) / min(f1, f2)) + #Avoid divide by zero, if any of the values are above 1e-15, otherwise both values are essentiall + #the same + if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15: + df_ratio = np.abs((f1 - f2) / min(f1, f2)) + else: + df_ratio = 1.0 df_unstable = df_ratio < df_tolerance numerical_gradient = (f1 - f2) / (2 * step) if np.all(gradient[xind] == 0): ratio = (f1 - f2) == gradient[xind] From dff9ca8e6b7084fd030872000b4670b45bdc4b62 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 10 Apr 2015 15:24:28 +0100 Subject: [PATCH 93/99] Added hessian and skew gradient checkers, some block functions --- GPy/kern/_src/independent_outputs.py | 12 +- GPy/models/bayesian_gplvm.py | 5 +- GPy/models/gradient_checker.py | 260 +++++++++++++++++++++++++++ GPy/util/block_matrices.py | 64 +++++-- 4 files changed, 323 insertions(+), 18 deletions(-) diff --git a/GPy/kern/_src/independent_outputs.py b/GPy/kern/_src/independent_outputs.py index aa9dca80..6f8b7be1 100644 --- a/GPy/kern/_src/independent_outputs.py +++ b/GPy/kern/_src/independent_outputs.py @@ -8,7 +8,7 @@ import itertools def index_to_slices(index): """ - take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index. + take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index. e.g. 
>>> index = np.asarray([0,0,0,1,1,1,2,2,2]) @@ -79,10 +79,10 @@ class IndependentOutputs(CombinationKernel): def update_gradients_full(self,dL_dK,X,X2=None): slices = index_to_slices(X[:,self.index_dim]) - if self.single_kern: + if self.single_kern: target = np.zeros(self.kern.size) kerns = itertools.repeat(self.kern) - else: + else: kerns = self.kern target = [np.zeros(kern.size) for kern, _ in zip(kerns, slices)] def collate_grads(kern, i, dL, X, X2): @@ -94,7 +94,7 @@ class IndependentOutputs(CombinationKernel): else: slices2 = index_to_slices(X2[:,self.index_dim]) [[[collate_grads(kern, i, dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for i,(kern,slices_i,slices_j) in enumerate(zip(kerns,slices,slices2))] - if self.single_kern: + if self.single_kern: self.kern.gradient = target else: [kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))] @@ -104,12 +104,14 @@ class IndependentOutputs(CombinationKernel): kerns = itertools.repeat(self.kern) if self.single_kern else self.kern if X2 is None: # TODO: make use of index_to_slices + # FIXME: Broken as X is already sliced out + print "Warning, gradients_X may not be working, I believe X has already been sliced out by the slicer!" 
values = np.unique(X[:,self.index_dim]) slices = [X[:,self.index_dim]==i for i in values] [target.__setitem__(s, kern.gradients_X(dL_dK[s,s],X[s],None)) for kern, s in zip(kerns, slices)] #slices = index_to_slices(X[:,self.index_dim]) - #[[np.add(target[s], kern.gradients_X(dL_dK[s,s], X[s]), out=target[s]) + #[[np.add(target[s], kern.gradients_X(dL_dK[s,s], X[s]), out=target[s]) # for s in slices_i] for kern, slices_i in zip(kerns, slices)] #import ipdb;ipdb.set_trace() #[[(np.add(target[s ], kern.gradients_X(dL_dK[s ,ss],X[s ], X[ss]), out=target[s ]), diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py index 7cbd69eb..e0f6c0bc 100644 --- a/GPy/models/bayesian_gplvm.py +++ b/GPy/models/bayesian_gplvm.py @@ -24,7 +24,7 @@ class BayesianGPLVM(SparseGP_MPI): def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10, Z=None, kernel=None, inference_method=None, likelihood=None, name='bayesian gplvm', mpi_comm=None, normalizer=None, - missing_data=False, stochastic=False, batchsize=1): + missing_data=False, stochastic=False, batchsize=1, Y_metadata=None): self.logger = logging.getLogger(self.__class__.__name__) if X is None: @@ -69,6 +69,7 @@ class BayesianGPLVM(SparseGP_MPI): name=name, inference_method=inference_method, normalizer=normalizer, mpi_comm=mpi_comm, variational_prior=self.variational_prior, + Y_metadata=None ) self.link_parameter(self.X, index=0) @@ -83,7 +84,7 @@ class BayesianGPLVM(SparseGP_MPI): def parameters_changed(self): super(BayesianGPLVM,self).parameters_changed() if isinstance(self.inference_method, VarDTC_minibatch): - return + return kl_fctr = 1. 
self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X) diff --git a/GPy/models/gradient_checker.py b/GPy/models/gradient_checker.py index 74026f8e..c2cde834 100644 --- a/GPy/models/gradient_checker.py +++ b/GPy/models/gradient_checker.py @@ -5,6 +5,8 @@ from ..core.model import Model import itertools import numpy from ..core.parameterization import Param +np = numpy +from ..util.block_matrices import get_blocks, get_block_shapes, unblock, get_blocks_3d, get_block_shapes_3d def get_shape(x): if isinstance(x, numpy.ndarray): @@ -111,3 +113,261 @@ class GradientChecker(Model): #for name, shape in zip(self.names, self.shapes): #_param_names.extend(map(lambda nameshape: ('_'.join(nameshape)).strip('_'), itertools.izip(itertools.repeat(name), itertools.imap(lambda t: '_'.join(map(str, t)), itertools.product(*map(lambda xi: range(xi), shape)))))) #return _param_names + + +class HessianChecker(GradientChecker): + + def __init__(self, f, df, ddf, x0, names=None, *args, **kwargs): + """ + :param f: Function (only used for numerical hessian gradient) + :param df: Gradient of function to check + :param ddf: Analytical gradient function + :param x0: + Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names). + Can be a list of arrays, if takes a list of arrays. This list will be passed + to f and df in the same order as given here. + If only one argument, make sure not to pass a list!!! + + :type x0: [array-like] | array-like | float | int + :param names: + Names to print, when performing gradcheck. If a list was passed to x0 + a list of names with the same length is expected. 
+ :param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs) + + """ + super(HessianChecker, self).__init__(df, ddf, x0, names=names, *args, **kwargs) + self._f = f + self._df = df + self._ddf = ddf + + def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False): + """ + Overwrite checkgrad method to check whole block instead of looping through + + Shows diagnostics using matshow instead + + :param verbose: If True, print a "full" checking of each parameter + :type verbose: bool + :param step: The size of the step around which to linearise the objective + :type step: float (default 1e-6) + :param tolerance: the tolerance allowed (see note) + :type tolerance: float (default 1e-3) + + Note:- + The gradient is considered correct if the ratio of the analytical + and numerical gradients is within of unity. + """ + try: + import numdifftools as nd + except: + raise ImportError("Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests") + + if target_param: + raise NotImplementedError('Only basic functionality is provided with this gradchecker') + + #Repeat for each parameter, not the nicest but shouldn't be many cases where there are many + #variables + current_index = 0 + for name, shape in zip(self.names, self.shapes): + current_size = numpy.prod(shape) + x = self.optimizer_array.copy() + #x = self._get_params_transformed().copy() + x = x[current_index:current_index + current_size].reshape(shape) + + # Check gradients + analytic_hess = self._ddf(x) + if analytic_hess.shape[1] == 1: + analytic_hess = numpy.diagflat(analytic_hess) + + #From the docs: + #x0 : vector location + #at which to differentiate fun + #If x0 is an N x M array, then fun is assumed to be a function + #of N*M variables., thus we must have it flat, not (N,1), but just (N,) + #numeric_hess_partial = nd.Hessian(self._f, vectorized=False) + numeric_hess_partial = 
nd.Jacobian(self._df, vectorized=False) + #numeric_hess_partial = nd.Derivative(self._df, vectorized=True) + numeric_hess = numeric_hess_partial(x) + + check_passed = self.checkgrad_block(analytic_hess, numeric_hess, verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=plot) + current_index += current_size + return check_passed + + def checkgrad_block(self, analytic_hess, numeric_hess, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False): + """ + Checkgrad a block matrix + """ + if analytic_hess.dtype is np.dtype('object'): + #Make numeric hessian also into a block matrix + real_size = get_block_shapes(analytic_hess) + num_elements = np.sum(real_size) + if (num_elements, num_elements) == numeric_hess.shape: + #If the sizes are the same we assume they are the same + #(we have not fixed any values so the numeric is the whole hessian) + numeric_hess = get_blocks(numeric_hess, real_size) + else: + #Make a fake empty matrix and fill out the correct block + tmp_numeric_hess = get_blocks(np.zeros((num_elements, num_elements)), real_size) + tmp_numeric_hess[block_indices] = numeric_hess.copy() + numeric_hess = tmp_numeric_hess + + if block_indices is not None: + #Extract the right block + analytic_hess = analytic_hess[block_indices] + numeric_hess = numeric_hess[block_indices] + else: + #Unblock them if they are in blocks and you aren't checking a single block (checking whole hessian) + if analytic_hess.dtype is np.dtype('object'): + analytic_hess = unblock(analytic_hess) + numeric_hess = unblock(numeric_hess) + + ratio = numeric_hess / (numpy.where(analytic_hess==0, 1e-10, analytic_hess)) + difference = numpy.abs(analytic_hess - numeric_hess) + + check_passed = numpy.all((numpy.abs(1 - ratio)) < tolerance) or numpy.allclose(numeric_hess, analytic_hess, atol = tolerance) + + if verbose: + if block_indices: + print "\nBlock {}".format(block_indices) + else: + print "\nAll blocks" + + header = ['Checked', 'Max-Ratio', 
'Min-Ratio', 'Min-Difference', 'Max-Difference'] + header_string = map(lambda x: ' | '.join(header), [header]) + separator = '-' * len(header_string[0]) + print '\n'.join([header_string[0], separator]) + min_r = '%.6f' % float(numpy.min(ratio)) + max_r = '%.6f' % float(numpy.max(ratio)) + max_d = '%.6f' % float(numpy.max(difference)) + min_d = '%.6f' % float(numpy.min(difference)) + cols = [max_r, min_r, min_d, max_d] + + if check_passed: + checked = "\033[92m True \033[0m" + else: + checked = "\033[91m False \033[0m" + + grad_string = "{} | {} | {} | {} | {} ".format(checked, cols[0], cols[1], cols[2], cols[3]) + print grad_string + + if plot: + import pylab as pb + fig, axes = pb.subplots(2, 2) + max_lim = numpy.max(numpy.vstack((analytic_hess, numeric_hess))) + min_lim = numpy.min(numpy.vstack((analytic_hess, numeric_hess))) + msa = axes[0,0].matshow(analytic_hess, vmin=min_lim, vmax=max_lim) + axes[0,0].set_title('Analytic hessian') + axes[0,0].xaxis.set_ticklabels([None]) + axes[0,0].yaxis.set_ticklabels([None]) + axes[0,0].xaxis.set_ticks([None]) + axes[0,0].yaxis.set_ticks([None]) + msn = axes[0,1].matshow(numeric_hess, vmin=min_lim, vmax=max_lim) + pb.colorbar(msn, ax=axes[0,1]) + axes[0,1].set_title('Numeric hessian') + axes[0,1].xaxis.set_ticklabels([None]) + axes[0,1].yaxis.set_ticklabels([None]) + axes[0,1].xaxis.set_ticks([None]) + axes[0,1].yaxis.set_ticks([None]) + msr = axes[1,0].matshow(ratio) + pb.colorbar(msr, ax=axes[1,0]) + axes[1,0].set_title('Ratio') + axes[1,0].xaxis.set_ticklabels([None]) + axes[1,0].yaxis.set_ticklabels([None]) + axes[1,0].xaxis.set_ticks([None]) + axes[1,0].yaxis.set_ticks([None]) + msd = axes[1,1].matshow(difference) + pb.colorbar(msd, ax=axes[1,1]) + axes[1,1].set_title('difference') + axes[1,1].xaxis.set_ticklabels([None]) + axes[1,1].yaxis.set_ticklabels([None]) + axes[1,1].xaxis.set_ticks([None]) + axes[1,1].yaxis.set_ticks([None]) + if block_indices: + fig.suptitle("Block: {}".format(block_indices)) + pb.show() + + 
return check_passed + +class SkewChecker(HessianChecker): + + def __init__(self, df, ddf, dddf, x0, names=None, *args, **kwargs): + """ + :param df: gradient of function + :param ddf: Gradient of function to check (hessian) + :param dddf: Analytical gradient function (third derivative) + :param x0: + Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names). + Can be a list of arrays, if takes a list of arrays. This list will be passed + to f and df in the same order as given here. + If only one argument, make sure not to pass a list!!! + + :type x0: [array-like] | array-like | float | int + :param names: + Names to print, when performing gradcheck. If a list was passed to x0 + a list of names with the same length is expected. + :param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs) + + """ + super(SkewChecker, self).__init__(df, ddf, dddf, x0, names=names, *args, **kwargs) + + def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False, super_plot=False): + """ + Gradient checker that just checks each hessian individually + + super_plot will plot the hessian wrt every parameter, plot will just do the first one + """ + try: + import numdifftools as nd + except: + raise ImportError("Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests") + + if target_param: + raise NotImplementedError('Only basic functionality is provided with this gradchecker') + + #Repeat for each parameter, not the nicest but shouldn't be many cases where there are many + #variables + current_index = 0 + for name, n_shape in zip(self.names, self.shapes): + current_size = numpy.prod(n_shape) + x = self.optimizer_array.copy() + #x = self._get_params_transformed().copy() + x = x[current_index:current_index + current_size].reshape(n_shape) + + # Check gradients + #Actually the third derivative + analytic_hess = 
self._ddf(x) + + #Can only calculate jacobian for one variable at a time + #From the docs: + #x0 : vector location + #at which to differentiate fun + #If x0 is an N x M array, then fun is assumed to be a function + #of N*M variables., thus we must have it flat, not (N,1), but just (N,) + #numeric_hess_partial = nd.Hessian(self._f, vectorized=False) + #Actually _df is already the hessian + numeric_hess_partial = nd.Jacobian(self._df, vectorized=True) + numeric_hess = numeric_hess_partial(x) + + print "Done making numerical hessian" + if analytic_hess.dtype is np.dtype('object'): + #Blockify numeric_hess aswell + blocksizes, pagesizes = get_block_shapes_3d(analytic_hess) + #HACK + real_block_size = np.sum(blocksizes) + numeric_hess = numeric_hess.reshape(real_block_size, real_block_size, pagesizes) + #numeric_hess = get_blocks_3d(numeric_hess, blocksizes)#, pagesizes) + else: + numeric_hess = numeric_hess.reshape(*analytic_hess.shape) + + #Check every block individually (for ease) + check_passed = [False]*numeric_hess.shape[2] + for block_ind in xrange(numeric_hess.shape[2]): + #Unless super_plot is set, just plot the first one + p = True if (plot and block_ind == numeric_hess.shape[2]-1) or super_plot else False + if verbose: + print "Checking derivative of hessian wrt parameter number {}".format(block_ind) + check_passed[block_ind] = self.checkgrad_block(analytic_hess[:,:,block_ind], numeric_hess[:,:,block_ind], verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=p) + + current_index += current_size + return np.all(check_passed) + diff --git a/GPy/util/block_matrices.py b/GPy/util/block_matrices.py index a047abc6..e1e04aaa 100644 --- a/GPy/util/block_matrices.py +++ b/GPy/util/block_matrices.py @@ -1,9 +1,37 @@ -# Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
+# Copyright (c) 2014-2015, Alan Saul # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np +def get_blocks_3d(A, blocksizes, pagesizes=None): + """ + Given a 3d matrix, make a block matrix, where the first and second dimensions are blocked according + to blocksizes, and the pages are blocked using pagesizes + """ + assert (A.shape[0]==A.shape[1]) and len(A.shape)==3, "can't blockify this non-square matrix, may need to use 2d version" + N = np.sum(blocksizes) + assert A.shape[0] == N, "bad blocksizes" + num_blocks = len(blocksizes) + if pagesizes == None: + #Assume each page of A should be its own dimension + pagesizes = range(A.shape[2])#[0]*A.shape[2] + num_pages = len(pagesizes) + B = np.empty(shape=(num_blocks, num_blocks, num_pages), dtype=np.object) + count_k = 0 + #for Bk, k in enumerate(pagesizes): + for Bk in pagesizes: + count_i = 0 + for Bi, i in enumerate(blocksizes): + count_j = 0 + for Bj, j in enumerate(blocksizes): + #We want to have it count_k:count_k + k but its annoying as it makes a NxNx1 array is page sizes are set to 1 + B[Bi, Bj, Bk] = A[count_i:count_i + i, count_j:count_j + j, Bk] + count_j += j + count_i += i + #count_k += k + return B + def get_blocks(A, blocksizes): - assert (A.shape[0]==A.shape[1]) and len(A.shape)==2, "can;t blockify this non-square matrix" + assert (A.shape[0]==A.shape[1]) and len(A.shape)==2, "can't blockify this non-square matrix" N = np.sum(blocksizes) assert A.shape[0] == N, "bad blocksizes" num_blocks = len(blocksizes) @@ -17,6 +45,11 @@ def get_blocks(A, blocksizes): count_i += i return B +def get_block_shapes_3d(B): + assert B.dtype is np.dtype('object'), "Must be a block matrix" + #FIXME: This isn't general AT ALL... 
+ return get_block_shapes(B[:,:,0]), B.shape[2] + def get_block_shapes(B): assert B.dtype is np.dtype('object'), "Must be a block matrix" return [B[b,b].shape[0] for b in range(0, B.shape[0])] @@ -35,7 +68,7 @@ def unblock(B): count_i += i return A -def block_dot(A, B): +def block_dot(A, B, diagonal=False): """ Element wise dot product on block matricies @@ -48,21 +81,30 @@ def block_dot(A, B): +-------------+ +------+------+ +-------+-------+ ..Note + If any block of either (A or B) are stored as 1d vectors then we assume + that it denotes a diagonal matrix efficient dot product using numpy + broadcasting will be used, i.e. A11*B11 + If either (A or B) of the diagonal matrices are stored as vectors then a more efficient dot product using numpy broadcasting will be used, i.e. A11*B11 """ #Must have same number of blocks and be a block matrix assert A.dtype is np.dtype('object'), "Must be a block matrix" assert B.dtype is np.dtype('object'), "Must be a block matrix" - Ashape = A.shape - Bshape = B.shape - assert Ashape == Bshape - def f(A,B): - if Ashape[0] == Ashape[1] or Bshape[0] == Bshape[1]: - #FIXME: Careful if one is transpose of other, would make a matrix - return A*B + assert A.shape == B.shape + def f(C,D): + """ + C is an element of A, D is the associated element of B + """ + Cshape = C.shape + Dshape = D.shape + if diagonal and (len(Cshape) == 1 or len(Dshape) == 1\ + or C.shape[0] != C.shape[1] or D.shape[0] != D.shape[1]): + print "Broadcasting, C: {} D:{}".format(C.shape, D.shape) + return C*D else: - return np.dot(A,B) + print "Dotting, C: {} C:{}".format(C.shape, D.shape) + return np.dot(C,D) dot = np.vectorize(f, otypes = [np.object]) return dot(A,B) From f4cf052bce227730625a8a61e251dc57706adea2 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 10 Apr 2015 15:44:15 +0100 Subject: [PATCH 94/99] Added option to plot the transformed link function (posterior once the link function has been applied) --- GPy/core/gp.py | 19 +++--- 
GPy/plotting/matplot_dep/models_plots.py | 75 ++++++++++++++++++++---- 2 files changed, 77 insertions(+), 17 deletions(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index dc1519e1..75e5d49a 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -6,14 +6,13 @@ import sys from .. import kern from .model import Model from .parameterization import ObsAr -from .model import Model from .mapping import Mapping -from .parameterization import ObsAr from .. import likelihoods from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation from .parameterization.variational import VariationalPosterior import logging +import warnings from GPy.util.normalizer import MeanNorm logger = logging.getLogger("GP") @@ -65,10 +64,14 @@ class GP(Model): self.Y = ObsAr(Y) self.Y_normalized = self.Y - assert Y.shape[0] == self.num_data + if Y.shape[0] != self.num_data: + #There can be cases where we want inputs than outputs, for example if we have multiple latent + #function values + warnings.warn("There are more rows in your input data X, \ + than in your output data Y, be VERY sure this is what you want") _, self.output_dim = self.Y.shape - #TODO: check the type of this is okay? + assert ((Y_metadata is None) or isinstance(Y_metadata, dict)) self.Y_metadata = Y_metadata assert isinstance(kernel, kern.Kern) @@ -326,14 +329,14 @@ class GP(Model): """ fsim = self.posterior_samples_f(X, size, full_cov=full_cov) Ysim = self.likelihood.samples(fsim, Y_metadata) - return Ysim def plot_f(self, plot_limits=None, which_data_rows='all', which_data_ycols='all', fixed_inputs=[], levels=20, samples=0, fignum=None, ax=None, resolution=None, plot_raw=True, - linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx'): + linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx', + apply_link=False): """ Plot the GP's view of the world, where the data is normalized and before applying a likelihood. This is a call to plot with plot_raw=True. 
@@ -370,6 +373,8 @@ class GP(Model): :type Y_metadata: dict :param data_symbol: symbol as used matplotlib, by default this is a black cross ('kx') :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib. + :param apply_link: if there is a link function of the likelihood, plot the link(f*) rather than f* + :type apply_link: boolean """ assert "matplotlib" in sys.modules, "matplotlib package has not been imported." from ..plotting.matplot_dep import models_plots @@ -382,7 +387,7 @@ class GP(Model): which_data_ycols, fixed_inputs, levels, samples, fignum, ax, resolution, plot_raw=plot_raw, Y_metadata=Y_metadata, - data_symbol=data_symbol, **kw) + data_symbol=data_symbol, apply_link=apply_link, **kw) def plot(self, plot_limits=None, which_data_rows='all', which_data_ycols='all', fixed_inputs=[], diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index 5cdf69fc..0cda12f1 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012, GPy authors (see AUTHORS.txt). +# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) try: @@ -16,7 +16,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', which_data_ycols='all', fixed_inputs=[], levels=20, samples=0, fignum=None, ax=None, resolution=None, plot_raw=False, - linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx'): + linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx', + apply_link=False, samples_f=0, plot_uncertain_inputs=True): """ Plot the posterior of the GP. - In one dimension, the function is plotted with a shaded region identifying two standard deviations. 
@@ -38,7 +39,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', :type resolution: int :param levels: number of levels to plot in a contour plot. :type levels: int - :param samples: the number of a posteriori samples to plot + :param samples: the number of a posteriori samples to plot p(y*|y) :type samples: int :param fignum: figure to plot on. :type fignum: figure number @@ -49,6 +50,10 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', :type linecol: :param fillcol: color of fill :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure + :param apply_link: apply the link function if plotting f (default false) + :type apply_link: boolean + :param samples_f: the number of posteriori f samples to plot p(f*|y) + :type samples_f: int """ #deal with optional arguments if which_data_rows == 'all': @@ -88,8 +93,14 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #make a prediction on the frame and plot it if plot_raw: m, v = model._raw_predict(Xgrid) - lower = m - 2*np.sqrt(v) - upper = m + 2*np.sqrt(v) + if apply_link: + lower = model.likelihood.gp_link.transf(m - 2*np.sqrt(v)) + upper = model.likelihood.gp_link.transf(m + 2*np.sqrt(v)) + #Once transformed this is now the median of the function + m = model.likelihood.gp_link.transf(m) + else: + lower = m - 2*np.sqrt(v) + upper = m + 2*np.sqrt(v) else: if isinstance(model,GPCoregionalizedRegression) or isinstance(model,SparseGPCoregionalizedRegression): meta = {'output_index': Xgrid[:,-1:].astype(np.int)} @@ -110,13 +121,31 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', plots['posterior_samples'] = ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs. 
+ if samples_f: #NOTE not tested with fixed_inputs + Fsim = model.posterior_samples_f(Xgrid, samples_f) + for fi in Fsim.T: + plots['posterior_samples_f'] = ax.plot(Xnew, fi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) + #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs. + #add error bars for uncertain (if input uncertainty is being modelled) - if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs(): - plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(), - xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()), - ecolor='k', fmt=None, elinewidth=.5, alpha=.5) - + if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs() and plot_uncertain_inputs: + if plot_raw: + #add error bars for uncertain (if input uncertainty is being modelled), for plot_f + #Hack to plot error bars on latent function, rather than on the data + vs = model.X.mean.values.copy() + for i,v in fixed_inputs: + vs[:,i] = v + m_X, _ = model._raw_predict(vs) + if apply_link: + m_X = model.likelihood.gp_link.transf(m_X) + plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), m_X[which_data_rows, which_data_ycols].flatten(), + xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()), + ecolor='k', fmt=None, elinewidth=.5, alpha=.5) + else: + plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(), + xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()), + ecolor='k', fmt=None, elinewidth=.5, alpha=.5) #set the limits of the plot to some sensible values ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper)) @@ -186,3 +215,29 @@ def plot_fit_f(model, *args, **kwargs): """ kwargs['plot_raw'] = True 
plot_fit(model,*args, **kwargs) + +def fixed_inputs(model, non_fixed_inputs, fix_routine='median'): + """ + Convenience function for returning back fixed_inputs where the other inputs + are fixed using fix_routine + :param model: model + :type model: Model + :param non_fixed_inputs: dimensions of non fixed inputs + :type non_fixed_inputs: list + :param fix_routine: fixing routine to use, 'mean', 'median', 'zero' + :type fix_routine: string + """ + f_inputs = [] + if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs(): + X = model.X.mean.values.copy() + else: + X = model.X.values.copy() + for i in range(X.shape[1]): + if i not in non_fixed_inputs: + if fix_routine == 'mean': + f_inputs.append( (i, np.mean(X[:,i])) ) + if fix_routine == 'median': + f_inputs.append( (i, np.median(X[:,i])) ) + elif fix_routine == 'zero': + f_inputs.append( (i, 0) ) + return f_inputs From c76b7bbb9347729e216127f894284fc33a4bc0ae Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Fri, 10 Apr 2015 17:58:51 +0100 Subject: [PATCH 95/99] Added to init --- GPy/models/__init__.py | 2 +- GPy/models/bayesian_gplvm.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py index 8f8fd838..0d18eb47 100644 --- a/GPy/models/__init__.py +++ b/GPy/models/__init__.py @@ -11,7 +11,7 @@ from .sparse_gplvm import SparseGPLVM from .warped_gp import WarpedGP from .bayesian_gplvm import BayesianGPLVM from .mrd import MRD -from .gradient_checker import GradientChecker +from .gradient_checker import GradientChecker, HessianChecker, SkewChecker from .ss_gplvm import SSGPLVM from .gp_coregionalized_regression import GPCoregionalizedRegression from .sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py index e0f6c0bc..3ac703fe 100644 --- a/GPy/models/bayesian_gplvm.py +++ b/GPy/models/bayesian_gplvm.py @@ -69,7 +69,7 @@ class 
BayesianGPLVM(SparseGP_MPI): name=name, inference_method=inference_method, normalizer=normalizer, mpi_comm=mpi_comm, variational_prior=self.variational_prior, - Y_metadata=None + Y_metadata=Y_metadata ) self.link_parameter(self.X, index=0) From b6761c21d7c87c9eeabd01410d7e2588612490a3 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Tue, 14 Apr 2015 14:17:08 +0100 Subject: [PATCH 96/99] Added LOO for laplace and exact inference on training data, Gaussian logpdf appeared to be wrong, now fixed --- .../exact_gaussian_inference.py | 14 +++++ .../latent_function_inference/laplace.py | 62 +++++++++++++++++++ GPy/likelihoods/gaussian.py | 5 +- 3 files changed, 77 insertions(+), 4 deletions(-) diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py index 2a0a2592..76b10f08 100644 --- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py +++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py @@ -64,3 +64,17 @@ class ExactGaussianInference(LatentFunctionInference): dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata) return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha} + + def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None): + """ + Leave one out error as found in + "Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models" + Vehtari et al. 2014. 
+ """ + g = posterior.woodbury_vector + c = posterior.woodbury_inv + c_diag = np.diag(c)[:, None] + neg_log_marginal_LOO = 0.5*np.log(2*np.pi) - 0.5*np.log(c_diag) + 0.5*(g**2)/c_diag + #believe from Predictive Approaches for Choosing Hyperparameters in Gaussian Processes + #this is the negative marginal LOO + return -neg_log_marginal_LOO diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py index c6921f57..19d53505 100644 --- a/GPy/inference/latent_function_inference/laplace.py +++ b/GPy/inference/latent_function_inference/laplace.py @@ -19,6 +19,7 @@ def warning_on_one_line(message, category, filename, lineno, file=None, line=Non warnings.formatwarning = warning_on_one_line from scipy import optimize from . import LatentFunctionInference +from scipy.integrate import quad class Laplace(LatentFunctionInference): @@ -39,6 +40,67 @@ class Laplace(LatentFunctionInference): self.first_run = True self._previous_Ki_fhat = None + def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None): + """ + Leave one out log predictive density as found in + "Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models" + Vehtari et al. 2014. 
+ """ + Ki_f_init = np.zeros_like(Y) + + if K is None: + K = kern.K(X) + + f_hat, _ = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata) + W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata) + logpdf_dfhat = likelihood.dlogpdf_df(f_hat, Y, Y_metadata=Y_metadata) + + K_Wi_i, _, _, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave) + + #Eq 37 + posterior_cav_var = 1./(1./np.diag(Ki_W_i) - 1./np.diag(W))[:, None] + posterior_cav_mean = f_hat - posterior_cav_var*logpdf_dfhat + + flat_y = Y.flatten() + flat_mu = posterior_cav_mean.flatten() + flat_var = posterior_cav_var.flatten() + + if Y_metadata is not None: + #Need to zip individual elements of Y_metadata aswell + Y_metadata_flat = {} + if Y_metadata is not None: + for key, val in Y_metadata.items(): + Y_metadata_flat[key] = np.atleast_1d(val).reshape(-1, 1) + + zipped_values = [] + + for i in range(Y.shape[0]): + y_m = {} + for key, val in Y_metadata_flat.items(): + if np.isscalar(val) or val.shape[0] == 1: + y_m[key] = val + else: + #Won't broadcast yet + y_m[key] = val[i] + zipped_values.append((flat_y[i], flat_mu[i], flat_var[i], y_m)) + else: + #Otherwise just pass along None's + zipped_values = zip(flat_y, flat_mu, flat_var, [None]*Y.shape[0]) + + def integral_generator(yi, mi, vi, yi_m): + def f(fi_star): + #More stable in the log space + return np.exp(likelihood.logpdf(fi_star, yi, yi_m) + - 0.5*np.log(2*np.pi*vi) + - 0.5*np.square(mi-fi_star)/vi) + return f + + #Eq 25 + p_ystar, _ = zip(*[quad(integral_generator(y, m, v, yi_m), -np.inf, np.inf) + for y, m, v, yi_m in zipped_values]) + p_ystar = np.array(p_ystar).reshape(-1, 1) + return np.log(p_ystar) + def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None): """ Returns a Posterior class containing essential quantities of the posterior diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py index 9ecf7dbf..9abb8cde 100644 --- a/GPy/likelihoods/gaussian.py +++ 
b/GPy/likelihoods/gaussian.py @@ -132,10 +132,8 @@ class Gaussian(Likelihood): :returns: log likelihood evaluated for this point :rtype: float """ - N = y.shape[0] ln_det_cov = np.log(self.variance) - - return -0.5*((y-link_f)**2/self.variance + ln_det_cov + np.log(2.*np.pi)) + return -(1.0/(2*self.variance))*((y-link_f)**2) - 0.5*ln_det_cov - 0.5*np.log(2.*np.pi) def dlogpdf_dlink(self, link_f, y, Y_metadata=None): """ @@ -220,7 +218,6 @@ class Gaussian(Likelihood): """ e = y - link_f s_4 = 1.0/(self.variance**2) - N = y.shape[0] dlik_dsigma = -0.5/self.variance + 0.5*s_4*np.square(e) return dlik_dsigma From ab5f3591035c333079499a19a6d35357a9ce7dd0 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Tue, 14 Apr 2015 16:27:58 +0100 Subject: [PATCH 97/99] Changed LOO implementation for Eq 30 instead of 37 --- .../latent_function_inference/laplace.py | 22 ++++++++++++++----- GPy/likelihoods/likelihood.py | 6 ++--- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py index 19d53505..ed21f094 100644 --- a/GPy/inference/latent_function_inference/laplace.py +++ b/GPy/inference/latent_function_inference/laplace.py @@ -57,9 +57,20 @@ class Laplace(LatentFunctionInference): K_Wi_i, _, _, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave) - #Eq 37 - posterior_cav_var = 1./(1./np.diag(Ki_W_i) - 1./np.diag(W))[:, None] - posterior_cav_mean = f_hat - posterior_cav_var*logpdf_dfhat + W = np.diagflat(W) + + #Eq 14, and 16 + var_site = 1./np.diag(W)[:, None] + mu_site = f_hat + var_site*logpdf_dfhat + prec_site = 1./var_site + #Eq 19 + marginal_cov = Ki_W_i + marginal_mu = marginal_cov.dot(np.diagflat(prec_site)).dot(mu_site) + marginal_var = np.diag(marginal_cov)[:, None] + #Eq 30 with using site parameters instead of Gaussian site parameters + #(var_site instead of sigma^{2} ) + posterior_cav_var = 1./(1./marginal_var - 1./var_site) + posterior_cav_mean = 
posterior_cav_var*((1./marginal_var)*marginal_mu - (1./var_site)*Y) flat_y = Y.flatten() flat_mu = posterior_cav_mean.flatten() @@ -90,12 +101,13 @@ class Laplace(LatentFunctionInference): def integral_generator(yi, mi, vi, yi_m): def f(fi_star): #More stable in the log space - return np.exp(likelihood.logpdf(fi_star, yi, yi_m) + p_fi = np.exp(likelihood.logpdf(fi_star, yi, yi_m) - 0.5*np.log(2*np.pi*vi) - 0.5*np.square(mi-fi_star)/vi) + return p_fi return f - #Eq 25 + #Eq 30 p_ystar, _ = zip(*[quad(integral_generator(y, m, v, yi_m), -np.inf, np.inf) for y, m, v, yi_m in zipped_values]) p_ystar = np.array(p_ystar).reshape(-1, 1) diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index f4b31091..470f5059 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -539,9 +539,9 @@ class Likelihood(Parameterized): #Parameters are stacked vertically. Must be listed in same order as 'get_param_names' # ensure we have gradients for every parameter we want to optimize - assert dlogpdf_dtheta.shape[0] == self.size #f, d x num_param array - assert dlogpdf_df_dtheta.shape[0] == self.size #f x d x num_param matrix or just f x num_param - assert d2logpdf_df2_dtheta.shape[0] == self.size #f x num_param matrix or f x d x num_param matrix, f x f x num_param or f x f x d x num_param + assert dlogpdf_dtheta.shape[0] == self.size #num_param array x f, d + assert dlogpdf_df_dtheta.shape[0] == self.size #num_param x f x d x matrix or just num_param x f + assert d2logpdf_df2_dtheta.shape[0] == self.size #num_param x f matrix or num_param x f x d x matrix, num_param x f x f or num_param x f x f x d return dlogpdf_dtheta, dlogpdf_df_dtheta, d2logpdf_df2_dtheta From fe0a4285ca45ab6c9584147da396c2ca0f3c14d0 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Tue, 14 Apr 2015 17:14:05 +0100 Subject: [PATCH 98/99] Removed jitter printing --- GPy/util/linalg.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/GPy/util/linalg.py 
b/GPy/util/linalg.py index 8ac5418f..26c4b774 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -20,7 +20,7 @@ try: from scipy import weave except ImportError: config.set('weave', 'working', 'False') - + _scipyversion = np.float64((scipy.__version__).split('.')[:2]) _fix_dpotri_scipy_bug = True @@ -102,7 +102,6 @@ def jitchol(A, maxtries=5): num_tries = 1 while num_tries <= maxtries and np.isfinite(jitter): try: - print(jitter) L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True) return L except: @@ -115,7 +114,6 @@ def jitchol(A, maxtries=5): except: logging.warning('\n'.join(['Added jitter of {:.10e}'.format(jitter), ' in '+traceback.format_list(traceback.extract_stack(limit=2)[-2:-1])[0][2:]])) - import ipdb;ipdb.set_trace() return L # def dtrtri(L, lower=1): From 361f0a527489f12a3949adc008a650221a455e09 Mon Sep 17 00:00:00 2001 From: Alan Saul Date: Thu, 16 Apr 2015 09:25:18 +0100 Subject: [PATCH 99/99] Fixed log predictive density, added option for LOO to provide some intemediate variables --- .../latent_function_inference/laplace.py | 18 ++++++++++++------ GPy/likelihoods/likelihood.py | 17 ++++++++++------- 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py index ed21f094..aefc82ac 100644 --- a/GPy/inference/latent_function_inference/laplace.py +++ b/GPy/inference/latent_function_inference/laplace.py @@ -40,7 +40,7 @@ class Laplace(LatentFunctionInference): self.first_run = True self._previous_Ki_fhat = None - def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None): + def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None, f_hat=None, W=None, Ki_W_i=None): """ Leave one out log predictive density as found in "Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models" @@ -51,13 +51,19 @@ class Laplace(LatentFunctionInference): if K is None: K = kern.K(X) 
- f_hat, _ = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata) - W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata) + if f_hat is None: + f_hat, _ = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata) + + if W is None: + W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata) + + if Ki_W_i is None: + _, _, _, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave) + logpdf_dfhat = likelihood.dlogpdf_df(f_hat, Y, Y_metadata=Y_metadata) - K_Wi_i, _, _, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave) - - W = np.diagflat(W) + if W.shape[1] == 1: + W = np.diagflat(W) #Eq 14, and 16 var_site = 1./np.diag(W)[:, None] diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index 470f5059..34798a35 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -114,21 +114,24 @@ class Likelihood(Parameterized): #Otherwise just pass along None's zipped_values = zip(flat_y_test, flat_mu_star, flat_var_star, [None]*y_test.shape[0]) - def integral_generator(y, m, v, y_m): - """Generate a function which can be integrated to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*""" - def f(f_star): + def integral_generator(yi, mi, vi, yi_m): + """Generate a function which can be integrated + to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*""" + def f(fi_star): #exponent = np.exp(-(1./(2*v))*np.square(m-f_star)) #from GPy.util.misc import safe_exp #exponent = safe_exp(exponent) #return self.pdf(f_star, y, y_m)*exponent #More stable in the log space - return np.exp(self.logpdf(f_star, y, y_m) -(1./(2*v))*np.square(m-f_star)) + return np.exp(self.logpdf(fi_star, yi, yi_m) + - 0.5*np.log(2*np.pi*vi) + - 0.5*np.square(mi-fi_star)/vi) return f - scaled_p_ystar, accuracy = zip(*[quad(integral_generator(y, m, v, y_m), -np.inf, np.inf) for y, m, v, y_m in zipped_values]) - scaled_p_ystar = np.array(scaled_p_ystar).reshape(-1,1) - p_ystar = scaled_p_ystar/np.sqrt(2*np.pi*var_star) + p_ystar, _ = 
zip(*[quad(integral_generator(yi, mi, vi, yi_m), -np.inf, np.inf) + for yi, mi, vi, yi_m in zipped_values]) + p_ystar = np.array(p_ystar).reshape(-1, 1) return np.log(p_ystar) def _moments_match_ep(self,obs,tau,v):