From 4e83501ea5aef2469845420aab876cce6de176c9 Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Mon, 8 Sep 2014 17:22:37 +0100 Subject: [PATCH 01/17] update sparse_gp_mpi for new interface --- GPy/core/sparse_gp_mpi.py | 9 +++++---- .../latent_function_inference/var_dtc_parallel.py | 4 +++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/GPy/core/sparse_gp_mpi.py b/GPy/core/sparse_gp_mpi.py index 73a37862..cecbe667 100644 --- a/GPy/core/sparse_gp_mpi.py +++ b/GPy/core/sparse_gp_mpi.py @@ -42,10 +42,10 @@ class SparseGP_MPI(SparseGP): assert isinstance(inference_method, VarDTC_minibatch), 'inference_method has to support MPI!' super(SparseGP_MPI, self).__init__(X, Y, Z, kernel, likelihood, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=normalizer) - self.updates = False - self.add_parameter(self.X, index=0) + self.update_model(False) + self.link_parameter(self.X, index=0) if variational_prior is not None: - self.add_parameter(variational_prior) + self.link_parameter(variational_prior) # self.X.fix() self.mpi_comm = mpi_comm @@ -58,7 +58,8 @@ class SparseGP_MPI(SparseGP): self.Y_local = self.Y[N_start:N_end] print 'MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range) mpi_comm.Bcast(self.param_array, root=0) - self.updates = True + self.update_model(True) + def __getstate__(self): dc = super(SparseGP_MPI, self).__getstate__() diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index ab4074f4..a7e2a800 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) from posterior import Posterior -from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs +from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri from ...util import diag from 
...core.parameterization.variational import VariationalPosterior import numpy as np @@ -172,7 +172,9 @@ class VarDTC_minibatch(LatentFunctionInference): diag.add(Kmm, self.const_jitter) r1 = checkFullRank(Kmm,name='Kmm') Lm = jitchol(Kmm) + LmInv = dtrtri(Lm) + #LmInvPsi2LmInvT = LmInv.dot(psi2_full).dot(LmInv.T) LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right') Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT r2 = checkFullRank(Lambda,name='Lambda') From 2df978dd2c1d40b51a600419347aa93bd3f30cfa Mon Sep 17 00:00:00 2001 From: Ricardo Date: Tue, 9 Sep 2014 10:51:26 +0100 Subject: [PATCH 02/17] name can be modified --- GPy/kern/_src/hierarchical.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GPy/kern/_src/hierarchical.py b/GPy/kern/_src/hierarchical.py index 3ca6b444..ac360ec7 100644 --- a/GPy/kern/_src/hierarchical.py +++ b/GPy/kern/_src/hierarchical.py @@ -10,11 +10,11 @@ class Hierarchical(Kernpart): A kernel part which can reopresent a hierarchy of indepencnce: a generalisation of independent_outputs """ - def __init__(self,parts): + def __init__(self,parts,name='hierarchy'): self.levels = len(parts) self.input_dim = parts[0].input_dim + 1 self.num_params = np.sum([k.num_params for k in parts]) - self.name = 'hierarchy' + self.name = name self.parts = parts self.param_starts = np.hstack((0,np.cumsum([k.num_params for k in self.parts[:-1]]))) From 0f47a6b35feca3bd744601d7a7abec23cfa48432 Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Tue, 9 Sep 2014 11:46:19 +0100 Subject: [PATCH 03/17] adapt the numerical stability strategy from VarDTC to VarDTC_minibatch --- .../var_dtc_parallel.py | 37 ++++++++----------- 1 file changed, 15 insertions(+), 22 deletions(-) diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index a7e2a800..c5cf08d1 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ 
b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -2,7 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) from posterior import Posterior -from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri +from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri,pdinv from ...util import diag from ...core.parameterization.variational import VariationalPosterior import numpy as np @@ -144,6 +144,7 @@ class VarDTC_minibatch(LatentFunctionInference): """ num_data, output_dim = Y.shape + input_dim = Z.shape[0] if self.mpi_comm != None: num_data_all = np.array(num_data,dtype=np.int32) self.mpi_comm.Allreduce([np.int32(num_data), MPI.INT], [num_data_all, MPI.INT]) @@ -167,32 +168,23 @@ class VarDTC_minibatch(LatentFunctionInference): #====================================================================== from ...util.debug import checkFullRank - + Kmm = kern.K(Z).copy() diag.add(Kmm, self.const_jitter) r1 = checkFullRank(Kmm,name='Kmm') - Lm = jitchol(Kmm) - LmInv = dtrtri(Lm) + KmmInv,Lm,LmInv,_ = pdinv(Kmm) - #LmInvPsi2LmInvT = LmInv.dot(psi2_full).dot(LmInv.T) - LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right') + LmInvPsi2LmInvT = LmInv.dot(psi2_full).dot(LmInv.T) Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT r2 = checkFullRank(Lambda,name='Lambda') - if (not r1) or (not r2): - raise - LL = jitchol(Lambda) - LL = np.dot(Lm,LL) - b,_ = dtrtrs(LL, psi1Y_full.T) +# if (not r1) or (not r2): +# raise + LInv,LL,LLInv,logdet_L = pdinv(Lambda) + b = LLInv.dot(LmInv.dot(psi1Y_full.T)) bbt = np.square(b).sum() - v,_ = dtrtrs(LL.T,b,lower=False) - vvt = np.einsum('md,od->mo',v,v) + v = LmInv.T.dot(LLInv.T.dot(b)) - Psi2LLInvT = dtrtrs(LL,psi2_full)[0].T - LmInvPsi2LLInvT= dtrtrs(Lm,Psi2LLInvT)[0] - KmmInvPsi2LLInvT = dtrtrs(Lm,LmInvPsi2LLInvT,trans=True)[0] - KmmInvPsi2P = dtrtrs(LL,KmmInvPsi2LLInvT.T, trans=True)[0].T - - dL_dpsi2R = (output_dim*KmmInvPsi2P - vvt)/2. 
# dL_dpsi2 with R inside psi2 + dL_dpsi2R = LmInv.T.dot(-LLInv.T.dot(tdot(b)+output_dim*np.eye(input_dim)).dot(LLInv)+output_dim*np.eye(input_dim)).dot(LmInv)/2. # Cache intermediate results self.midRes['dL_dpsi2R'] = dL_dpsi2R @@ -205,20 +197,21 @@ class VarDTC_minibatch(LatentFunctionInference): logL_R = -np.log(beta).sum() else: logL_R = -num_data*np.log(beta) - logL = -(output_dim*(num_data*log_2_pi+logL_R+psi0_full-np.trace(LmInvPsi2LmInvT))+YRY_full-bbt)/2.-output_dim*(-np.log(np.diag(Lm)).sum()+np.log(np.diag(LL)).sum()) + logL = -(output_dim*(num_data*log_2_pi+logL_R+psi0_full-np.trace(LmInvPsi2LmInvT))+YRY_full-bbt)/2.-output_dim*logdet_L/2. #====================================================================== # Compute dL_dKmm #====================================================================== - dL_dKmm = -(output_dim*np.einsum('md,od->mo',KmmInvPsi2LLInvT,KmmInvPsi2LLInvT) + vvt)/2. +# dL_dKmm = -(output_dim*np.einsum('md,od->mo',KmmInvPsi2LLInvT,KmmInvPsi2LLInvT) + vvt)/2. + dL_dKmm = dL_dpsi2R - KmmInv.dot(psi2_full).dot(KmmInv)/2. 
#====================================================================== # Compute the Posterior distribution of inducing points p(u|Y) #====================================================================== if not self.Y_speedup or het_noise: - post = Posterior(woodbury_inv=KmmInvPsi2P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm) + post = Posterior(woodbury_inv=LmInv.T.dot(np.eye(input_dim)-LInv).dot(LmInv), woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm) else: post = None From 5697a533e7f1912ca180a71aeade309304a9a101 Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Tue, 9 Sep 2014 12:17:29 +0100 Subject: [PATCH 04/17] a bug fix for VarDTC_minibatch --- GPy/inference/latent_function_inference/var_dtc_parallel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index c5cf08d1..53b31dab 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -204,7 +204,7 @@ class VarDTC_minibatch(LatentFunctionInference): #====================================================================== # dL_dKmm = -(output_dim*np.einsum('md,od->mo',KmmInvPsi2LLInvT,KmmInvPsi2LLInvT) + vvt)/2. - dL_dKmm = dL_dpsi2R - KmmInv.dot(psi2_full).dot(KmmInv)/2. + dL_dKmm = dL_dpsi2R - output_dim*KmmInv.dot(psi2_full).dot(KmmInv)/2. 
#====================================================================== # Compute the Posterior distribution of inducing points p(u|Y) From 47b12c20a30f22f58fe92af3b0e267b4ce810d2c Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Thu, 11 Sep 2014 13:21:35 +0100 Subject: [PATCH 05/17] remove nose from install_requires --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index d9a6ab5e..847088ec 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,7 @@ setup(name = 'GPy', package_data = {'GPy': ['defaults.cfg', 'installation.cfg', 'util/data_resources.json', 'util/football_teams.json']}, py_modules = ['GPy.__init__'], long_description=read('README.md'), - install_requires=['numpy>=1.6', 'scipy>=0.9','matplotlib>=1.1', 'nose'], + install_requires=['numpy>=1.6', 'scipy>=0.9','matplotlib>=1.1'], extras_require = { 'docs':['Sphinx', 'ipython'], }, From 97d7fa69551ccc9eb0c8814adaee89b8ad8f01c0 Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Thu, 11 Sep 2014 14:29:50 +0100 Subject: [PATCH 06/17] add the Windows installation instructions for GPy --- doc/GPy.testing.rst | 8 ++++++++ doc/index.rst | 3 +++ doc/installation.rst | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+) create mode 100644 doc/installation.rst diff --git a/doc/GPy.testing.rst b/doc/GPy.testing.rst index 2d1132d7..657d0638 100644 --- a/doc/GPy.testing.rst +++ b/doc/GPy.testing.rst @@ -84,6 +84,14 @@ GPy.testing.prior_tests module :undoc-members: :show-inheritance: +GPy.testing.tie_tests module +---------------------------- + +.. automodule:: GPy.testing.tie_tests + :members: + :undoc-members: + :show-inheritance: + Module contents --------------- diff --git a/doc/index.rst b/doc/index.rst index 87d80be3..c00f31d3 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -19,6 +19,9 @@ You may also be interested by some examples in the GPy/examples folder. Contents: .. 
toctree:: + :maxdepth: 2 + + installation GPy diff --git a/doc/installation.rst b/doc/installation.rst new file mode 100644 index 00000000..8059e89a --- /dev/null +++ b/doc/installation.rst @@ -0,0 +1,34 @@ +============== + Installation +============== + + +Linux +============ + + +Windows +====================== +One easy way to get a Python distribution with the required packages is to use the Anaconda environment from Continuum Analytics. + +* Download and install the free version of Anaconda according to your operating system from `their website `_. +* Open a (new) terminal window: + + * Navigate to Applications/Accessories/cmd, or + * open *anaconda Command Prompt* from windows *start* + +You should now be able to launch a Python interpreter by typing *ipython* in the terminal. In the ipython prompt, you can check your installation by importing the libraries we will need later: +:: + $ import numpy + $ import pylab + +To install the latest version of GPy, *git* is required. A *git* client on Windows can be found `here `_. It is recommened to install with the option "*Use Git from the Windows Command Prompt*". Then, GPy can be installed with the following command +:: + pip install git+https://github.com/SheffieldML/GPy.git@devel + +Note that some of the functionalities in GPy require a *C/C++* compiler. One option would be to install a MSVC compiler, e.g., an Express Edition can be found `here `_. 
+ + +MacOSX +=================================== + From badacfb5851cf39b26995c89bb50bd419a479c3d Mon Sep 17 00:00:00 2001 From: Ricardo Date: Thu, 11 Sep 2014 16:18:13 +0100 Subject: [PATCH 07/17] bug fixed in normalization --- GPy/core/gp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 7b010e6c..42dab1b8 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -51,7 +51,7 @@ class GP(Model): assert Y.ndim == 2 logger.info("initializing Y") - if normalizer is None: + if normalizer is True: self.normalizer = MeanNorm() elif normalizer is False: self.normalizer = None From e11e294791f9e37e818955c47a78167cda075013 Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Thu, 11 Sep 2014 16:44:01 +0100 Subject: [PATCH 08/17] changes installtion instructions --- doc/installation.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/installation.rst b/doc/installation.rst index 8059e89a..35352272 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -26,9 +26,6 @@ To install the latest version of GPy, *git* is required. A *git* client on Windo :: pip install git+https://github.com/SheffieldML/GPy.git@devel -Note that some of the functionalities in GPy require a *C/C++* compiler. One option would be to install a MSVC compiler, e.g., an Express Edition can be found `here `_. 
- - MacOSX =================================== From 31f11eda407eac53d97a98deaf9f29c3182826c1 Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Thu, 11 Sep 2014 17:29:47 +0100 Subject: [PATCH 09/17] bug fix: param object randomize --- GPy/core/parameterization/parameter_core.py | 6 +++--- GPy/testing/parameterized_tests.py | 14 ++++++++------ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index cae999d9..6256f6b2 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -14,6 +14,7 @@ Observable Pattern for patameterization """ from transformations import Transformation,Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED +from ...util.misc import param_to_array import numpy as np import re import logging @@ -740,7 +741,6 @@ class OptimizationHandlable(Indexable): self.param_array.flat[f] = p [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]])) for c, ind in self.constraints.iteritems() if c != __fixed__] - self._highest_parent_.tie.propagate_val() self._optimizer_copy_transformed = False self._trigger_params_changed() @@ -829,11 +829,11 @@ class OptimizationHandlable(Indexable): self.update_model(False) # Switch off the updates self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...) 
# now draw from prior where possible - x = self.param_array.copy() + x = param_to_array(self.param_array).flat.copy() [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None] unfixlist = np.ones((self.size,),dtype=np.bool) unfixlist[self.constraints[__fixed__]] = False - self.param_array[unfixlist] = x[unfixlist] + self.param_array.flat[unfixlist] = x[unfixlist] self.update_model(True) #=========================================================================== diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index a51d9e09..9c212806 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -143,8 +143,9 @@ class ParameterizedTest(unittest.TestCase): def test_randomize(self): ps = self.test1.param.view(np.ndarray).copy() + self.test1.param[2:5].fix() self.test1.param.randomize() - self.assertFalse(np.all(ps==self.test1.param)) + self.assertFalse(np.all(ps==self.test1.param),str(ps)+str(self.test1.param)) def test_fixing_randomize_parameter_handling(self): self.rbf.fix(warning=True) @@ -152,11 +153,12 @@ class ParameterizedTest(unittest.TestCase): self.test1.kern.randomize() self.assertEqual(val, self.rbf.variance) - def test_updates(self): - self.test1.update_model(False) - val = float(self.rbf.variance) - self.test1.kern.randomize() - self.assertEqual(val, self.rbf.variance) +# def test_updates(self): +# # WHAT DO YOU WANT TO TEST HERE? 
+# self.test1.update_model(False) +# val = float(self.rbf.variance) +# self.test1.kern.randomize() +# self.assertEqual(val, self.rbf.variance,str(self.test1)) def test_fixing_optimize(self): self.testmodel.kern.lengthscale.fix() From d7eee6aa005fb9a6bedf36f22d6163ac73181bb6 Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Fri, 12 Sep 2014 10:36:01 +0100 Subject: [PATCH 10/17] finish the debug of sparsegp_mpi --- GPy/core/sparse_gp_mpi.py | 15 +++++++++------ .../latent_function_inference/var_dtc_parallel.py | 15 +-------------- 2 files changed, 10 insertions(+), 20 deletions(-) diff --git a/GPy/core/sparse_gp_mpi.py b/GPy/core/sparse_gp_mpi.py index cecbe667..e7faf7a8 100644 --- a/GPy/core/sparse_gp_mpi.py +++ b/GPy/core/sparse_gp_mpi.py @@ -3,6 +3,7 @@ import numpy as np from sparse_gp import SparseGP +from numpy.linalg.linalg import LinAlgError from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch import logging @@ -83,11 +84,7 @@ class SparseGP_MPI(SparseGP): if self.mpi_comm != None: if self._IN_OPTIMIZATION_ and self.mpi_comm.rank==0: self.mpi_comm.Bcast(np.int32(1),root=0) - self.mpi_comm.Bcast(p, root=0) - - from ..util.debug import checkFinite - checkFinite(p, 'optimizer_array') - + self.mpi_comm.Bcast(p, root=0) SparseGP.optimizer_array.fset(self,p) def optimize(self, optimizer=None, start=None, **kwargs): @@ -103,7 +100,13 @@ class SparseGP_MPI(SparseGP): while True: self.mpi_comm.Bcast(flag,root=0) if flag==1: - self.optimizer_array = x + try: + self.optimizer_array = x + self._fail_count = 0 + except (LinAlgError, ZeroDivisionError, ValueError): + if self._fail_count >= self._allowed_failures: + raise + self._fail_count += 1 elif flag==-1: break else: diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index 53b31dab..b9ecbb5c 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ 
b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -167,18 +167,12 @@ class VarDTC_minibatch(LatentFunctionInference): # Compute Common Components #====================================================================== - from ...util.debug import checkFullRank - Kmm = kern.K(Z).copy() diag.add(Kmm, self.const_jitter) - r1 = checkFullRank(Kmm,name='Kmm') KmmInv,Lm,LmInv,_ = pdinv(Kmm) LmInvPsi2LmInvT = LmInv.dot(psi2_full).dot(LmInv.T) Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT - r2 = checkFullRank(Lambda,name='Lambda') -# if (not r1) or (not r2): -# raise LInv,LL,LLInv,logdet_L = pdinv(Lambda) b = LLInv.dot(LmInv.dot(psi1Y_full.T)) bbt = np.square(b).sum() @@ -203,7 +197,6 @@ class VarDTC_minibatch(LatentFunctionInference): # Compute dL_dKmm #====================================================================== -# dL_dKmm = -(output_dim*np.einsum('md,od->mo',KmmInvPsi2LLInvT,KmmInvPsi2LLInvT) + vvt)/2. dL_dKmm = dL_dpsi2R - output_dim*KmmInv.dot(psi2_full).dot(KmmInv)/2. 
#====================================================================== @@ -336,13 +329,7 @@ def update_gradients(model, mpi_comm=None): Y = model.Y_local X = model.X[model.N_range[0]:model.N_range[1]] - try: - model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, X, model.Z, model.likelihood, Y) - except Exception: - if model.mpi_comm is None or model.mpi_comm.rank==0: - import time - model.pickle('model_'+str(int(time.time()))+'.pickle') - raise + model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, X, model.Z, model.likelihood, Y) het_noise = model.likelihood.variance.size > 1 From 049b58c729c38dc9704268df1b577fad0deeb75c Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Fri, 12 Sep 2014 11:51:51 +0100 Subject: [PATCH 11/17] Remove the dependency on matplotlib --- GPy/examples/classification.py | 6 +++++- GPy/examples/coreg_example.py | 5 ++++- GPy/examples/non_gaussian.py | 5 ++++- GPy/examples/regression.py | 5 ++++- GPy/examples/stochastic.py | 5 ++++- GPy/examples/tutorials.py | 7 +++++-- GPy/kern/_src/rbf.py | 2 -- GPy/kern/_src/trunclinear.py | 4 ---- GPy/models/bcgplvm.py | 2 -- GPy/models/gplvm.py | 4 ++-- GPy/models/sparse_gplvm.py | 7 +------ GPy/plotting/__init__.py | 5 ++++- GPy/plotting/matplot_dep/base_plots.py | 7 +++++-- GPy/plotting/matplot_dep/dim_reduction_plots.py | 10 +++++++--- GPy/plotting/matplot_dep/inference_plots.py | 6 ++++-- GPy/plotting/matplot_dep/mapping_plots.py | 7 +++++-- GPy/plotting/matplot_dep/maps.py | 15 ++++++++------- GPy/plotting/matplot_dep/models_plots.py | 7 +++++-- GPy/plotting/matplot_dep/priors_plots.py | 5 ++++- GPy/plotting/matplot_dep/ssgplvm.py | 1 - GPy/util/datasets.py | 2 +- GPy/util/pca.py | 11 ++++++++--- 22 files changed, 80 insertions(+), 48 deletions(-) diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py index ae9d8eb8..2dc5ad53 100644 --- 
a/GPy/examples/classification.py +++ b/GPy/examples/classification.py @@ -5,9 +5,13 @@ """ Gaussian Processes classification """ -import pylab as pb import GPy +try: + import pylab as pb +except: + pass + default_seed = 10000 def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True): diff --git a/GPy/examples/coreg_example.py b/GPy/examples/coreg_example.py index 66ba143d..6ec635eb 100644 --- a/GPy/examples/coreg_example.py +++ b/GPy/examples/coreg_example.py @@ -1,5 +1,8 @@ import numpy as np -import pylab as pb +try: + import pylab as pb +except: + pass import GPy pb.ion() pb.close('all') diff --git a/GPy/examples/non_gaussian.py b/GPy/examples/non_gaussian.py index c0fcd693..1e2be93b 100644 --- a/GPy/examples/non_gaussian.py +++ b/GPy/examples/non_gaussian.py @@ -1,7 +1,10 @@ import GPy import numpy as np -import matplotlib.pyplot as plt from GPy.util import datasets +try: + import matplotlib.pyplot as plt +except: + pass def student_t_approx(optimize=True, plot=True): """ diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index c4465061..83bb0453 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -4,7 +4,10 @@ """ Gaussian Processes regression examples """ -import pylab as pb +try: + import pylab as pb +except: + pass import numpy as np import GPy diff --git a/GPy/examples/stochastic.py b/GPy/examples/stochastic.py index c302ec7d..cc365cae 100644 --- a/GPy/examples/stochastic.py +++ b/GPy/examples/stochastic.py @@ -1,7 +1,10 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -import pylab as pb +try: + import pylab as pb +except: + pass import numpy as np import GPy diff --git a/GPy/examples/tutorials.py b/GPy/examples/tutorials.py index 7825992d..aa82d9f9 100644 --- a/GPy/examples/tutorials.py +++ b/GPy/examples/tutorials.py @@ -6,8 +6,11 @@ Code of Tutorials """ -import pylab as pb -pb.ion() +try: + import pylab as pb + pb.ion() +except: + pass import numpy as np import GPy diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py index 3711738a..62539e6d 100644 --- a/GPy/kern/_src/rbf.py +++ b/GPy/kern/_src/rbf.py @@ -20,8 +20,6 @@ class RBF(Stationary): _support_GPU = True def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False): super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU) - self.weave_options = {} - self.group_spike_prob = False self.psicomp = PSICOMP_RBF() if self.useGPU: self.psicomp = PSICOMP_RBF_GPU() diff --git a/GPy/kern/_src/trunclinear.py b/GPy/kern/_src/trunclinear.py index 76ed31f7..4ebd51b6 100644 --- a/GPy/kern/_src/trunclinear.py +++ b/GPy/kern/_src/trunclinear.py @@ -3,14 +3,10 @@ import numpy as np -from scipy import weave from kern import Kern -from ...util.linalg import tdot -from ...util.misc import param_to_array from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp from ...util.caching import Cache_this -from ...core.parameterization import variational from ...util.config import * class TruncLinear(Kern): diff --git a/GPy/models/bcgplvm.py b/GPy/models/bcgplvm.py index f21a01f4..c54ffdf6 100644 --- a/GPy/models/bcgplvm.py +++ b/GPy/models/bcgplvm.py @@ -3,8 +3,6 @@ import numpy as np -import pylab as pb -import sys, pdb from ..core import GP from ..models import GPLVM from ..mappings import * diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py index 79128270..4e45ac4a 100644 --- 
a/GPy/models/gplvm.py +++ b/GPy/models/gplvm.py @@ -3,7 +3,6 @@ import numpy as np -import pylab as pb from .. import kern from ..core import GP, Param from ..likelihoods import Gaussian @@ -55,7 +54,7 @@ class GPLVM(GP): #J = np.zeros((X.shape[0],X.shape[1],self.output_dim)) J = self.jacobian(X) for i in range(X.shape[0]): - target[i]=np.sqrt(pb.det(np.dot(J[i,:,:],np.transpose(J[i,:,:])))) + target[i]=np.sqrt(np.linalg.det(np.dot(J[i,:,:],np.transpose(J[i,:,:])))) return target def plot(self): @@ -63,6 +62,7 @@ class GPLVM(GP): pb.scatter(self.likelihood.Y[:, 0], self.likelihood.Y[:, 1], 40, self.X[:, 0].copy(), linewidth=0, cmap=pb.cm.jet) # @UndefinedVariable Xnew = np.linspace(self.X.min(), self.X.max(), 200)[:, None] mu, _ = self.predict(Xnew) + import pylab as pb pb.plot(mu[:, 0], mu[:, 1], 'k', linewidth=1.5) def plot_latent(self, labels=None, which_indices=None, diff --git a/GPy/models/sparse_gplvm.py b/GPy/models/sparse_gplvm.py index 4642e158..251103f4 100644 --- a/GPy/models/sparse_gplvm.py +++ b/GPy/models/sparse_gplvm.py @@ -3,13 +3,8 @@ import numpy as np -import pylab as pb -import sys, pdb +import sys from GPy.models.sparse_gp_regression import SparseGPRegression -from GPy.models.gplvm import GPLVM -# from .. import kern -# from ..core import model -# from ..util.linalg import pdinv, PCA class SparseGPLVM(SparseGPRegression): """ diff --git a/GPy/plotting/__init__.py b/GPy/plotting/__init__.py index 7a39ca9a..d3a96914 100644 --- a/GPy/plotting/__init__.py +++ b/GPy/plotting/__init__.py @@ -1,4 +1,7 @@ # Copyright (c) 2014, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -import matplot_dep +try: + import matplot_dep +except (ImportError, NameError): + print 'Fail to load GPy.plotting.matplot_dep.' 
\ No newline at end of file diff --git a/GPy/plotting/matplot_dep/base_plots.py b/GPy/plotting/matplot_dep/base_plots.py index db9ab8e4..b4142342 100644 --- a/GPy/plotting/matplot_dep/base_plots.py +++ b/GPy/plotting/matplot_dep/base_plots.py @@ -2,8 +2,11 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) -import Tango -import pylab as pb +try: + import Tango + import pylab as pb +except: + pass import numpy as np def ax_default(fignum, ax): diff --git a/GPy/plotting/matplot_dep/dim_reduction_plots.py b/GPy/plotting/matplot_dep/dim_reduction_plots.py index 1d5fdd61..20e8e962 100644 --- a/GPy/plotting/matplot_dep/dim_reduction_plots.py +++ b/GPy/plotting/matplot_dep/dim_reduction_plots.py @@ -1,12 +1,16 @@ -import pylab as pb + import numpy as np from latent_space_visualizations.controllers.imshow_controller import ImshowController,ImAnnotateController from ...util.misc import param_to_array from ...core.parameterization.variational import VariationalPosterior from .base_plots import x_frame2D import itertools -import Tango -from matplotlib.cm import get_cmap +try: + import Tango + from matplotlib.cm import get_cmap + import pylab as pb +except: + pass def most_significant_input_dimensions(model, which_indices): """ diff --git a/GPy/plotting/matplot_dep/inference_plots.py b/GPy/plotting/matplot_dep/inference_plots.py index 6a3a8a93..c802932c 100644 --- a/GPy/plotting/matplot_dep/inference_plots.py +++ b/GPy/plotting/matplot_dep/inference_plots.py @@ -1,8 +1,10 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -import pylab as pb -import sys +try: + import pylab as pb +except: + pass #import numpy as np #import Tango #from base_plots import gpplot, x_frame1D, x_frame2D diff --git a/GPy/plotting/matplot_dep/mapping_plots.py b/GPy/plotting/matplot_dep/mapping_plots.py index 3e3ea793..6156687d 100644 --- a/GPy/plotting/matplot_dep/mapping_plots.py +++ b/GPy/plotting/matplot_dep/mapping_plots.py @@ -1,9 +1,12 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -import pylab as pb import numpy as np -import Tango +try: + import Tango + import pylab as pb +except: + pass from base_plots import x_frame1D, x_frame2D diff --git a/GPy/plotting/matplot_dep/maps.py b/GPy/plotting/matplot_dep/maps.py index e941ab2d..dbedaa98 100644 --- a/GPy/plotting/matplot_dep/maps.py +++ b/GPy/plotting/matplot_dep/maps.py @@ -1,13 +1,14 @@ import numpy as np -import pylab as pb -import matplotlib.patches as patches -from matplotlib.patches import Polygon -from matplotlib.collections import PatchCollection -#from matplotlib import cm +try: + import pylab as pb + from matplotlib.patches import Polygon + from matplotlib.collections import PatchCollection + #from matplotlib import cm + pb.ion() +except: + pass import re -pb.ion() - def plot(shape_records,facecolor='w',edgecolor='k',linewidths=.5, ax=None,xlims=None,ylims=None): """ Plot the geometry of a shapefile diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index 46a79ad8..509c9485 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -1,9 +1,12 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) -import pylab as pb +try: + import Tango + import pylab as pb +except: + pass import numpy as np -import Tango from base_plots import gpplot, x_frame1D, x_frame2D from ...util.misc import param_to_array from ...models.gp_coregionalized_regression import GPCoregionalizedRegression diff --git a/GPy/plotting/matplot_dep/priors_plots.py b/GPy/plotting/matplot_dep/priors_plots.py index af999740..8f02a03b 100644 --- a/GPy/plotting/matplot_dep/priors_plots.py +++ b/GPy/plotting/matplot_dep/priors_plots.py @@ -3,7 +3,10 @@ import numpy as np -import pylab as pb +try: + import pylab as pb +except: + pass def univariate_plot(prior): diff --git a/GPy/plotting/matplot_dep/ssgplvm.py b/GPy/plotting/matplot_dep/ssgplvm.py index 4106e251..ef45a759 100644 --- a/GPy/plotting/matplot_dep/ssgplvm.py +++ b/GPy/plotting/matplot_dep/ssgplvm.py @@ -6,7 +6,6 @@ import pylab from ...models import SSGPLVM from img_plots import plot_2D_images -from ...util.misc import param_to_array class SSGPLVM_plot(object): def __init__(self,model, imgsize): diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py index 17b26f31..93a5dceb 100644 --- a/GPy/util/datasets.py +++ b/GPy/util/datasets.py @@ -2,7 +2,6 @@ import csv import os import copy import numpy as np -import pylab as pb import GPy import scipy.io import cPickle as pickle @@ -346,6 +345,7 @@ def football_data(season='1314', data_set='football_data'): data_resources[data_set_season]['files'] = [files] if not data_available(data_set_season): download_data(data_set_season) + import pylab as pb for file in reversed(files): filename = os.path.join(data_path, data_set_season, file) # rewrite files removing blank rows. 
diff --git a/GPy/util/pca.py b/GPy/util/pca.py index 967d0e1b..046f47d7 100644 --- a/GPy/util/pca.py +++ b/GPy/util/pca.py @@ -5,8 +5,11 @@ Created on 10 Sep 2012 @copyright: Max Zwiessele 2012 ''' import numpy -import pylab -import matplotlib +try: + import pylab + import matplotlib +except: + pass from numpy.linalg.linalg import LinAlgError class pca(object): @@ -88,13 +91,15 @@ class pca(object): def plot_2d(self, X, labels=None, s=20, marker='o', dimensions=(0, 1), ax=None, colors=None, - fignum=None, cmap=matplotlib.cm.jet, # @UndefinedVariable + fignum=None, cmap=None, # @UndefinedVariable ** kwargs): """ Plot dimensions `dimensions` with given labels against each other in PC space. Labels can be any sequence of labels of dimensions X.shape[0]. Labels can be drawn with a subsequent call to legend() """ + if cmap is None: + cmap = matplotlib.cm.jet if ax is None: fig = pylab.figure(fignum) ax = fig.add_subplot(111) From 33fcd06ccc9c4f387338aa37e916f97aa067aaa3 Mon Sep 17 00:00:00 2001 From: Zhenwen Dai Date: Fri, 12 Sep 2014 11:53:59 +0100 Subject: [PATCH 12/17] change setup.py accordingly. 
--- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 847088ec..5e313732 100644 --- a/setup.py +++ b/setup.py @@ -24,9 +24,9 @@ setup(name = 'GPy', package_data = {'GPy': ['defaults.cfg', 'installation.cfg', 'util/data_resources.json', 'util/football_teams.json']}, py_modules = ['GPy.__init__'], long_description=read('README.md'), - install_requires=['numpy>=1.6', 'scipy>=0.9','matplotlib>=1.1'], + install_requires=['numpy>=1.6', 'scipy>=0.9'], extras_require = { - 'docs':['Sphinx', 'ipython'], + 'docs':['matplotlib>=1.1','Sphinx','ipython'], }, classifiers=[ "License :: OSI Approved :: BSD License"], From ed754823be2f6f6b4ab4a3e82230158ec9c08810 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Mon, 15 Sep 2014 09:55:02 +0100 Subject: [PATCH 13/17] NonContiguos tests fixed for Kdiag_dX --- GPy/inference/latent_function_inference/dtc.py | 1 + GPy/testing/kernel_tests.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py index 1b6b1dbd..aa398166 100644 --- a/GPy/inference/latent_function_inference/dtc.py +++ b/GPy/inference/latent_function_inference/dtc.py @@ -124,6 +124,7 @@ class vDTC(object): v, _ = dtrtrs(L, tmp, lower=1, trans=1) tmp, _ = dtrtrs(LA, Li, lower=1, trans=0) P = tdot(tmp.T) + stop #compute log marginal log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \ diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index 83e1085c..95ad7961 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -215,7 +215,10 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb if verbose: print("Checking gradients of Kdiag(X) wrt X.") try: - result = Kern_check_dKdiag_dX(kern, X=X).checkgrad(verbose=verbose) + testmodel = Kern_check_dKdiag_dX(kern, X=X) + if fixed_X_dims is not None: + testmodel.X[:,fixed_X_dims].fix() + 
result = testmodel.checkgrad(verbose=verbose) except NotImplementedError: result=True if verbose: @@ -346,6 +349,7 @@ class KernelTestsNonContinuous(unittest.TestCase): kern = GPy.kern.IndependentOutputs(k, -1, name='ind_split') self.assertTrue(check_kernel_gradient_functions(kern, X=self.X, X2=self.X2, verbose=verbose, fixed_X_dims=-1)) + def test_ODE_UY(self): kern = GPy.kern.ODE_UY(2, active_dims=[0, self.D]) X = self.X[self.X[:,-1]!=2] From ff6361728612802652638f8fc1a419443dab12be Mon Sep 17 00:00:00 2001 From: James Hensman Date: Tue, 16 Sep 2014 13:20:38 +0100 Subject: [PATCH 14/17] for loop speedup in grdients X --- GPy/kern/_src/stationary.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index 04427c2c..1694cf48 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -171,7 +171,8 @@ class Stationary(Kern): #the lower memory way with a loop ret = np.empty(X.shape, dtype=np.float64) - [np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q]) for q in xrange(self.input_dim)] + for q in xrange(self.input_dim): + np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q]) ret /= self.lengthscale**2 return ret From 803c345d443b20346b6b75c803093953993b71b7 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Wed, 17 Sep 2014 11:12:38 +0100 Subject: [PATCH 15/17] docstring for ExpQuad (thanks Mike O. ) --- GPy/kern/_src/stationary.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index 1694cf48..cc5634e9 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -310,6 +310,19 @@ class Matern52(Stationary): class ExpQuad(Stationary): + """ + The Exponentiated quadratic covariance function. + + .. 
math:: + + k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) + + .. note:: + - Yes, this is exactly the same as the RBF covariance function, but the + RBF implementation also has some features for doing variational kernels + (the psi-statistics). + + """ def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='ExpQuad'): super(ExpQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) From 31478d4d593185c09dcf0f4218eecc3fef9dd418 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Wed, 17 Sep 2014 11:22:31 +0100 Subject: [PATCH 16/17] improved docstring for optimize --- GPy/core/model.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/GPy/core/model.py b/GPy/core/model.py index 8c556da2..dc0a9f5e 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -213,6 +213,7 @@ class Model(Parameterized): def optimize(self, optimizer=None, start=None, **kwargs): """ Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. + kwargs are passed to the optimizer. They can be: :param max_f_eval: maximum number of function evaluations @@ -222,7 +223,15 @@ class Model(Parameterized): :param optimizer: which optimizer to use (defaults to self.preferred optimizer) :type optimizer: string - TODO: valid args + Valid optimizers are: + - 'scg': scaled conjugate gradient method, recommended for stability. + See also GPy.inference.optimization.scg + - 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc) + - 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin), + - 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b), + - 'sgd': stochastic gradient descent (see scipy.optimize.sgd). For experts only!
+ + """ if self.is_fixed: raise RuntimeError, "Cannot optimize, when everything is fixed" From 48fb60489160de6fb0e84f6559b85b07dd16e274 Mon Sep 17 00:00:00 2001 From: James Hensman Date: Wed, 17 Sep 2014 12:30:56 +0100 Subject: [PATCH 17/17] some improvements to plotting 2d kernels --- GPy/plotting/matplot_dep/kernel_plots.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/GPy/plotting/matplot_dep/kernel_plots.py b/GPy/plotting/matplot_dep/kernel_plots.py index f2082db0..c0bd1599 100644 --- a/GPy/plotting/matplot_dep/kernel_plots.py +++ b/GPy/plotting/matplot_dep/kernel_plots.py @@ -100,9 +100,7 @@ def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False, filtering=Non return ax -def plot(kernel, x=None, plot_limits=None, which_parts='all', resolution=None, *args, **kwargs): - if which_parts == 'all': - which_parts = [True] * kernel.size +def plot(kernel, x=None, plot_limits=None, resolution=None, *args, **kwargs): if kernel.input_dim == 1: if x is None: x = np.zeros((1, 1)) @@ -133,7 +131,7 @@ def plot(kernel, x=None, plot_limits=None, which_parts='all', resolution=None, * assert x.size == 2, "The size of the fixed variable x is not 2" x = x.reshape((1, 2)) - if plot_limits == None: + if plot_limits is None: xmin, xmax = (x - 5).flatten(), (x + 5).flatten() elif len(plot_limits) == 2: xmin, xmax = plot_limits @@ -142,12 +140,10 @@ def plot(kernel, x=None, plot_limits=None, which_parts='all', resolution=None, * resolution = resolution or 51 xx, yy = np.mgrid[xmin[0]:xmax[0]:1j * resolution, xmin[1]:xmax[1]:1j * resolution] - xg = np.linspace(xmin[0], xmax[0], resolution) - yg = np.linspace(xmin[1], xmax[1], resolution) Xnew = np.vstack((xx.flatten(), yy.flatten())).T - Kx = kernel.K(Xnew, x, which_parts) + Kx = kernel.K(Xnew, x) Kx = Kx.reshape(resolution, resolution).T - pb.contour(xg, yg, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) # @UndefinedVariable + pb.contour(xx, xx, Kx, vmin=Kx.min(), 
vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) # @UndefinedVariable pb.xlim(xmin[0], xmax[0]) pb.ylim(xmin[1], xmax[1]) pb.xlabel("x1")