diff --git a/GPy/__init__.py b/GPy/__init__.py
index ceff68a0..99a91fe8 100644
--- a/GPy/__init__.py
+++ b/GPy/__init__.py
@@ -14,7 +14,6 @@ import examples
 import likelihoods
 import testing
 from numpy.testing import Tester
-from nose.tools import nottest
 
 import kern
 import plotting
@@ -22,10 +21,16 @@ import plotting
 from core import Model
 from core.parameterization import Param, Parameterized, ObsAr
 
-@nottest
-def tests():
-    Tester(testing).test(verbose=10)
-
+#@nottest
+try:
+    # Avoid a hard nose dependency: only mark tests() with @nottest when nose is installed
+    from nose.tools import nottest
+    @nottest
+    def tests():
+        Tester(testing).test(verbose=10)
+except ImportError:
+    def tests():
+        Tester(testing).test(verbose=10)
 
 def load(file_path):
     """
@@ -36,4 +41,4 @@ def load(file_path):
     import cPickle as pickle
     with open(file_path, 'rb') as f:
         m = pickle.load(f)
-    return m
\ No newline at end of file
+    return m
diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py
index 8e481b02..6923b20d 100644
--- a/GPy/examples/regression.py
+++ b/GPy/examples/regression.py
@@ -468,7 +468,7 @@ def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
     k = GPy.kern.RBF(1)
 
     # create simple GP Model - no input uncertainty on this one
-    m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z)
+    m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
 
     if optimize:
         m.optimize('scg', messages=1, max_iters=max_iters)
diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py
index 01d2f997..2c741b9d 100644
--- a/GPy/inference/latent_function_inference/laplace.py
+++ b/GPy/inference/latent_function_inference/laplace.py
@@ -14,6 +14,9 @@ import numpy as np
 from ...util.linalg import mdot, jitchol, dpotrs, dtrtrs, dpotri, symmetrify, pdinv
 from posterior import Posterior
 import warnings
+def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
+    return ' %s:%s: %s:%s\n' % (filename, lineno, category.__name__, message)
+warnings.formatwarning = warning_on_one_line
 from scipy import optimize
 from . import LatentFunctionInference
 
@@ -29,8 +32,11 @@ class Laplace(LatentFunctionInference):
         """
         self._mode_finding_tolerance = 1e-7
-        self._mode_finding_max_iter = 40
-        self.bad_fhat = True
+        self._mode_finding_max_iter = 60
+        self.bad_fhat = False
+        # Store whether this is the first run of the inference, so that we can choose
+        # between recomputing quantities and reusing the stored ones
+        self.first_run = True
         self._previous_Ki_fhat = None
 
     def inference(self, kern, X, likelihood, Y, Y_metadata=None):
@@ -42,8 +48,9 @@ class Laplace(LatentFunctionInference):
         K = kern.K(X)
 
         #Find mode
-        if self.bad_fhat:
+        if self.bad_fhat or self.first_run:
             Ki_f_init = np.zeros_like(Y)
+            self.first_run = False
         else:
             Ki_f_init = self._previous_Ki_fhat
 
@@ -123,11 +130,11 @@ class Laplace(LatentFunctionInference):
 
         #Warn of bad fits
         if difference > self._mode_finding_tolerance:
             if not self.bad_fhat:
-                warnings.warn("Not perfect f_hat fit difference: {}".format(difference))
+                warnings.warn("Not a perfect mode found (f_hat). difference: {}, iteration: {} out of max {}".format(difference, iteration, self._mode_finding_max_iter))
             self.bad_fhat = True
         elif self.bad_fhat:
             self.bad_fhat = False
-            warnings.warn("f_hat now fine again")
+            warnings.warn("f_hat now fine again. difference: {}, iteration: {} out of max {}".format(difference, iteration, self._mode_finding_max_iter))
 
         return f, Ki_f
 
diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index 1cabecc3..0459132a 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -167,6 +167,7 @@ class VarDTC(LatentFunctionInference):
             woodbury_vector = Cpsi1Vf # == Cpsi1V
         else:
             print 'foobar'
+            stop
             psi1V = np.dot(Y.T*beta, psi1).T
             tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
             tmp, _ = dpotrs(LB, tmp, lower=1)
diff --git a/GPy/inference/mcmc/__init__.py b/GPy/inference/mcmc/__init__.py
new file mode 100644
index 00000000..956448d4
--- /dev/null
+++ b/GPy/inference/mcmc/__init__.py
@@ -0,0 +1 @@
+from hmc import HMC
diff --git a/GPy/inference/optimization/hmc.py b/GPy/inference/mcmc/hmc.py
similarity index 82%
rename from GPy/inference/optimization/hmc.py
rename to GPy/inference/mcmc/hmc.py
index 8c65cdf0..54893769 100644
--- a/GPy/inference/optimization/hmc.py
+++ b/GPy/inference/mcmc/hmc.py
@@ -1,10 +1,23 @@
-"""HMC implementation"""
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import numpy as np
 
 
 class HMC:
-    def __init__(self,model,M=None,stepsize=1e-1):
+    """
+    An implementation of Hybrid Monte Carlo (HMC) for GPy models.
+
+    Initialize an object for HMC sampling. Note that the model's state (its parameters) will be changed during sampling.
+
+    :param model: the GPy model that will be sampled
+    :type model: GPy.core.Model
+    :param M: the mass matrix (an identity matrix by default)
+    :type M: numpy.ndarray
+    :param stepsize: the step size for HMC sampling
+    :type stepsize: float
+    """
+    def __init__(self, model, M=None, stepsize=1e-1):
         self.model = model
         self.stepsize = stepsize
         self.p = np.empty_like(model.optimizer_array.copy())
@@ -14,9 +27,19 @@ class HMC:
             self.M = M
         self.Minv = np.linalg.inv(self.M)
 
-    def sample(self, m_iters=1000, hmc_iters=20):
-        params = np.empty((m_iters,self.p.size))
-        for i in xrange(m_iters):
+    def sample(self, num_samples=1000, hmc_iters=20):
+        """
+        Sample the (unfixed) model parameters.
+
+        :param num_samples: the number of samples to draw (1000 by default)
+        :type num_samples: int
+        :param hmc_iters: the number of leap-frog iterations (20 by default)
+        :type hmc_iters: int
+        :return: an N x P array of parameter samples (N - the number of samples, P - the number of parameters sampled)
+        :rtype: numpy.ndarray
+        """
+        params = np.empty((num_samples,self.p.size))
+        for i in xrange(num_samples):
             self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size),self.M)
             H_old = self._computeH()
             theta_old = self.model.optimizer_array.copy()
@@ -125,8 +148,6 @@ class HMC_shortcut:
                     break
             else:
                 Hlist = range(hmc_iters+pos,hmc_iters+pos+self.groupsize)
-#                 print Hlist
-#                 print self._testH(H_buf[Hlist])
                 if self._testH(H_buf[Hlist]):
                     pos += -1
@@ -139,14 +160,10 @@ class HMC_shortcut:
                     pos_new = pos + r
                     self.model.optimizer_array = theta_buf[hmc_iters+pos_new]
                     self.p[:] = p_buf[hmc_iters+pos_new] # the sign of momentum might be wrong!
-#                     print reversal[0],pos,pos_new
-#                     print H_buf
                     break
 
     def _testH(self, Hlist):
         Hstd = np.std(Hlist)
-#         print Hlist
-#         print Hstd
         if Hstd<self.Hstd_th[0] or Hstd>self.Hstd_th[1]:
             return False
         else:
diff --git a/GPy/inference/optimization/samplers.py b/GPy/inference/mcmc/samplers.py
similarity index 100%
rename from GPy/inference/optimization/samplers.py
rename to GPy/inference/mcmc/samplers.py
diff --git a/GPy/inference/optimization/BayesOpt.py b/GPy/inference/optimization/BayesOpt.py
deleted file mode 100644
index 2e54a23b..00000000
--- a/GPy/inference/optimization/BayesOpt.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import numpy as np
-from scipy.stats import norm
-import matplotlib.pyplot as plt
-
-
-####### Preliminar BO with standad acquisition functions ###############################
-# Types of BO
-# MM: Maximum (or minimum) mean
-# MPI: Maximum posterior improvement
-# MUI: Maximum upper interval
-
-def BOacquisition(X,Y,model,type_bo="MPI",type_objective="max",par_mpi = 0,z_mui=1.96,plot=True,n_eval = 500):
-
-    # Only works in dimension 1
-    # Grid where the GP will be evaluated
-    X_star = np.linspace(min(X)-10,max(X)+10,n_eval)
-    X_star = X_star[:,None]
-
-    # Posterior GP evaluated on the grid
-    fest = model.predict(X_star)
-
-    # Calculate the acquisition function
-    ## IF Maximize
-    if type_objective == "max":
-        if type_bo == "MPI": # add others here
-            acqu = norm.cdf((fest[0]-(1+par_mpi)*max(fest[0])) / fest[1])
-            acqu = acqu/(2*max(acqu))
-        if type_bo == "MM":
-            acqu = fest[0]/max(fest[0])
-            acqu = acqu/(2*max(acqu))
-        if type_bo == "MUI":
-            acqu = fest[0]+z_mui*np.sqrt(fest[1])
-            acqu = acqu/(2*max(acqu))
-        optimal_loc = np.argmax(acqu)
-        x_new = X_star[optimal_loc]
-
-    ## IF Minimize
-    if type_objective == "min":
-        if type_bo == "MPI": # add others here
-            acqu = 1-norm.cdf((fest[0]-(1+par_mpi)*min(fest[0])) / fest[1])
-            acqu = acqu/(2*max(acqu))
-        if type_bo == "MM":
-            acqu = 1-fest[0]/max(fest[0])
-            acqu = acqu/(2*max(acqu))
-        if type_bo == "MUI":
-            acqu = -fest[0]+z_mui*np.sqrt(fest[1])
-            acqu = acqu/(2*max(acqu))
-        optimal_loc = np.argmax(acqu)
-        x_new = X_star[optimal_loc]
-
-    # Plot GP posterior, collected data and the acquisition function
-    if plot:
-        plt.plot(X,Y , 'p')
-        plt.title('Acquisition function')
-        model.plot()
-        plt.plot(X_star, acqu, 'r--')
-
-
-    # Return the point where we shoould take the new sample
-    return x_new
-    ###############################################################
-
-
diff --git a/GPy/inference/optimization/__init__.py b/GPy/inference/optimization/__init__.py
index 1590568f..1a8f043b 100644
--- a/GPy/inference/optimization/__init__.py
+++ b/GPy/inference/optimization/__init__.py
@@ -1,3 +1,2 @@
 from scg import SCG
 from optimization import *
-from hmc import HMC,HMC_shortcut
diff --git a/GPy/testing/pickle_tests.py b/GPy/testing/pickle_tests.py
index dfabe54e..b541d21e 100644
--- a/GPy/testing/pickle_tests.py
+++ b/GPy/testing/pickle_tests.py
@@ -17,10 +17,15 @@ from GPy.kern._src.rbf import RBF
 from GPy.kern._src.linear import Linear
 from GPy.kern._src.static import Bias, White
 from GPy.examples.dimensionality_reduction import mrd_simulation
-from GPy.examples.regression import toy_rbf_1d_50
 from GPy.core.parameterization.variational import NormalPosterior
 from GPy.models.gp_regression import GPRegression
 
+def toy_model():
+    X = np.linspace(0,1,50)[:, None]
+    Y = np.sin(X)
+    m = GPRegression(X=X, Y=Y)
+    return m
+
 class ListDictTestCase(unittest.TestCase):
     def assertListDictEquals(self, d1, d2, msg=None):
         for k,v in d1.iteritems():
@@ -105,7 +110,7 @@ class Test(ListDictTestCase):
         self.assertSequenceEqual(str(par), str(pcopy))
 
     def test_model(self):
-        par = toy_rbf_1d_50(optimize=0, plot=0)
+        par = toy_model()
         pcopy = par.copy()
         self.assertListEqual(par.param_array.tolist(), pcopy.param_array.tolist())
         np.testing.assert_allclose(par.gradient_full, pcopy.gradient_full)
@@ -124,7 +129,7 @@ class Test(ListDictTestCase):
         self.assert_(pcopy.checkgrad())
 
     def test_modelrecreation(self):
-        par = toy_rbf_1d_50(optimize=0, plot=0)
+        par = toy_model()
         pcopy = GPRegression(par.X.copy(), par.Y.copy(), kernel=par.kern.copy())
         np.testing.assert_allclose(par.param_array, pcopy.param_array)
         np.testing.assert_allclose(par.gradient_full, pcopy.gradient_full)
@@ -135,7 +140,7 @@ class Test(ListDictTestCase):
         self.assert_(np.any(pcopy.gradient!=0.0))
         pcopy.optimize('bfgs')
         par.optimize('bfgs')
-        np.testing.assert_allclose(pcopy.param_array, par.param_array, atol=.001)
+        np.testing.assert_allclose(pcopy.param_array, par.param_array, atol=1e-6)
         with tempfile.TemporaryFile('w+b') as f:
             par.pickle(f)
             f.seek(0)
@@ -193,7 +198,7 @@ class Test(ListDictTestCase):
 
     @unittest.skip
     def test_add_observer(self):
-        par = toy_rbf_1d_50(optimize=0, plot=0)
+        par = toy_model()
         par.name = "original"
         par.count = 0
         par.add_observer(self, self._callback, 1)
@@ -211,4 +216,4 @@ class Test(ListDictTestCase):
 
 if __name__ == "__main__":
     #import sys;sys.argv = ['', 'Test.test_parameter_index_operations']
-    unittest.main()
\ No newline at end of file
+    unittest.main()
diff --git a/doc/GPy.inference.optimization.rst b/doc/GPy.inference.optimization.rst
index 83339202..a81a8e68 100644
--- a/doc/GPy.inference.optimization.rst
+++ b/doc/GPy.inference.optimization.rst
@@ -4,14 +4,6 @@ GPy.inference.optimization package
 Submodules
 ----------
 
-GPy.inference.optimization.BayesOpt module
-------------------------------------------
-
-.. automodule:: GPy.inference.optimization.BayesOpt
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 GPy.inference.optimization.conjugate_gradient_descent module
 -------------------------------------------------------------
 
@@ -28,14 +20,6 @@ GPy.inference.optimization.gradient_descent_update_rules module
     :undoc-members:
     :show-inheritance:
 
-GPy.inference.optimization.hmc module
--------------------------------------
-
-.. automodule:: GPy.inference.optimization.hmc
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 GPy.inference.optimization.optimization module
 ----------------------------------------------
 
@@ -44,14 +28,6 @@ GPy.inference.optimization.optimization module
     :undoc-members:
     :show-inheritance:
 
-GPy.inference.optimization.samplers module
-------------------------------------------
-
-.. automodule:: GPy.inference.optimization.samplers
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 GPy.inference.optimization.scg module
 -------------------------------------
 
@@ -68,6 +44,14 @@ GPy.inference.optimization.sgd module
     :undoc-members:
     :show-inheritance:
 
+GPy.inference.optimization.stochastics module
+---------------------------------------------
+
+.. automodule:: GPy.inference.optimization.stochastics
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 Module contents
 ---------------
 
diff --git a/doc/GPy.inference.rst b/doc/GPy.inference.rst
index 2aa7839f..235f804b 100644
--- a/doc/GPy.inference.rst
+++ b/doc/GPy.inference.rst
@@ -7,6 +7,7 @@ Subpackages
 .. toctree::
 
     GPy.inference.latent_function_inference
+    GPy.inference.mcmc
     GPy.inference.optimization
 
 Module contents
diff --git a/doc/GPy.likelihoods.rst b/doc/GPy.likelihoods.rst
index 70679454..9d74fead 100644
--- a/doc/GPy.likelihoods.rst
+++ b/doc/GPy.likelihoods.rst
@@ -60,14 +60,6 @@ GPy.likelihoods.mixed_noise module
     :undoc-members:
     :show-inheritance:
 
-GPy.likelihoods.negative_binomial module
-----------------------------------------
-
-.. automodule:: GPy.likelihoods.negative_binomial
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 GPy.likelihoods.ordinal module
 ------------------------------
 
@@ -84,30 +76,6 @@ GPy.likelihoods.poisson module
     :undoc-members:
     :show-inheritance:
 
-GPy.likelihoods.skew_exponential module
----------------------------------------
-
-.. automodule:: GPy.likelihoods.skew_exponential
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-GPy.likelihoods.skew_normal module
-----------------------------------
-
-.. automodule:: GPy.likelihoods.skew_normal
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
-GPy.likelihoods.sstudent_t module
----------------------------------
-
-.. automodule:: GPy.likelihoods.sstudent_t
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 GPy.likelihoods.student_t module
 --------------------------------
 
@@ -116,14 +84,6 @@ GPy.likelihoods.student_t module
     :undoc-members:
     :show-inheritance:
 
-GPy.likelihoods.symbolic module
--------------------------------
-
-.. automodule:: GPy.likelihoods.symbolic
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 Module contents
 ---------------
 
diff --git a/doc/GPy.mappings.rst b/doc/GPy.mappings.rst
index e5d89872..c13642cc 100644
--- a/doc/GPy.mappings.rst
+++ b/doc/GPy.mappings.rst
@@ -36,14 +36,6 @@ GPy.mappings.mlp module
     :undoc-members:
     :show-inheritance:
 
-GPy.mappings.symbolic module
-----------------------------
-
-.. automodule:: GPy.mappings.symbolic
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 Module contents
 ---------------
 
diff --git a/doc/GPy.testing.rst b/doc/GPy.testing.rst
index 657d0638..2d1132d7 100644
--- a/doc/GPy.testing.rst
+++ b/doc/GPy.testing.rst
@@ -84,14 +84,6 @@ GPy.testing.prior_tests module
     :undoc-members:
     :show-inheritance:
 
-GPy.testing.tie_tests module
-----------------------------
-
-.. automodule:: GPy.testing.tie_tests
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 Module contents
 ---------------
 
diff --git a/doc/GPy.util.rst b/doc/GPy.util.rst
index 14c7643b..e50efdfb 100644
--- a/doc/GPy.util.rst
+++ b/doc/GPy.util.rst
@@ -204,14 +204,6 @@ GPy.util.subarray_and_sorting module
     :undoc-members:
     :show-inheritance:
 
-GPy.util.symbolic module
-------------------------
-
-.. automodule:: GPy.util.symbolic
-    :members:
-    :undoc-members:
-    :show-inheritance:
-
 GPy.util.univariate_Gaussian module
 -----------------------------------
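
Usage sketch: after this change HMC is imported from GPy.inference.mcmc rather than GPy.inference.optimization. The import path and the HMC/sample signatures below come from the hunks above; the toy data, kernel defaults, and step size are illustrative assumptions, and in practice you would set priors on the parameters before sampling.

    import numpy as np
    import GPy
    from GPy.inference.mcmc import HMC  # new location (was GPy.inference.optimization.hmc)

    # Toy data mirroring the toy_model() helper added in pickle_tests.py.
    X = np.linspace(0, 1, 50)[:, None]
    Y = np.sin(X) + np.random.randn(50, 1) * 0.05

    m = GPy.models.GPRegression(X, Y)
    m.optimize()  # start the chain near the posterior mode; sampling mutates the model's parameters

    hmc = HMC(m, stepsize=1e-2)  # stepsize chosen for illustration only
    samples = hmc.sample(num_samples=300, hmc_iters=20)
    # samples has shape (300, P): one row per draw over the P unfixed parameters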