diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py index 239b2a26..cb981354 100644 --- a/GPy/core/parameterization/priors.py +++ b/GPy/core/parameterization/priors.py @@ -366,6 +366,7 @@ class InverseGamma(Gamma): def rvs(self, n): return 1. / np.random.gamma(scale=1. / self.b, shape=self.a, size=n) + class DGPLVM_KFDA(Prior): """ Implementation of the Discriminative Gaussian Process Latent Variable function using @@ -512,6 +513,7 @@ class DGPLVM_KFDA(Prior): self.A = self.compute_A(lst_ni) self.x_shape = x_shape + class DGPLVM(Prior): """ Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel. @@ -903,7 +905,7 @@ class DGPLVM_Lamda(Prior, Parameterized): # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1)) #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1) #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.5))[0] - Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.9)[0] + Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.9)[0] return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw)) # This function calculates derivative of the log of prior function @@ -927,7 +929,7 @@ class DGPLVM_Lamda(Prior, Parameterized): # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1)) #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1) #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.5))[0] - Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.9)[0] + Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.9)[0] Sb_inv_N_trans = np.transpose(Sb_inv_N) Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans Sw_trans = np.transpose(Sw) @@ -1198,6 +1200,7 @@ class DGPLVM_T(Prior): + class HalfT(Prior): """ Implementation of the half student t probability function, coupled with random variables. 
@@ -1208,15 +1211,17 @@ class HalfT(Prior): """ domain = _POSITIVE _instances = [] - def __new__(cls, A, nu): # Singleton: + + def __new__(cls, A, nu): # Singleton: if cls._instances: cls._instances[:] = [instance for instance in cls._instances if instance()] for instance in cls._instances: if instance().A == A and instance().nu == nu: - return instance() + return instance() o = super(Prior, cls).__new__(cls, A, nu) cls._instances.append(weakref.ref(o)) return cls._instances[-1]() + def __init__(self, A, nu): self.A = float(A) self.nu = float(nu) @@ -1225,37 +1230,81 @@ class HalfT(Prior): def __str__(self): return "hT({:.2g}, {:.2g})".format(self.A, self.nu) - def lnpdf(self,theta): - return (theta>0) * ( self.constant -.5*(self.nu+1) * np.log( 1.+ (1./self.nu) * (theta/self.A)**2 ) ) + def lnpdf(self, theta): + return (theta > 0) * (self.constant - .5*(self.nu + 1) * np.log(1. + (1./self.nu) * (theta/self.A)**2)) - #theta = theta if isinstance(theta,np.ndarray) else np.array([theta]) - #lnpdfs = np.zeros_like(theta) - #theta = np.array([theta]) - #above_zero = theta.flatten()>1e-6 - #v = self.nu - #sigma2=self.A - #stop - #lnpdfs[above_zero] = (+ gammaln((v + 1) * 0.5) - # - gammaln(v * 0.5) - # - 0.5*np.log(sigma2 * v * np.pi) - # - 0.5*(v + 1)*np.log(1 + (1/np.float(v))*((theta[above_zero][0]**2)/sigma2)) - #) - #return lnpdfs + # theta = theta if isinstance(theta,np.ndarray) else np.array([theta]) + # lnpdfs = np.zeros_like(theta) + # theta = np.array([theta]) + # above_zero = theta.flatten()>1e-6 + # v = self.nu + # sigma2=self.A + # stop + # lnpdfs[above_zero] = (+ gammaln((v + 1) * 0.5) + # - gammaln(v * 0.5) + # - 0.5*np.log(sigma2 * v * np.pi) + # - 0.5*(v + 1)*np.log(1 + (1/np.float(v))*((theta[above_zero][0]**2)/sigma2)) + # ) + # return lnpdfs - def lnpdf_grad(self,theta): - theta = theta if isinstance(theta,np.ndarray) else np.array([theta]) + def lnpdf_grad(self, theta): + theta = theta if isinstance(theta, np.ndarray) else np.array([theta]) grad = 
np.zeros_like(theta) - above_zero = theta>1e-6 + above_zero = theta > 1e-6 v = self.nu - sigma2=self.A + sigma2 = self.A grad[above_zero] = -0.5*(v+1)*(2*theta[above_zero])/(v*sigma2 + theta[above_zero][0]**2) return grad def rvs(self, n): - #return np.random.randn(n) * self.sigma + self.mu - from scipy.stats import t - #[np.abs(x) for x in t.rvs(df=4,loc=0,scale=50, size=10000)]) - ret = t.rvs(self.nu,loc=0,scale=self.A, size=n) - ret[ret<0] = 0 - return ret + # return np.random.randn(n) * self.sigma + self.mu + from scipy.stats import t + # [np.abs(x) for x in t.rvs(df=4,loc=0,scale=50, size=10000)]) + ret = t.rvs(self.nu, loc=0, scale=self.A, size=n) + ret[ret < 0] = 0 + return ret + +class Exponential(Prior): + """ + Implementation of the Exponential probability function, + coupled with random variables. + + :param l: shape parameter + + """ + domain = _POSITIVE + _instances = [] + + def __new__(cls, l): # Singleton: + if cls._instances: + cls._instances[:] = [instance for instance in cls._instances if instance()] + for instance in cls._instances: + if instance().l == l: + return instance() + o = super(Exponential, cls).__new__(cls, l) + cls._instances.append(weakref.ref(o)) + return cls._instances[-1]() + + def __init__(self, l): + self.l = l + + def __str__(self): + return "Exp({:.2g})".format(self.l) + + def summary(self): + ret = {"E[x]": 1. / self.l, + "E[ln x]": np.nan, + "var[x]": 1. / self.l**2, + "Entropy": 1. 
- np.log(self.l), + "Mode": 0.} + return ret + + def lnpdf(self, x): + return np.log(self.l) - self.l * x + + def lnpdf_grad(self, x): + return - self.l + + def rvs(self, n): + return np.random.exponential(scale=1./self.l, size=n) diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py index 6d6633cb..830809d6 100644 --- a/GPy/core/parameterization/transformations.py +++ b/GPy/core/parameterization/transformations.py @@ -62,7 +62,7 @@ class Transformation(object): import matplotlib.pyplot as plt from ...plotting.matplot_dep import base_plots x = np.linspace(-8,8) - base_plots.meanplot(x, self.f(x),axes=axes*args,**kw) + base_plots.meanplot(x, self.f(x), *args, ax=axes, **kw) axes = plt.gca() axes.set_xlabel(xlabel) axes.set_ylabel(ylabel) @@ -488,7 +488,7 @@ class Logistic(Transformation): return instance() newfunc = super(Transformation, cls).__new__ if newfunc is object.__new__: - o = newfunc(cls) + o = newfunc(cls) else: o = newfunc(cls, lower, upper, *args, **kwargs) cls._instances.append(weakref.ref(o)) diff --git a/GPy/inference/mcmc/__init__.py b/GPy/inference/mcmc/__init__.py index 8f185457..b30b6ff0 100644 --- a/GPy/inference/mcmc/__init__.py +++ b/GPy/inference/mcmc/__init__.py @@ -1 +1,2 @@ -from .hmc import HMC +from .hmc import HMC +from .samplers import * diff --git a/GPy/inference/mcmc/samplers.py b/GPy/inference/mcmc/samplers.py index 2fd88d2f..7ca6a4c6 100644 --- a/GPy/inference/mcmc/samplers.py +++ b/GPy/inference/mcmc/samplers.py @@ -18,11 +18,11 @@ class Metropolis_Hastings: def __init__(self,model,cov=None): """Metropolis Hastings, with tunings according to Gelman et al.
""" self.model = model - current = self.model._get_params_transformed() + current = self.model.optimizer_array self.D = current.size self.chains = [] if cov is None: - self.cov = model.Laplace_covariance() + self.cov = np.eye(self.D) else: self.cov = cov self.scale = 2.4/np.sqrt(self.D) @@ -33,20 +33,20 @@ class Metropolis_Hastings: if start is None: self.model.randomize() else: - self.model._set_params_transformed(start) + self.model.optimizer_array = start - - - def sample(self, Ntotal, Nburn, Nthin, tune=True, tune_throughout=False, tune_interval=400): - current = self.model._get_params_transformed() - fcurrent = self.model.log_likelihood() + self.model.log_prior() + def sample(self, Ntotal=10000, Nburn=1000, Nthin=10, tune=True, tune_throughout=False, tune_interval=400): + current = self.model.optimizer_array + fcurrent = self.model.log_likelihood() + self.model.log_prior() + \ + self.model._log_det_jacobian() accepted = np.zeros(Ntotal,dtype=np.bool) for it in range(Ntotal): - print("sample %d of %d\r"%(it,Ntotal), end=' ') + print "sample %d of %d\r"%(it,Ntotal), sys.stdout.flush() prop = np.random.multivariate_normal(current, self.cov*self.scale*self.scale) - self.model._set_params_transformed(prop) - fprop = self.model.log_likelihood() + self.model.log_prior() + self.model.optimizer_array = prop + fprop = self.model.log_likelihood() + self.model.log_prior() + \ + self.model._log_det_jacobian() if fprop>fcurrent:#sample accepted, going 'uphill' accepted[it] = True @@ -74,10 +74,11 @@ class Metropolis_Hastings: def predict(self,function,args): """Make a prediction for the function, to which we will pass the additional arguments""" - param = self.model._get_params() + param = self.model.param_array fs = [] for p in self.chain: - self.model._set_params(p) + self.model.param_array = p fs.append(function(*args)) - self.model._set_params(param)# reset model to starting state + # reset model to starting state + self.model.param_array = param return fs diff --git 
a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 924694e9..1cc0c0ba 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -256,8 +256,6 @@ class Kern(Parameterized): :param other: the other kernel to be added :type other: GPy.kern - :param tensor: whether or not to use the tensor space (default is false). - :type tensor: bool """ assert isinstance(other, Kern), "only kernels can be multiplied to kernels..." diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index ff7cf140..b47e663d 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -27,8 +27,6 @@ class Prod(CombinationKernel): :param k1, k2: the kernels to multiply :type k1, k2: Kern - :param tensor: The kernels are either multiply as functions defined on the same input space (default) or on the product of the input spaces - :type tensor: Boolean :rtype: kernel object """ diff --git a/GPy/testing/linalg_test.py b/GPy/testing/linalg_test.py index ec3aca5a..78f6de66 100644 --- a/GPy/testing/linalg_test.py +++ b/GPy/testing/linalg_test.py @@ -1,7 +1,6 @@ import numpy as np import scipy as sp -from GPy.util.linalg import jitchol -import GPy +from ..util.linalg import jitchol,trace_dot class LinalgTests(np.testing.TestCase): def setUp(self): @@ -37,12 +36,13 @@ class LinalgTests(np.testing.TestCase): except sp.linalg.LinAlgError: return True - def test_einsum_ijk_jlk_to_il(self): - A = np.random.randn(50, 150, 5) - B = np.random.randn(150, 100, 5) - pure = np.einsum('ijk,jlk->il', A, B) - quick = GPy.util.linalg.ijk_jlk_to_il(A, B) - np.testing.assert_allclose(pure, quick) + def test_trace_dot(self): + N = 5 + A = np.random.rand(N,N) + B = np.random.rand(N,N) + trace = np.trace(A.dot(B)) + test_trace = trace_dot(A,B) + np.testing.assert_allclose(trace,test_trace,atol=1e-13) def test_einsum_ij_jlk_to_ilk(self): A = np.random.randn(15, 150, 5) diff --git a/GPy/testing/rv_transformation_tests.py b/GPy/testing/rv_transformation_tests.py new file mode 100644 index 00000000..e6b3e3e7 
--- /dev/null +++ b/GPy/testing/rv_transformation_tests.py @@ -0,0 +1,101 @@ +# Written by Ilias Bilionis +""" +Test if hyperparameters in models are properly transformed. +""" + + +import unittest +import numpy as np +import scipy.stats as st +import GPy + + +class TestModel(GPy.core.Model): + """ + A simple GPy model with one parameter. + """ + def __init__(self): + GPy.core.Model.__init__(self, 'test_model') + theta = GPy.core.Param('theta', 1.) + self.link_parameter(theta) + + def log_likelihood(self): + return 0. + + +class RVTransformationTestCase(unittest.TestCase): + + def _test_trans(self, trans): + m = TestModel() + prior = GPy.priors.LogGaussian(.5, 0.1) + m.theta.set_prior(prior) + m.theta.unconstrain() + m.theta.constrain(trans) + # The PDF of the transformed variables + p_phi = lambda phi: np.exp(-m._objective_grads(phi)[0]) + # To the empirical PDF of: + theta_s = prior.rvs(100000) + phi_s = trans.finv(theta_s) + # which is essentially a kernel density estimation + kde = st.gaussian_kde(phi_s) + # We will compare the PDF here: + phi = np.linspace(phi_s.min(), phi_s.max(), 100) + # The transformed PDF of phi should be this: + pdf_phi = np.array([p_phi(p) for p in phi]) + # UNCOMMENT TO SEE GRAPHICAL COMPARISON + #import matplotlib.pyplot as plt + #fig, ax = plt.subplots() + #ax.hist(phi_s, normed=True, bins=100, alpha=0.25, label='Histogram') + #ax.plot(phi, kde(phi), '--', linewidth=2, label='Kernel Density Estimation') + #ax.plot(phi, pdf_phi, ':', linewidth=2, label='Transformed PDF') + #ax.set_xlabel(r'transformed $\theta$', fontsize=16) + #ax.set_ylabel('PDF', fontsize=16) + #plt.legend(loc='best') + #plt.show(block=True) + # END OF PLOT + # The following test cannot be very accurate + self.assertTrue(np.linalg.norm(pdf_phi - kde(phi)) / np.linalg.norm(kde(phi)) <= 1e-1) + # Check the gradients at a few random points + for i in range(10): + m.theta = theta_s[i] + self.assertTrue(m.checkgrad(verbose=True)) + + def test_Logexp(self): + 
self._test_trans(GPy.constraints.Logexp()) + self._test_trans(GPy.constraints.Exponent()) + + +if __name__ == '__main__': + unittest.main() + quit() + m = TestModel() + prior = GPy.priors.LogGaussian(0., .9) + m.theta.set_prior(prior) + + # The following should return the PDF in terms of the transformed quantities + p_phi = lambda phi: np.exp(-m._objective_grads(phi)[0]) + + # Let's look at the transformation phi = log(exp(theta - 1)) + trans = GPy.constraints.Exponent() + m.theta.constrain(trans) + # Plot the transformed probability density + phi = np.linspace(-8, 8, 100) + fig, ax = plt.subplots() + # Let's draw some samples of theta and transform them so that we see + # which one is right + theta_s = prior.rvs(10000) + # Transform it to the new variables + phi_s = trans.finv(theta_s) + # And draw their histogram + ax.hist(phi_s, normed=True, bins=100, alpha=0.25, label='Empirical') + # This is to be compared to the PDF of the model expressed in terms of these new + # variables + ax.plot(phi, [p_phi(p) for p in phi], label='Transformed PDF', linewidth=2) + ax.set_xlim(-3, 10) + ax.set_xlabel(r'transformed $\theta$', fontsize=16) + ax.set_ylabel('PDF', fontsize=16) + plt.legend(loc='best') + # Now let's test the gradients + m.checkgrad(verbose=True) + # And show the plot + plt.show(block=True) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index c2f481f0..b4ffd1b0 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -157,7 +157,7 @@ def trace_dot(a, b): """ Efficiently compute the trace of the matrix product of a and b """ - return np.sum(a * b) + return np.einsum('ij,ji->', a, b) def mdot(*args): """ diff --git a/README.md b/README.md index e74f895c..c9a6c708 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # GPy + A Gaussian processes framework in Python. * [GPy homepage](http://sheffieldml.github.io/GPy/) @@ -10,6 +11,19 @@ A Gaussian processes framework in Python. 
Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GPy.png) +### Citation + + @Misc{gpy2014, + author = {The GPy authors}, + title = {{GPy}: A Gaussian process framework in python}, + howpublished = {\url{http://github.com/SheffieldML/GPy}}, + year = {2012--2014} + } + +### Pronounciation + +We like to pronounce it 'Gee-pie'. + ### Getting started: installing with pip We are now requiring the newest version of ![scipy](http://www.scipy.org/) and thus, we strongly recommend using @@ -83,8 +97,12 @@ clone this git repository and add it to your path: ### OSX -Everything appears to work out-of-the box using ![enthought](http://www.enthought.com) on osx Mavericks. Download/clone GPy, and then add GPy to your PYTHONPATH +Everything appears to work out-of-the box using +![anaconda python distribution](http://continuum.io/downloads) +on osx Mavericks. +Download/clone GPy, and then add GPy to your PYTHONPATH + conda update scipy git clone git@github.com:SheffieldML/GPy.git ~/SheffieldML echo 'PYTHONPATH=$PYTHONPATH:~/SheffieldML' >> ~/.profile @@ -137,7 +155,9 @@ Run nosetests from the root directory of the repository: or from within IPython import GPy; GPy.tests() - + + + ## Funding Acknowledgements diff --git a/doc/log.txt b/doc/log.txt deleted file mode 100644 index d4f829cb..00000000 --- a/doc/log.txt +++ /dev/null @@ -1,222 +0,0 @@ -/home/maxz/Documents/gpy/GPy/__init__.py:docstring of GPy.load:1: WARNING: Inline interpreted text or phrase reference start-string without end-string. -/home/maxz/Documents/gpy/GPy/core/gp.py:docstring of GPy.core.gp.GP.optimize:8: ERROR: Unknown interpreted text role "module". -/home/maxz/Documents/gpy/GPy/core/gp.py:docstring of GPy.core.gp.GP.predict_wishard_embedding:6: WARNING: Field list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/core/gp.py:docstring of GPy.core.gp.GP.predictive_gradients:5: ERROR: Unexpected indentation. 
-/home/maxz/Documents/gpy/GPy/core/gp.py:docstring of GPy.core.gp.GP.predictive_gradients:10: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/core/model.py:docstring of GPy.core.model.Model.optimize_restarts:29: WARNING: Explicit markup ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/doc/GPy.core.rst:57: WARNING: autodoc: failed to import module u'GPy.core.svigp'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named svigp -/home/maxz/Documents/gpy/doc/GPy.core.rst:65: WARNING: autodoc: failed to import module u'GPy.core.symbolic'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) - File "/home/maxz/Documents/gpy/GPy/core/symbolic.py", line 10, in - from sympy.utilities.lambdify import lambdastr, _imp_namespace, _get_namespace -ImportError: No module named lambdify -/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Indexable.unset_priors:1: WARNING: Inline emphasis start-string without end-string. -/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Nameable.hierarchy_name:4: WARNING: Field list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Parameterizable.traverse:1: WARNING: Inline emphasis start-string without end-string. 
-/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Parameterizable.traverse:1: WARNING: Inline strong start-string without end-string. -/home/maxz/Documents/gpy/GPy/core/parameterization/parameterized.py:docstring of GPy.core.parameterization.parameterized.Parameterized:18: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/core/parameterization/parameterized.py:docstring of GPy.core.parameterization.parameterized.Parameterized:20: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/core/parameterization/ties_and_remappings.py:docstring of GPy.core.parameterization.ties_and_remappings.Tie:18: SEVERE: Unexpected section title or transition. - -================================ -/home/maxz/Documents/gpy/doc/GPy.examples.rst:50: WARNING: autodoc: failed to import module u'GPy.examples.stochastic'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named stochastic -/home/maxz/Documents/gpy/doc/GPy.examples.rst:58: WARNING: autodoc: failed to import module u'GPy.examples.tutorials'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named tutorials -/home/maxz/Documents/gpy/doc/GPy.inference.latent_function_inference.rst:82: WARNING: autodoc: failed to import module u'GPy.inference.latent_function_inference.var_dtc_gpu'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) 
-ImportError: No module named var_dtc_gpu -/home/maxz/Documents/gpy/doc/GPy.inference.optimization.rst:42: WARNING: autodoc: failed to import module u'GPy.inference.optimization.sgd'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named sgd -/home/maxz/Documents/gpy/GPy/kern/_src/coregionalize.py:docstring of GPy.kern._src.coregionalize.Coregionalize:5: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/doc/GPy.kern._src.rst:73: WARNING: autodoc: failed to import module u'GPy.kern._src.hierarchical'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named hierarchical -/home/maxz/Documents/gpy/GPy/kern/_src/independent_outputs.py:docstring of GPy.kern._src.independent_outputs.IndependentOutputs:9: WARNING: Field list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:24: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:22: WARNING: Inline literal start-string without end-string. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:25: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Definition list ends without a blank line; unexpected unindent. 
-/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Inline literal start-string without end-string. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Inline interpreted text or phrase reference start-string without end-string. -/home/maxz/Documents/gpy/doc/GPy.kern._src.rst:177: WARNING: autodoc: failed to import module u'GPy.kern._src.symbolic'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) - File "/home/maxz/Documents/gpy/GPy/kern/_src/symbolic.py", line 5, in - from ...core.symbolic import Symbolic_core - File "/home/maxz/Documents/gpy/GPy/core/symbolic.py", line 10, in - from sympy.utilities.lambdify import lambdastr, _imp_namespace, _get_namespace -ImportError: No module named lambdify -/home/maxz/Documents/gpy/GPy/models/gp_kronecker_gaussian_regression.py:docstring of GPy.models.gp_kronecker_gaussian_regression.GPKroneckerGaussianRegression:13: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/models/gp_kronecker_gaussian_regression.py:docstring of GPy.models.gp_kronecker_gaussian_regression.GPKroneckerGaussianRegression:18: WARNING: Block quote ends without a blank line; unexpected unindent. 
-/home/maxz/Documents/gpy/doc/GPy.models.rst:66: WARNING: autodoc: failed to import module u'GPy.models.gp_multioutput_regression'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named gp_multioutput_regression -/home/maxz/Documents/gpy/GPy/models/gp_var_gauss.py:docstring of GPy.models.gp_var_gauss.GPVariationalGaussianApproximation:9: WARNING: Definition list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:32: WARNING: Field list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:32: WARNING: Inline interpreted text or phrase reference start-string without end-string. -/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:34: WARNING: Definition list ends without a blank line; unexpected unindent. 
-/home/maxz/Documents/gpy/doc/GPy.models.rst:138: WARNING: autodoc: failed to import module u'GPy.models.sparse_gp_multioutput_regression'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named sparse_gp_multioutput_regression -/home/maxz/Documents/gpy/doc/GPy.models.rst:178: WARNING: autodoc: failed to import module u'GPy.models.svigp_regression'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named svigp_regression -/home/maxz/Documents/gpy/GPy/plotting/matplot_dep/netpbmfile.py:docstring of GPy.plotting.matplot_dep.netpbmfile.imread:6: SEVERE: Unexpected section title. - -Examples --------- -/home/maxz/Documents/gpy/GPy/plotting/matplot_dep/netpbmfile.py:docstring of GPy.plotting.matplot_dep.netpbmfile.imsave:4: SEVERE: Unexpected section title. - -Examples --------- -/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_checkgrad:6: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_checkgrad:7: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_partial:7: WARNING: Definition list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_partial:9: ERROR: Unexpected indentation. -docstring of GPy.util.datasets.hapmap3:7: WARNING: Block quote ends without a blank line; unexpected unindent. 
-/home/maxz/Documents/gpy/doc/GPy.util.rst:74: WARNING: autodoc: failed to import module u'GPy.util.erfcx'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named erfcx -/home/maxz/Documents/gpy/doc/GPy.util.rst:146: WARNING: autodoc: failed to import module u'GPy.util.mpi'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named mpi -/home/maxz/Documents/gpy/GPy/util/netpbmfile.py:docstring of GPy.util.netpbmfile.imread:6: SEVERE: Unexpected section title. - -Examples --------- -/home/maxz/Documents/gpy/GPy/util/netpbmfile.py:docstring of GPy.util.netpbmfile.imsave:4: SEVERE: Unexpected section title. - -Examples --------- -/home/maxz/Documents/gpy/doc/GPy.util.rst:2: SEVERE: Duplicate ID: "module-GPy.util.subarray_and_sorting". -/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:8: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:11: SEVERE: Unexpected section title. - -Examples: -========= -/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:19: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:21: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/util/block_matrices.py:docstring of GPy.util.block_matrices.block_dot:3: ERROR: Undefined substitution referenced: "A11.B11|B12.B12". 
-/home/maxz/Documents/gpy/GPy/util/block_matrices.py:docstring of GPy.util.block_matrices.block_dot:3: ERROR: Undefined substitution referenced: "A21.B21|A22.B22". -/home/maxz/Documents/gpy/doc/installation.rst:22: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/doc/installation.rst:27: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/doc/tuto_creating_new_kernels.rst:58: WARNING: Inline literal start-string without end-string. -/home/maxz/Documents/gpy/doc/tuto_creating_new_models.rst:24: ERROR: Unknown target name: "parameterized". -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:83: WARNING: Title underline too short. - -Interacting with Parameters: -======================= -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:83: WARNING: Title underline too short. - -Interacting with Parameters: -======================= -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:109: WARNING: Title underline too short. - -Regular expressions ----------------- -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:164: WARNING: Title underline too short. - -Setting and fetching parameters `parameter_array` ------------------------------------------- -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:164: WARNING: Title underline too short. - -Setting and fetching parameters `parameter_array` ------------------------------------------- -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:220: WARNING: Title underline too short. - -Getting the model parameter's gradients -============================ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:220: WARNING: Title underline too short. - -Getting the model parameter's gradients -============================ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:236: WARNING: Title underline too short. 
- -Adjusting the model's constraints -================================ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:236: WARNING: Title underline too short. - -Adjusting the model's constraints -================================ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:287: WARNING: Title underline too short. - -Available Constraints -============== -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:287: WARNING: Title underline too short. - -Available Constraints -============== -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:299: WARNING: Title underline too short. - -Tying Parameters -============ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:299: WARNING: Title underline too short. - -Tying Parameters -============ -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:3: WARNING: Title overline too short. - -******************* -Parameterization handling -******************* -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:10: WARNING: Title underline too short. - -Parameter handles -============== -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:16: WARNING: Title underline too short. - -:py:class:`~GPy.core.parameterization.parameterized.Parameterized` -========== -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:16: WARNING: Title underline too short. - -:py:class:`~GPy.core.parameterization.parameterized.Parameterized` -========== -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:21: WARNING: Title underline too short. - -:py:class:`~GPy.core.parameterization.param.Param` -=========== -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:21: WARNING: Title underline too short. 
- -:py:class:`~GPy.core.parameterization.param.Param` -=========== -/home/maxz/Documents/gpy/doc/installation.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/kernel_implementation.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/modules.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_GP_regression.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_creating_new_kernels.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_creating_new_models.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_kernel_overview.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:336: WARNING: undefined label: creating_new_kernels (if the link has no caption the label must precede a section header) diff --git a/doc/tuto_kernel_overview.rst b/doc/tuto_kernel_overview.rst index e9e8f290..fc93491a 100644 --- a/doc/tuto_kernel_overview.rst +++ b/doc/tuto_kernel_overview.rst @@ -13,9 +13,9 @@ First we import the libraries we will need :: For most kernels, the dimension is the only mandatory parameter to define a kernel object. However, it is also possible to specify the values of the parameters. For example, the three following commands are valid for defining a squared exponential kernel (ie rbf or Gaussian) :: - ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.) - ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.) 
- ker3 = GPy.kern.rbf(1, .5, .5) + ker1 = GPy.kern.RBF(1) # Equivalent to ker1 = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.) + ker2 = GPy.kern.RBF(input_dim=1, variance = .75, lengthscale=2.) + ker3 = GPy.kern.RBF(1, .5, .5) A ``print`` and a ``plot`` functions are implemented to represent kernel objects. The commands :: @@ -52,21 +52,18 @@ Operations to combine kernels In ``GPy``, kernel objects can be added or multiplied. In both cases, two kinds of operations are possible since one can assume that the kernels to add/multiply are defined on the same space or on different subspaces. In other words, it is possible to use two kernels :math:`k_1,\ k_2` over :math:`\mathbb{R} \times \mathbb{R}` to create - * a kernel over :math:`\mathbb{R} \times \mathbb{R}`: :math:`k(x,y) = k_1(x,y) \times k_2(x,y)` - * a kernel over :math:`\mathbb{R}^2 \times \mathbb{R}^2`: :math:`k(\mathbf{x},\mathbf{y}) = k_1(x_1,y_1) \times k_2(x_2,y_2)` + * a kernel over :math:`\mathbb{R} \times \mathbb{R}`: :math:`k(x,y) = k_1(x,y) \times k_2(x,y)`. -These two options are available in GPy using the flag ``tensor`` in the ``add`` and ``prod`` functions. Here is a quick example :: +This is available in GPy via the ``add`` and ``prod`` functions. Here is a quick example :: - k1 = GPy.kern.rbf(1,1.,2.) + k1 = GPy.kern.RBF(1,1.,2.) k2 = GPy.kern.Matern32(1, 0.5, 0.2) # Product of kernels - k_prod = k1.prod(k2) # By default, tensor=False - k_prodtens = k1.prod(k2,tensor=True) + k_prod = k1.prod(k2) # Sum of kernels - k_add = k1.add(k2) # By default, tensor=False - k_addtens = k1.add(k2,tensor=True) + k_add = k1.add(k2) .. 
# plots pb.figure(figsize=(8,8)) @@ -75,23 +72,23 @@ These two options are available in GPy using the flag ``tensor`` in the ``add`` pb.title('prod') pb.subplot(2,2,2) k_prodtens.plot() - pb.title('tensor prod') + pb.title('prod') pb.subplot(2,2,3) k_add.plot() pb.title('sum') pb.subplot(2,2,4) k_addtens.plot() - pb.title('tensor sum') + pb.title('sum') pb.subplots_adjust(wspace=0.3, hspace=0.3) .. figure:: Figures/tuto_kern_overview_multadd.png :align: center :height: 500px -A shortcut for ``add`` and ``prod`` (with default flag ``tensor=False``) is provided by the usual ``+`` and ``*`` operators. Here is another example where we create a periodic kernel with some decay :: +A shortcut for ``add`` and ``prod`` is provided by the usual ``+`` and ``*`` operators. Here is another example where we create a periodic kernel with some decay :: - k1 = GPy.kern.rbf(1,1.,2) - k2 = GPy.kern.periodic_Matern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5) + k1 = GPy.kern.RBF(1,1.,2) + k2 = GPy.kern.PeriodicMatern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5) k = k1 * k2 # equivalent to k = k1.prod(k2) print k @@ -116,14 +113,14 @@ A shortcut for ``add`` and ``prod`` (with default flag ``tensor=False``) is prov In general, ``kern`` objects can be seen as a sum of ``kernparts`` objects, where the later are covariance functions defined on the same space. 
For example, the following code :: k = (k1+k2)*(k1+k2) - print k.parts[0].name, '\n', k.parts[1].name, '\n', k.parts[2].name, '\n', k.parts[3].name + print k.parts[0].name, '\n', k.parts[1].name, '\n', k.parts[1].parts[0].name, '\n', k.parts[1].parts[1].name, '\n' returns :: + add_1 + add_2 + rbf + periodic_Matern52 - rbfrbf - rbfperiodic_Mat52 - periodic_Mat52rbf - periodic_Mat52periodic_Mat52 Constraining the parameters =========================== @@ -137,9 +134,9 @@ Various constrains can be applied to the parameters of a kernel When calling one of these functions, the parameters to constrain can either by specified by a regular expression that matches its name or by a number that corresponds to the rank of the parameter. Here is an example :: - k1 = GPy.kern.rbf(1) + k1 = GPy.kern.RBF(1) k2 = GPy.kern.Matern32(1) - k3 = GPy.kern.white(1) + k3 = GPy.kern.White(1) k = k1 + k2 + k3 print k @@ -182,9 +179,9 @@ In two dimensions ANOVA kernels have the following form: Let us assume that we want to define an ANOVA kernel with a Matern 3/2 kernel for :math:`k_i`. As seen previously, we can define this kernel as follows :: - k_cst = GPy.kern.bias(1,variance=1.) - k_mat = GPy.kern.Matern52(1,variance=1., lengthscale=3) - Kanova = (k_cst + k_mat).prod(k_cst + k_mat,tensor=True) + k_cst = GPy.kern.Bias(1,variance=1.) + k_mat = GPy.kern.Matern52(1,variance=1.,lengthscale=3) + Kanova = (k_cst + k_mat).prod(k_cst + k_mat) print Kanova Printing the resulting kernel outputs the following :: @@ -257,17 +254,17 @@ The submodels can be represented with the option ``which_function`` of ``plot``: import GPy pb.ion() - ker1 = GPy.kern.rbf(D=1) # Equivalent to ker1 = GPy.kern.rbf(D=1, variance=1., lengthscale=1.) - ker2 = GPy.kern.rbf(D=1, variance = .75, lengthscale=3.) - ker3 = GPy.kern.rbf(1, .5, .25) + ker1 = GPy.kern.RBF(D=1) # Equivalent to ker1 = GPy.kern.RBF(D=1, variance=1., lengthscale=1.) + ker2 = GPy.kern.RBF(D=1, variance = .75, lengthscale=3.) 
+ ker3 = GPy.kern.RBF(1, .5, .25) ker1.plot() ker2.plot() ker3.plot() #pb.savefig("Figures/tuto_kern_overview_basicdef.png") - kernels = [GPy.kern.rbf(1), GPy.kern.exponential(1), GPy.kern.Matern32(1), GPy.kern.Matern52(1), GPy.kern.Brownian(1), GPy.kern.bias(1), GPy.kern.linear(1), GPy.kern.spline(1), GPy.kern.periodic_exponential(1), GPy.kern.periodic_Matern32(1), GPy.kern.periodic_Matern52(1), GPy.kern.white(1)] - kernel_names = ["GPy.kern.rbf", "GPy.kern.exponential", "GPy.kern.Matern32", "GPy.kern.Matern52", "GPy.kern.Brownian", "GPy.kern.bias", "GPy.kern.linear", "GPy.kern.spline", "GPy.kern.periodic_exponential", "GPy.kern.periodic_Matern32", "GPy.kern.periodic_Matern52", "GPy.kern.white"] + kernels = [GPy.kern.RBF(1), GPy.kern.Exponential(1), GPy.kern.Matern32(1), GPy.kern.Matern52(1), GPy.kern.Brownian(1), GPy.kern.Bias(1), GPy.kern.Linear(1), GPy.kern.PeriodicExponential(1), GPy.kern.PeriodicMatern32(1), GPy.kern.PeriodicMatern52(1), GPy.kern.White(1)] + kernel_names = ["GPy.kern.RBF", "GPy.kern.Exponential", "GPy.kern.Matern32", "GPy.kern.Matern52", "GPy.kern.Brownian", "GPy.kern.Bias", "GPy.kern.Linear", "GPy.kern.PeriodicExponential", "GPy.kern.PeriodicMatern32", "GPy.kern.PeriodicMatern52", "GPy.kern.White"] pb.figure(figsize=(16,12)) pb.subplots_adjust(wspace=.5, hspace=.5)