diff --git a/AUTHORS.txt b/AUTHORS.txt index 31efef02..c1599265 100644 --- a/AUTHORS.txt +++ b/AUTHORS.txt @@ -1,8 +1,3 @@ -James Hensman -Nicolo Fusi -Ricardo Andrade -Nicolas Durrande -Alan Saul -Max Zwiessele -Neil D. Lawrence -Zhenwen Dai +# This list was out of date, I've put "see contributors" for the moment. Not sure how to fix long term but don't want to be +# embarrassed by some of the great contributors of late being omitted from a list. +See contributors. diff --git a/GPy/__init__.py b/GPy/__init__.py index 26713406..5f2a209f 100644 --- a/GPy/__init__.py +++ b/GPy/__init__.py @@ -21,6 +21,8 @@ from . import plotting from .core import Model from .core.parameterization import Param, Parameterized, ObsAr +from .__version__ import __version__ + #@nottest try: #Get rid of nose dependency by only ignoring if you have nose installed diff --git a/GPy/__version__.py b/GPy/__version__.py new file mode 100644 index 00000000..732155f8 --- /dev/null +++ b/GPy/__version__.py @@ -0,0 +1 @@ +__version__ = "0.8.3" diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 9a199faa..903044b9 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -535,7 +535,7 @@ class GP(Model): which_data_ycols='all', fixed_inputs=[], levels=20, samples=0, fignum=None, ax=None, resolution=None, plot_raw=False, linecol=None,fillcol=None, Y_metadata=None, - data_symbol='kx', predict_kw=None, plot_training_data=True): + data_symbol='kx', predict_kw=None, plot_training_data=True, samples_y=0, apply_link=False): """ Plot the posterior of the GP. - In one dimension, the function is plotted with a shaded region identifying two standard deviations. @@ -558,7 +558,7 @@ class GP(Model): :param levels: number of levels to plot in a contour plot. 
:param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure :type levels: int - :param samples: the number of a posteriori samples to plot + :param samples: the number of a posteriori samples to plot, p(f*|y) :type samples: int :param fignum: figure to plot on. :type fignum: figure number @@ -574,6 +574,10 @@ class GP(Model): :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib. :param plot_training_data: whether or not to plot the training points :type plot_training_data: boolean + :param samples_y: the number of a posteriori samples to plot, p(y*|y) + :type samples_y: int + :param apply_link: if there is a link function of the likelihood, plot the link(f*) rather than f*, when plotting posterior samples f + :type apply_link: boolean """ assert "matplotlib" in sys.modules, "matplotlib package has not been imported." from ..plotting.matplot_dep import models_plots @@ -587,7 +591,7 @@ class GP(Model): levels, samples, fignum, ax, resolution, plot_raw=plot_raw, Y_metadata=Y_metadata, data_symbol=data_symbol, predict_kw=predict_kw, - plot_training_data=plot_training_data, **kw) + plot_training_data=plot_training_data, samples_y=samples_y, apply_link=apply_link, **kw) def plot_data(self, which_data_rows='all', @@ -613,7 +617,7 @@ class GP(Model): :param levels: number of levels to plot in a contour plot. :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure :type levels: int - :param samples: the number of a posteriori samples to plot + :param samples: the number of a posteriori samples to plot, p(f*|y) :type samples: int :param fignum: figure to plot on. 
:type fignum: figure number diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py index 239b2a26..2d52bff0 100644 --- a/GPy/core/parameterization/priors.py +++ b/GPy/core/parameterization/priors.py @@ -366,6 +366,7 @@ class InverseGamma(Gamma): def rvs(self, n): return 1. / np.random.gamma(scale=1. / self.b, shape=self.a, size=n) + class DGPLVM_KFDA(Prior): """ Implementation of the Discriminative Gaussian Process Latent Variable function using @@ -512,6 +513,7 @@ class DGPLVM_KFDA(Prior): self.A = self.compute_A(lst_ni) self.x_shape = x_shape + class DGPLVM(Prior): """ Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel. @@ -669,7 +671,7 @@ class DGPLVM(Prior): M_i = self.compute_Mi(cls) Sb = self.compute_Sb(cls, M_i, M_0) Sw = self.compute_Sw(cls, M_i) - # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1)) + # sb_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1)) #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1) #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0] Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0] @@ -1198,6 +1200,7 @@ class DGPLVM_T(Prior): + class HalfT(Prior): """ Implementation of the half student t probability function, coupled with random variables. 
@@ -1208,15 +1211,17 @@ class HalfT(Prior): """ domain = _POSITIVE _instances = [] - def __new__(cls, A, nu): # Singleton: + + def __new__(cls, A, nu): # Singleton: if cls._instances: cls._instances[:] = [instance for instance in cls._instances if instance()] for instance in cls._instances: if instance().A == A and instance().nu == nu: - return instance() + return instance() o = super(Prior, cls).__new__(cls, A, nu) cls._instances.append(weakref.ref(o)) return cls._instances[-1]() + def __init__(self, A, nu): self.A = float(A) self.nu = float(nu) @@ -1225,37 +1230,81 @@ class HalfT(Prior): def __str__(self): return "hT({:.2g}, {:.2g})".format(self.A, self.nu) - def lnpdf(self,theta): - return (theta>0) * ( self.constant -.5*(self.nu+1) * np.log( 1.+ (1./self.nu) * (theta/self.A)**2 ) ) + def lnpdf(self, theta): + return (theta > 0) * (self.constant - .5*(self.nu + 1) * np.log(1. + (1./self.nu) * (theta/self.A)**2)) - #theta = theta if isinstance(theta,np.ndarray) else np.array([theta]) - #lnpdfs = np.zeros_like(theta) - #theta = np.array([theta]) - #above_zero = theta.flatten()>1e-6 - #v = self.nu - #sigma2=self.A - #stop - #lnpdfs[above_zero] = (+ gammaln((v + 1) * 0.5) - # - gammaln(v * 0.5) - # - 0.5*np.log(sigma2 * v * np.pi) - # - 0.5*(v + 1)*np.log(1 + (1/np.float(v))*((theta[above_zero][0]**2)/sigma2)) - #) - #return lnpdfs + # theta = theta if isinstance(theta,np.ndarray) else np.array([theta]) + # lnpdfs = np.zeros_like(theta) + # theta = np.array([theta]) + # above_zero = theta.flatten()>1e-6 + # v = self.nu + # sigma2=self.A + # stop + # lnpdfs[above_zero] = (+ gammaln((v + 1) * 0.5) + # - gammaln(v * 0.5) + # - 0.5*np.log(sigma2 * v * np.pi) + # - 0.5*(v + 1)*np.log(1 + (1/np.float(v))*((theta[above_zero][0]**2)/sigma2)) + # ) + # return lnpdfs - def lnpdf_grad(self,theta): - theta = theta if isinstance(theta,np.ndarray) else np.array([theta]) + def lnpdf_grad(self, theta): + theta = theta if isinstance(theta, np.ndarray) else np.array([theta]) grad = 
np.zeros_like(theta) - above_zero = theta>1e-6 + above_zero = theta > 1e-6 v = self.nu - sigma2=self.A + sigma2 = self.A grad[above_zero] = -0.5*(v+1)*(2*theta[above_zero])/(v*sigma2 + theta[above_zero][0]**2) return grad def rvs(self, n): - #return np.random.randn(n) * self.sigma + self.mu - from scipy.stats import t - #[np.abs(x) for x in t.rvs(df=4,loc=0,scale=50, size=10000)]) - ret = t.rvs(self.nu,loc=0,scale=self.A, size=n) - ret[ret<0] = 0 - return ret + # return np.random.randn(n) * self.sigma + self.mu + from scipy.stats import t + # [np.abs(x) for x in t.rvs(df=4,loc=0,scale=50, size=10000)]) + ret = t.rvs(self.nu, loc=0, scale=self.A, size=n) + ret[ret < 0] = 0 + return ret + +class Exponential(Prior): + """ + Implementation of the Exponential probability function, + coupled with random variables. + + :param l: shape parameter + + """ + domain = _POSITIVE + _instances = [] + + def __new__(cls, l): # Singleton: + if cls._instances: + cls._instances[:] = [instance for instance in cls._instances if instance()] + for instance in cls._instances: + if instance().l == l: + return instance() + o = super(Exponential, cls).__new__(cls, l) + cls._instances.append(weakref.ref(o)) + return cls._instances[-1]() + + def __init__(self, l): + self.l = l + + def __str__(self): + return "Exp({:.2g})".format(self.l) + + def summary(self): + ret = {"E[x]": 1. / self.l, + "E[ln x]": np.nan, + "var[x]": 1. / self.l**2, + "Entropy": 1. 
- np.log(self.l), + "Mode": 0.} + return ret + + def lnpdf(self, x): + return np.log(self.l) - self.l * x + + def lnpdf_grad(self, x): + return - self.l + + def rvs(self, n): + return np.random.exponential(scale=self.l, size=n) diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py index 6d6633cb..830809d6 100644 --- a/GPy/core/parameterization/transformations.py +++ b/GPy/core/parameterization/transformations.py @@ -62,7 +62,7 @@ class Transformation(object): import matplotlib.pyplot as plt from ...plotting.matplot_dep import base_plots x = np.linspace(-8,8) - base_plots.meanplot(x, self.f(x),axes=axes*args,**kw) + base_plots.meanplot(x, self.f(x), *args, ax=axes, **kw) axes = plt.gca() axes.set_xlabel(xlabel) axes.set_ylabel(ylabel) @@ -488,7 +488,7 @@ class Logistic(Transformation): return instance() newfunc = super(Transformation, cls).__new__ if newfunc is object.__new__: - o = newfunc(cls) + o = newfunc(cls) else: o = newfunc(cls, lower, upper, *args, **kwargs) cls._instances.append(weakref.ref(o)) diff --git a/GPy/inference/mcmc/__init__.py b/GPy/inference/mcmc/__init__.py index 8f185457..9f6457e6 100644 --- a/GPy/inference/mcmc/__init__.py +++ b/GPy/inference/mcmc/__init__.py @@ -1 +1,2 @@ from .hmc import HMC +from .samplers import * diff --git a/GPy/inference/mcmc/samplers.py b/GPy/inference/mcmc/samplers.py index 2fd88d2f..12939cfe 100644 --- a/GPy/inference/mcmc/samplers.py +++ b/GPy/inference/mcmc/samplers.py @@ -18,11 +18,11 @@ class Metropolis_Hastings: def __init__(self,model,cov=None): """Metropolis Hastings, with tunings according to Gelman et al. 
""" self.model = model - current = self.model._get_params_transformed() + current = self.model.optimizer_array self.D = current.size self.chains = [] if cov is None: - self.cov = model.Laplace_covariance() + self.cov = np.eye(self.D) else: self.cov = cov self.scale = 2.4/np.sqrt(self.D) @@ -33,20 +33,20 @@ class Metropolis_Hastings: if start is None: self.model.randomize() else: - self.model._set_params_transformed(start) + self.model.optimizer_array = start - - - def sample(self, Ntotal, Nburn, Nthin, tune=True, tune_throughout=False, tune_interval=400): - current = self.model._get_params_transformed() - fcurrent = self.model.log_likelihood() + self.model.log_prior() + def sample(self, Ntotal=10000, Nburn=1000, Nthin=10, tune=True, tune_throughout=False, tune_interval=400): + current = self.model.optimizer_array + fcurrent = self.model.log_likelihood() + self.model.log_prior() + \ + self.model._log_det_jacobian() accepted = np.zeros(Ntotal,dtype=np.bool) for it in range(Ntotal): - print("sample %d of %d\r"%(it,Ntotal), end=' ') + print("sample %d of %d\r"%(it,Ntotal),end="\t") sys.stdout.flush() prop = np.random.multivariate_normal(current, self.cov*self.scale*self.scale) - self.model._set_params_transformed(prop) - fprop = self.model.log_likelihood() + self.model.log_prior() + self.model.optimizer_array = prop + fprop = self.model.log_likelihood() + self.model.log_prior() + \ + self.model._log_det_jacobian() if fprop>fcurrent:#sample accepted, going 'uphill' accepted[it] = True @@ -74,10 +74,11 @@ class Metropolis_Hastings: def predict(self,function,args): """Make a prediction for the function, to which we will pass the additional arguments""" - param = self.model._get_params() + param = self.model.param_array fs = [] for p in self.chain: - self.model._set_params(p) + self.model.param_array = p fs.append(function(*args)) - self.model._set_params(param)# reset model to starting state + # reset model to starting state + self.model.param_array = param return fs diff 
--git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 924694e9..1cc0c0ba 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -256,8 +256,6 @@ class Kern(Parameterized): :param other: the other kernel to be added :type other: GPy.kern - :param tensor: whether or not to use the tensor space (default is false). - :type tensor: bool """ assert isinstance(other, Kern), "only kernels can be multiplied to kernels..." diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index ff7cf140..b47e663d 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -27,8 +27,6 @@ class Prod(CombinationKernel): :param k1, k2: the kernels to multiply :type k1, k2: Kern - :param tensor: The kernels are either multiply as functions defined on the same input space (default) or on the product of the input spaces - :type tensor: Boolean :rtype: kernel object """ diff --git a/GPy/likelihoods/exponential.py b/GPy/likelihoods/exponential.py index 0a6c543d..ecf0977e 100644 --- a/GPy/likelihoods/exponential.py +++ b/GPy/likelihoods/exponential.py @@ -124,7 +124,7 @@ class Exponential(Likelihood): #d3lik_dlink3 = 6*y/(link_f**4) - 2./(link_f**3) return d3lik_dlink3 - def samples(self, gp): + def samples(self, gp, Y_metadata=None): """ Returns a set of samples of observations based on a given value of the latent variable. 
diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index e961dd1e..74c4c6fd 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -622,7 +622,7 @@ class Likelihood(Parameterized): Nf_samp = 300 Ny_samp = 1 s = np.random.randn(mu.shape[0], Nf_samp)*np.sqrt(var) + mu - ss_y = self.samples(s, Y_metadata, samples=Ny_samp) + ss_y = self.samples(s, Y_metadata)#, samples=Ny_samp) #ss_y = ss_y.reshape(mu.shape[0], mu.shape[1], Nf_samp*Ny_samp) pred_quantiles = [np.percentile(ss_y, q, axis=1)[:,None] for q in quantiles] diff --git a/GPy/likelihoods/poisson.py b/GPy/likelihoods/poisson.py index 1c3fec9e..cfe279bb 100644 --- a/GPy/likelihoods/poisson.py +++ b/GPy/likelihoods/poisson.py @@ -137,7 +137,7 @@ class Poisson(Likelihood): """ return self.gp_link.transf(gp) - def samples(self, gp, Y_metadata=None, samples=1): + def samples(self, gp, Y_metadata=None): """ Returns a set of samples of observations based on a given value of the latent variable. 
@@ -145,5 +145,7 @@ class Poisson(Likelihood): """ orig_shape = gp.shape gp = gp.flatten() - Ysim = np.random.poisson(self.gp_link.transf(gp), [samples, gp.size]).T - return Ysim.reshape(orig_shape+(samples,)) + # Ysim = np.random.poisson(self.gp_link.transf(gp), [samples, gp.size]).T + # return Ysim.reshape(orig_shape+(samples,)) + Ysim = np.random.poisson(self.gp_link.transf(gp)) + return Ysim.reshape(orig_shape) diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index 3a5a01d2..a15146cf 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -75,7 +75,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', levels=20, samples=0, fignum=None, ax=None, resolution=None, plot_raw=False, linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx', - apply_link=False, samples_f=0, plot_uncertain_inputs=True, predict_kw=None, plot_training_data=True): + apply_link=False, samples_y=0, plot_uncertain_inputs=True, predict_kw=None, plot_training_data=True): """ Plot the posterior of the GP. - In one dimension, the function is plotted with a shaded region identifying two standard deviations. @@ -93,24 +93,30 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', :type which_data_rows: 'all' or a list of integers :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v. :type fixed_inputs: a list of tuples - :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D - :type resolution: int - :param levels: number of levels to plot in a contour plot. 
+ :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure :type levels: int - :param samples: the number of a posteriori samples to plot p(y*|y) + :param samples: the number of a posteriori samples to plot p(f*|y) :type samples: int :param fignum: figure to plot on. :type fignum: figure number :param ax: axes to plot on. :type ax: axes handle + :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D + :type resolution: int + :param plot_raw: Whether to plot the raw function p(f|y) + :type plot_raw: boolean :param linecol: color of line to plot. - :type linecol: + :type linecol: hex or color :param fillcol: color of fill - :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure - :param apply_link: apply the link function if plotting f (default false) + :type fillcol: hex or color + :param apply_link: apply the link function if plotting f (default false), as well as posterior samples if requested :type apply_link: boolean - :param samples_f: the number of posteriori f samples to plot p(f*|y) - :type samples_f: int + :param samples_y: the number of posteriori f samples to plot p(y*|y) + :type samples_y: int + :param plot_uncertain_inputs: plot the uncertainty of the inputs as error bars if they have uncertainty (BGPLVM etc.) 
+ :type plot_uncertain_inputs: boolean + :param predict_kw: keyword args for _raw_predict and predict functions if required + :type predict_kw: dict :param plot_training_data: whether or not to plot the training points :type plot_training_data: boolean """ @@ -185,17 +191,17 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #optionally plot some samples if samples: #NOTE not tested with fixed_inputs - Ysim = model.posterior_samples(Xgrid, samples, Y_metadata=Y_metadata) - print(Ysim.shape) - print(Xnew.shape) - for yi in Ysim.T: - plots['posterior_samples'] = ax.plot(Xnew, yi[:,None], '#3300FF', linewidth=0.25) - #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs. - - if samples_f: #NOTE not tested with fixed_inputs - Fsim = model.posterior_samples_f(Xgrid, samples_f) + Fsim = model.posterior_samples_f(Xgrid, samples) + if apply_link: + Fsim = model.likelihood.gp_link.transf(Fsim) for fi in Fsim.T: - plots['posterior_samples_f'] = ax.plot(Xnew, fi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25) + plots['posterior_samples'] = ax.plot(Xnew, fi[:,None], '#3300FF', linewidth=0.25) + #ax.plot(Xnew, fi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs. + + if samples_y: #NOTE not tested with fixed_inputs + Ysim = model.posterior_samples(Xgrid, samples_y, Y_metadata=Y_metadata) + for yi in Ysim.T: + plots['posterior_samples_y'] = ax.scatter(Xnew, yi[:,None], s=5, c=Tango.colorsHex['darkBlue'], marker='o', alpha=0.5) #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs. 
diff --git a/GPy/testing/linalg_test.py b/GPy/testing/linalg_test.py index ec3aca5a..fd818433 100644 --- a/GPy/testing/linalg_test.py +++ b/GPy/testing/linalg_test.py @@ -1,7 +1,6 @@ import numpy as np import scipy as sp -from GPy.util.linalg import jitchol -import GPy +from ..util.linalg import jitchol,trace_dot, ijk_jlk_to_il, ijk_ljk_to_ilk class LinalgTests(np.testing.TestCase): def setUp(self): @@ -37,18 +36,19 @@ class LinalgTests(np.testing.TestCase): except sp.linalg.LinAlgError: return True - def test_einsum_ijk_jlk_to_il(self): - A = np.random.randn(50, 150, 5) - B = np.random.randn(150, 100, 5) - pure = np.einsum('ijk,jlk->il', A, B) - quick = GPy.util.linalg.ijk_jlk_to_il(A, B) - np.testing.assert_allclose(pure, quick) + def test_trace_dot(self): + N = 5 + A = np.random.rand(N,N) + B = np.random.rand(N,N) + trace = np.trace(A.dot(B)) + test_trace = trace_dot(A,B) + np.testing.assert_allclose(trace,test_trace,atol=1e-13) def test_einsum_ij_jlk_to_ilk(self): A = np.random.randn(15, 150, 5) B = np.random.randn(150, 50, 5) pure = np.einsum('ijk,jlk->il', A, B) - quick = GPy.util.linalg.ijk_jlk_to_il(A,B) + quick = ijk_jlk_to_il(A,B) np.testing.assert_allclose(pure, quick) def test_einsum_ijk_ljk_to_ilk(self): @@ -56,5 +56,5 @@ class LinalgTests(np.testing.TestCase): B = np.random.randn(150, 20, 5) #B = A.copy() pure = np.einsum('ijk,ljk->ilk', A, B) - quick = GPy.util.linalg.ijk_ljk_to_ilk(A,B) + quick = ijk_ljk_to_ilk(A,B) np.testing.assert_allclose(pure, quick) diff --git a/GPy/testing/rv_transformation_tests.py b/GPy/testing/rv_transformation_tests.py new file mode 100644 index 00000000..44d8710d --- /dev/null +++ b/GPy/testing/rv_transformation_tests.py @@ -0,0 +1,101 @@ +# Written by Ilias Bilionis +""" +Test if hyperparameters in models are properly transformed. +""" + + +import unittest +import numpy as np +import scipy.stats as st +import GPy + + +class TestModel(GPy.core.Model): + """ + A simple GPy model with one parameter. 
+ """ + def __init__(self): + GPy.core.Model.__init__(self, 'test_model') + theta = GPy.core.Param('theta', 1.) + self.link_parameter(theta) + + def log_likelihood(self): + return 0. + + +class RVTransformationTestCase(unittest.TestCase): + + def _test_trans(self, trans): + m = TestModel() + prior = GPy.priors.LogGaussian(.5, 0.1) + m.theta.set_prior(prior) + m.theta.unconstrain() + m.theta.constrain(trans) + # The PDF of the transformed variables + p_phi = lambda phi : np.exp(-m._objective_grads(phi)[0]) + # To the empirical PDF of: + theta_s = prior.rvs(100000) + phi_s = trans.finv(theta_s) + # which is essentially a kernel density estimation + kde = st.gaussian_kde(phi_s) + # We will compare the PDF here: + phi = np.linspace(phi_s.min(), phi_s.max(), 100) + # The transformed PDF of phi should be this: + pdf_phi = np.array([p_phi(p) for p in phi]) + # UNCOMMENT TO SEE GRAPHICAL COMPARISON + #import matplotlib.pyplot as plt + #fig, ax = plt.subplots() + #ax.hist(phi_s, normed=True, bins=100, alpha=0.25, label='Histogram') + #ax.plot(phi, kde(phi), '--', linewidth=2, label='Kernel Density Estimation') + #ax.plot(phi, pdf_phi, ':', linewidth=2, label='Transformed PDF') + #ax.set_xlabel(r'transformed $\theta$', fontsize=16) + #ax.set_ylabel('PDF', fontsize=16) + #plt.legend(loc='best') + #plt.show(block=True) + # END OF PLOT + # The following test cannot be very accurate + self.assertTrue(np.linalg.norm(pdf_phi - kde(phi)) / np.linalg.norm(kde(phi)) <= 1e-1) + # Check the gradients at a few random points + for i in range(10): + m.theta = theta_s[i] + self.assertTrue(m.checkgrad(verbose=True)) + + def test_Logexp(self): + self._test_trans(GPy.constraints.Logexp()) + self._test_trans(GPy.constraints.Exponent()) + + +if __name__ == '__main__': + unittest.main() + quit() + m = TestModel() + prior = GPy.priors.LogGaussian(0., .9) + m.theta.set_prior(prior) + + # The following should return the PDF in terms of the transformed quantities + p_phi = lambda phi : 
np.exp(-m._objective_grads(phi)[0]) + + # Let's look at the transformation phi = log(exp(theta - 1)) + trans = GPy.constraints.Exponent() + m.theta.constrain(trans) + # Plot the transformed probability density + phi = np.linspace(-8, 8, 100) + fig, ax = plt.subplots() + # Let's draw some samples of theta and transform them so that we see + # which one is right + theta_s = prior.rvs(10000) + # Transform it to the new variables + phi_s = trans.finv(theta_s) + # And draw their histogram + ax.hist(phi_s, normed=True, bins=100, alpha=0.25, label='Empirical') + # This is to be compared to the PDF of the model expressed in terms of these new + # variables + ax.plot(phi, [p_phi(p) for p in phi], label='Transformed PDF', linewidth=2) + ax.set_xlim(-3, 10) + ax.set_xlabel(r'transformed $\theta$', fontsize=16) + ax.set_ylabel('PDF', fontsize=16) + plt.legend(loc='best') + # Now let's test the gradients + m.checkgrad(verbose=True) + # And show the plot + plt.show(block=True) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index c2f481f0..b4ffd1b0 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -157,7 +157,7 @@ def trace_dot(a, b): """ Efficiently compute the trace of the matrix product of a and b """ - return np.sum(a * b) + return np.einsum('ij,ji->', a, b) def mdot(*args): """ diff --git a/README.md b/README.md index e74f895c..4818b766 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ # GPy + A Gaussian processes framework in Python. * [GPy homepage](http://sheffieldml.github.io/GPy/) @@ -10,23 +11,31 @@ A Gaussian processes framework in Python. Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GPy.png) +### Citation + + @Misc{gpy2014, + author = {{The GPy authors}}, + title = {{GPy}: A Gaussian process framework in python}, + howpublished = {\url{http://github.com/SheffieldML/GPy}}, + year = {2012--2015} + } + +### Pronounciation + +We like to pronounce it 'Gee-pie'. 
+ ### Getting started: installing with pip -We are now requiring the newest version of ![scipy](http://www.scipy.org/) and thus, we strongly recommend using -the ![anaconda python distribution](http://continuum.io/downloads). +We are now requiring the newest version (0.16) of +[scipy](http://www.scipy.org/) and thus, we strongly recommend using +the [anaconda python distribution](http://continuum.io/downloads). With anaconda you can install GPy by the following: conda update scipy pip install gpy -. We've also had luck with ![enthought](http://www.enthought.com), although enthought currently (as of 8th Sep. 2015) does not support scipy 0.16. - -On a fresh install of windows 8.1, we downloaded the Anaconda python distribution, started the anaconda command prompt and typed - - conda update scipy - pip install GPy - -Everything seems to work: from here you can type `ipython` and then `import GPy; GPy.tests()`. Working as of 21/11/14 +We've also had luck with [enthought](http://www.enthought.com), +although enthought currently (as of 8th Sep. 2015) does not support scipy 0.16. If you'd like to install from source, or want to contribute to the project (e.g. by sending pull requests via github), read on. @@ -55,21 +64,13 @@ The command `python3 setup.py build_ext --inplace` builds the Cython extensions. * All weave functions not covered by the test suite are *simply commented out*. Can add equivalents later as test functions become available * A set of benchmarks would be useful! -### Citation - @Misc{gpy2014, - author = {The GPy authors}, - title = {{GPy}: A Gaussian process framework in python}, - howpublished = {\url{http://github.com/SheffieldML/GPy}}, - year = {2012--2014} - } - -### Pronounciation - -We like to pronounce it 'Gee-pie'. ### Ubuntu hackers +:Note: Right now the Ubuntu package index does not include scipy 0.16.0, and thus, cannot +be used for GPy. We hope this gets fixed soon. + For the most part, the developers are using ubuntu. 
To install the required packages: sudo apt-get install python-numpy python-scipy python-matplotlib @@ -83,10 +84,29 @@ clone this git repository and add it to your path: ### OSX -Everything appears to work out-of-the box using ![enthought](http://www.enthought.com) on osx Mavericks. Download/clone GPy, and then add GPy to your PYTHONPATH - git clone git@github.com:SheffieldML/GPy.git ~/SheffieldML - echo 'PYTHONPATH=$PYTHONPATH:~/SheffieldML' >> ~/.profile +We were working hard to make pre-built distributions ready. +You can now install GPy via pip on MacOSX using +[anaconda python distribution](http://continuum.io/downloads): + + conda update scipy + pip install gpy + +If this does not work, then you need to build GPy yourself, +using the [development toolkits](https://developer.apple.com/xcode/). +Download/clone GPy and run the build process: + + conda update scipy + git clone git@github.com:SheffieldML/GPy.git ~/GPy + cd ~/GPy + python setup.py install + +If you do not wish to build the C extensions (10 times speedup), +you can run the pure python installations, by just adding GPy +to your python path. + + echo 'PYTHONPATH=$PYTHONPATH:~/SheffieldML' >> ~/.profile + ### Compiling documentation: @@ -137,7 +157,9 @@ Run nosetests from the root directory of the repository: or from within IPython import GPy; GPy.tests() - + + + ## Funding Acknowledgements @@ -145,15 +167,12 @@ Current support for the GPy software is coming through the following projects. 
* [EU FP7-PEOPLE Project Ref 316861](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/mlpm/) "MLPM2012: Machine Learning for Personalized Medicine" -* [BBSRC Project No BB/K011197/1](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/recombinant/) "Linking recombinant gene sequence to protein product manufacturability using CHO cell genomic resources" - * MRC Special Training Fellowship "Bayesian models of expression in the transcriptome for clinical RNA-seq" -* [EU FP7-KBBE Project Ref 289434](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/biopredyn/) "From Data to Models: New Bioinformatics Methods and Tools for Data-Driven Predictive Dynamic Modelling in Biotechnological Applications" - * [EU FP7-ICT Project Ref 612139](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/wysiwyd/) "WYSIWYD: What You Say is What You Did" Previous support for the GPy software came from the following projects: - +* [BBSRC Project No BB/K011197/1](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/recombinant/) "Linking recombinant gene sequence to protein product manufacturability using CHO cell genomic resources" +* [EU FP7-KBBE Project Ref 289434](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/biopredyn/) "From Data to Models: New Bioinformatics Methods and Tools for Data-Driven Predictive Dynamic Modelling in Biotechnological Applications" * [BBSRC Project No BB/H018123/2](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/iterative/) "An iterative pipeline of computational modelling and experimental design for uncovering gene regulatory networks in vertebrates" * [Erasysbio](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/synergy/) "SYNERGY: Systems approach to gene regulation biology through nuclear receptors" diff --git a/doc/log.txt b/doc/log.txt deleted file mode 100644 index d4f829cb..00000000 --- a/doc/log.txt +++ /dev/null @@ -1,222 +0,0 @@ -/home/maxz/Documents/gpy/GPy/__init__.py:docstring of 
GPy.load:1: WARNING: Inline interpreted text or phrase reference start-string without end-string. -/home/maxz/Documents/gpy/GPy/core/gp.py:docstring of GPy.core.gp.GP.optimize:8: ERROR: Unknown interpreted text role "module". -/home/maxz/Documents/gpy/GPy/core/gp.py:docstring of GPy.core.gp.GP.predict_wishard_embedding:6: WARNING: Field list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/core/gp.py:docstring of GPy.core.gp.GP.predictive_gradients:5: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/core/gp.py:docstring of GPy.core.gp.GP.predictive_gradients:10: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/core/model.py:docstring of GPy.core.model.Model.optimize_restarts:29: WARNING: Explicit markup ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/doc/GPy.core.rst:57: WARNING: autodoc: failed to import module u'GPy.core.svigp'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named svigp -/home/maxz/Documents/gpy/doc/GPy.core.rst:65: WARNING: autodoc: failed to import module u'GPy.core.symbolic'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) - File "/home/maxz/Documents/gpy/GPy/core/symbolic.py", line 10, in - from sympy.utilities.lambdify import lambdastr, _imp_namespace, _get_namespace -ImportError: No module named lambdify -/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Indexable.unset_priors:1: WARNING: Inline emphasis start-string without end-string. 
-/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Nameable.hierarchy_name:4: WARNING: Field list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Parameterizable.traverse:1: WARNING: Inline emphasis start-string without end-string. -/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Parameterizable.traverse:1: WARNING: Inline strong start-string without end-string. -/home/maxz/Documents/gpy/GPy/core/parameterization/parameterized.py:docstring of GPy.core.parameterization.parameterized.Parameterized:18: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/core/parameterization/parameterized.py:docstring of GPy.core.parameterization.parameterized.Parameterized:20: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/core/parameterization/ties_and_remappings.py:docstring of GPy.core.parameterization.ties_and_remappings.Tie:18: SEVERE: Unexpected section title or transition. 
- -================================ -/home/maxz/Documents/gpy/doc/GPy.examples.rst:50: WARNING: autodoc: failed to import module u'GPy.examples.stochastic'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named stochastic -/home/maxz/Documents/gpy/doc/GPy.examples.rst:58: WARNING: autodoc: failed to import module u'GPy.examples.tutorials'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named tutorials -/home/maxz/Documents/gpy/doc/GPy.inference.latent_function_inference.rst:82: WARNING: autodoc: failed to import module u'GPy.inference.latent_function_inference.var_dtc_gpu'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named var_dtc_gpu -/home/maxz/Documents/gpy/doc/GPy.inference.optimization.rst:42: WARNING: autodoc: failed to import module u'GPy.inference.optimization.sgd'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named sgd -/home/maxz/Documents/gpy/GPy/kern/_src/coregionalize.py:docstring of GPy.kern._src.coregionalize.Coregionalize:5: ERROR: Unexpected indentation. 
-/home/maxz/Documents/gpy/doc/GPy.kern._src.rst:73: WARNING: autodoc: failed to import module u'GPy.kern._src.hierarchical'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named hierarchical -/home/maxz/Documents/gpy/GPy/kern/_src/independent_outputs.py:docstring of GPy.kern._src.independent_outputs.IndependentOutputs:9: WARNING: Field list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:24: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:22: WARNING: Inline literal start-string without end-string. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:25: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Definition list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Inline literal start-string without end-string. -/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Inline interpreted text or phrase reference start-string without end-string. 
-/home/maxz/Documents/gpy/doc/GPy.kern._src.rst:177: WARNING: autodoc: failed to import module u'GPy.kern._src.symbolic'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) - File "/home/maxz/Documents/gpy/GPy/kern/_src/symbolic.py", line 5, in - from ...core.symbolic import Symbolic_core - File "/home/maxz/Documents/gpy/GPy/core/symbolic.py", line 10, in - from sympy.utilities.lambdify import lambdastr, _imp_namespace, _get_namespace -ImportError: No module named lambdify -/home/maxz/Documents/gpy/GPy/models/gp_kronecker_gaussian_regression.py:docstring of GPy.models.gp_kronecker_gaussian_regression.GPKroneckerGaussianRegression:13: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/models/gp_kronecker_gaussian_regression.py:docstring of GPy.models.gp_kronecker_gaussian_regression.GPKroneckerGaussianRegression:18: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/doc/GPy.models.rst:66: WARNING: autodoc: failed to import module u'GPy.models.gp_multioutput_regression'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named gp_multioutput_regression -/home/maxz/Documents/gpy/GPy/models/gp_var_gauss.py:docstring of GPy.models.gp_var_gauss.GPVariationalGaussianApproximation:9: WARNING: Definition list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:32: WARNING: Field list ends without a blank line; unexpected unindent. 
-/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:32: WARNING: Inline interpreted text or phrase reference start-string without end-string. -/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:34: WARNING: Definition list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/doc/GPy.models.rst:138: WARNING: autodoc: failed to import module u'GPy.models.sparse_gp_multioutput_regression'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named sparse_gp_multioutput_regression -/home/maxz/Documents/gpy/doc/GPy.models.rst:178: WARNING: autodoc: failed to import module u'GPy.models.svigp_regression'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named svigp_regression -/home/maxz/Documents/gpy/GPy/plotting/matplot_dep/netpbmfile.py:docstring of GPy.plotting.matplot_dep.netpbmfile.imread:6: SEVERE: Unexpected section title. - -Examples --------- -/home/maxz/Documents/gpy/GPy/plotting/matplot_dep/netpbmfile.py:docstring of GPy.plotting.matplot_dep.netpbmfile.imsave:4: SEVERE: Unexpected section title. - -Examples --------- -/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_checkgrad:6: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_checkgrad:7: WARNING: Block quote ends without a blank line; unexpected unindent. 
-/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_partial:7: WARNING: Definition list ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_partial:9: ERROR: Unexpected indentation. -docstring of GPy.util.datasets.hapmap3:7: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/doc/GPy.util.rst:74: WARNING: autodoc: failed to import module u'GPy.util.erfcx'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named erfcx -/home/maxz/Documents/gpy/doc/GPy.util.rst:146: WARNING: autodoc: failed to import module u'GPy.util.mpi'; the following exception was raised: -Traceback (most recent call last): - File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object - __import__(self.modname) -ImportError: No module named mpi -/home/maxz/Documents/gpy/GPy/util/netpbmfile.py:docstring of GPy.util.netpbmfile.imread:6: SEVERE: Unexpected section title. - -Examples --------- -/home/maxz/Documents/gpy/GPy/util/netpbmfile.py:docstring of GPy.util.netpbmfile.imsave:4: SEVERE: Unexpected section title. - -Examples --------- -/home/maxz/Documents/gpy/doc/GPy.util.rst:2: SEVERE: Duplicate ID: "module-GPy.util.subarray_and_sorting". -/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:8: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:11: SEVERE: Unexpected section title. 
- -Examples: -========= -/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:19: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:21: WARNING: Block quote ends without a blank line; unexpected unindent. -/home/maxz/Documents/gpy/GPy/util/block_matrices.py:docstring of GPy.util.block_matrices.block_dot:3: ERROR: Undefined substitution referenced: "A11.B11|B12.B12". -/home/maxz/Documents/gpy/GPy/util/block_matrices.py:docstring of GPy.util.block_matrices.block_dot:3: ERROR: Undefined substitution referenced: "A21.B21|A22.B22". -/home/maxz/Documents/gpy/doc/installation.rst:22: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/doc/installation.rst:27: ERROR: Unexpected indentation. -/home/maxz/Documents/gpy/doc/tuto_creating_new_kernels.rst:58: WARNING: Inline literal start-string without end-string. -/home/maxz/Documents/gpy/doc/tuto_creating_new_models.rst:24: ERROR: Unknown target name: "parameterized". -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:83: WARNING: Title underline too short. - -Interacting with Parameters: -======================= -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:83: WARNING: Title underline too short. - -Interacting with Parameters: -======================= -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:109: WARNING: Title underline too short. - -Regular expressions ----------------- -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:164: WARNING: Title underline too short. - -Setting and fetching parameters `parameter_array` ------------------------------------------- -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:164: WARNING: Title underline too short. 
- -Setting and fetching parameters `parameter_array` ------------------------------------------- -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:220: WARNING: Title underline too short. - -Getting the model parameter's gradients -============================ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:220: WARNING: Title underline too short. - -Getting the model parameter's gradients -============================ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:236: WARNING: Title underline too short. - -Adjusting the model's constraints -================================ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:236: WARNING: Title underline too short. - -Adjusting the model's constraints -================================ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:287: WARNING: Title underline too short. - -Available Constraints -============== -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:287: WARNING: Title underline too short. - -Available Constraints -============== -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:299: WARNING: Title underline too short. - -Tying Parameters -============ -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:299: WARNING: Title underline too short. - -Tying Parameters -============ -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:3: WARNING: Title overline too short. - -******************* -Parameterization handling -******************* -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:10: WARNING: Title underline too short. - -Parameter handles -============== -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:16: WARNING: Title underline too short. - -:py:class:`~GPy.core.parameterization.parameterized.Parameterized` -========== -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:16: WARNING: Title underline too short. 
- -:py:class:`~GPy.core.parameterization.parameterized.Parameterized` -========== -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:21: WARNING: Title underline too short. - -:py:class:`~GPy.core.parameterization.param.Param` -=========== -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:21: WARNING: Title underline too short. - -:py:class:`~GPy.core.parameterization.param.Param` -=========== -/home/maxz/Documents/gpy/doc/installation.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/kernel_implementation.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/modules.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_GP_regression.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_creating_new_kernels.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_creating_new_models.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_kernel_overview.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:: WARNING: document isn't included in any toctree -/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:336: WARNING: undefined label: creating_new_kernels (if the link has no caption the label must precede a section header) diff --git a/doc/tuto_kernel_overview.rst b/doc/tuto_kernel_overview.rst index e9e8f290..fc93491a 100644 --- a/doc/tuto_kernel_overview.rst +++ b/doc/tuto_kernel_overview.rst @@ -13,9 +13,9 @@ First we import the libraries we will need :: For most kernels, the dimension is the only mandatory parameter to define a kernel object. However, it is also possible to specify the values of the parameters. 
For example, the three following commands are valid for defining a squared exponential kernel (i.e. rbf or Gaussian) :: - ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.) - ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.) - ker3 = GPy.kern.rbf(1, .5, .5) + ker1 = GPy.kern.RBF(1) # Equivalent to ker1 = GPy.kern.RBF(input_dim=1, variance=1., lengthscale=1.) + ker2 = GPy.kern.RBF(input_dim=1, variance = .75, lengthscale=2.) + ker3 = GPy.kern.RBF(1, .5, .5) ``print`` and ``plot`` functions are implemented to represent kernel objects. The commands :: @@ -52,21 +52,18 @@ Operations to combine kernels In ``GPy``, kernel objects can be added or multiplied. In both cases, two kinds of operations are possible since one can assume that the kernels to add/multiply are defined on the same space or on different subspaces. In other words, it is possible to use two kernels :math:`k_1,\ k_2` over :math:`\mathbb{R} \times \mathbb{R}` to create - * a kernel over :math:`\mathbb{R} \times \mathbb{R}`: :math:`k(x,y) = k_1(x,y) \times k_2(x,y)` - * a kernel over :math:`\mathbb{R}^2 \times \mathbb{R}^2`: :math:`k(\mathbf{x},\mathbf{y}) = k_1(x_1,y_1) \times k_2(x_2,y_2)` + * a kernel over :math:`\mathbb{R} \times \mathbb{R}`: :math:`k(x,y) = k_1(x,y) \times k_2(x,y)`. -These two options are available in GPy using the flag ``tensor`` in the ``add`` and ``prod`` functions. Here is a quick example :: +This is available in GPy via the ``add`` and ``prod`` functions. Here is a quick example :: - k1 = GPy.kern.rbf(1,1.,2.) + k1 = GPy.kern.RBF(1,1.,2.) k2 = GPy.kern.Matern32(1, 0.5, 0.2) # Product of kernels - k_prod = k1.prod(k2) # By default, tensor=False - k_prodtens = k1.prod(k2,tensor=True) + k_prod = k1.prod(k2) # Sum of kernels - k_add = k1.add(k2) # By default, tensor=False - k_addtens = k1.add(k2,tensor=True) + k_add = k1.add(k2) ..
# plots pb.figure(figsize=(8,8)) @@ -75,23 +72,23 @@ These two options are available in GPy using the flag ``tensor`` in the ``add`` pb.subplot(2,2,2) k_prodtens.plot() - pb.title('tensor prod') + pb.title('prod') pb.subplot(2,2,3) k_add.plot() pb.title('sum') pb.subplot(2,2,4) k_addtens.plot() - pb.title('tensor sum') + pb.title('sum') pb.subplots_adjust(wspace=0.3, hspace=0.3) .. figure:: Figures/tuto_kern_overview_multadd.png :align: center :height: 500px -A shortcut for ``add`` and ``prod`` (with default flag ``tensor=False``) is provided by the usual ``+`` and ``*`` operators. Here is another example where we create a periodic kernel with some decay :: +A shortcut for ``add`` and ``prod`` is provided by the usual ``+`` and ``*`` operators. Here is another example where we create a periodic kernel with some decay :: - k1 = GPy.kern.rbf(1,1.,2) - k2 = GPy.kern.periodic_Matern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5) + k1 = GPy.kern.RBF(1,1.,2) + k2 = GPy.kern.PeriodicMatern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5) k = k1 * k2 # equivalent to k = k1.prod(k2) print k @@ -116,14 +113,14 @@ A shortcut for ``add`` and ``prod`` (with default flag ``tensor=False``) is prov In general, ``kern`` objects can be seen as a sum of ``kernparts`` objects, where the latter are covariance functions defined on the same space.
For example, the following code :: k = (k1+k2)*(k1+k2) - print k.parts[0].name, '\n', k.parts[1].name, '\n', k.parts[2].name, '\n', k.parts[3].name + print k.parts[0].name, '\n', k.parts[1].name, '\n', k.parts[1].parts[0].name, '\n', k.parts[1].parts[1].name, '\n' returns :: + add_1 + add_2 + rbf + periodic_Matern52 - rbfrbf - rbfperiodic_Mat52 - periodic_Mat52rbf - periodic_Mat52periodic_Mat52 Constraining the parameters =========================== @@ -137,9 +134,9 @@ Various constraints can be applied to the parameters of a kernel When calling one of these functions, the parameters to constrain can either be specified by a regular expression that matches its name or by a number that corresponds to the rank of the parameter. Here is an example :: - k1 = GPy.kern.rbf(1) + k1 = GPy.kern.RBF(1) k2 = GPy.kern.Matern32(1) - k3 = GPy.kern.white(1) + k3 = GPy.kern.White(1) k = k1 + k2 + k3 print k @@ -182,9 +179,9 @@ In two dimensions ANOVA kernels have the following form: Let us assume that we want to define an ANOVA kernel with a Matern 3/2 kernel for :math:`k_i`. As seen previously, we can define this kernel as follows :: - k_cst = GPy.kern.bias(1,variance=1.) - k_mat = GPy.kern.Matern52(1,variance=1., lengthscale=3) - Kanova = (k_cst + k_mat).prod(k_cst + k_mat,tensor=True) + k_cst = GPy.kern.Bias(1,variance=1.) + k_mat = GPy.kern.Matern52(1,variance=1.,lengthscale=3) + Kanova = (k_cst + k_mat).prod(k_cst + k_mat) print Kanova Printing the resulting kernel outputs the following :: @@ -257,17 +254,17 @@ The submodels can be represented with the option ``which_function`` of ``plot``: import GPy pb.ion() - ker1 = GPy.kern.rbf(D=1) # Equivalent to ker1 = GPy.kern.rbf(D=1, variance=1., lengthscale=1.) - ker2 = GPy.kern.rbf(D=1, variance = .75, lengthscale=3.) + ker1 = GPy.kern.RBF(D=1) # Equivalent to ker1 = GPy.kern.RBF(D=1, variance=1., lengthscale=1.) + ker2 = GPy.kern.RBF(D=1, variance = .75, lengthscale=3.)
+ ker3 = GPy.kern.RBF(1, .5, .25) ker1.plot() ker2.plot() ker3.plot() #pb.savefig("Figures/tuto_kern_overview_basicdef.png") - kernels = [GPy.kern.rbf(1), GPy.kern.exponential(1), GPy.kern.Matern32(1), GPy.kern.Matern52(1), GPy.kern.Brownian(1), GPy.kern.bias(1), GPy.kern.linear(1), GPy.kern.spline(1), GPy.kern.periodic_exponential(1), GPy.kern.periodic_Matern32(1), GPy.kern.periodic_Matern52(1), GPy.kern.white(1)] - kernel_names = ["GPy.kern.rbf", "GPy.kern.exponential", "GPy.kern.Matern32", "GPy.kern.Matern52", "GPy.kern.Brownian", "GPy.kern.bias", "GPy.kern.linear", "GPy.kern.spline", "GPy.kern.periodic_exponential", "GPy.kern.periodic_Matern32", "GPy.kern.periodic_Matern52", "GPy.kern.white"] + kernels = [GPy.kern.RBF(1), GPy.kern.Exponential(1), GPy.kern.Matern32(1), GPy.kern.Matern52(1), GPy.kern.Brownian(1), GPy.kern.Bias(1), GPy.kern.Linear(1), GPy.kern.PeriodicExponential(1), GPy.kern.PeriodicMatern32(1), GPy.kern.PeriodicMatern52(1), GPy.kern.White(1)] + kernel_names = ["GPy.kern.RBF", "GPy.kern.Exponential", "GPy.kern.Matern32", "GPy.kern.Matern52", "GPy.kern.Brownian", "GPy.kern.Bias", "GPy.kern.Linear", "GPy.kern.PeriodicExponential", "GPy.kern.PeriodicMatern32", "GPy.kern.PeriodicMatern52", "GPy.kern.White"] pb.figure(figsize=(16,12)) pb.subplots_adjust(wspace=.5, hspace=.5) diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..a695b3c7 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,10 @@ +[bumpversion] +current_version = 0.8.3 +tag = True +commit = True + +[bumpversion:file:GPy/__version__.py] + +[bdist_wheel] +universal = 1 + diff --git a/setup.py b/setup.py index ef51cd3e..58267ab8 100644 --- a/setup.py +++ b/setup.py @@ -1,18 +1,29 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - +from __future__ import print_function import os import sys from setuptools import setup, Extension import numpy as np -# Version number -version = '0.6.1' def read(fname): return open(os.path.join(os.path.dirname(__file__), fname)).read() -#Mac OS X Clang 
doesn't support OpenMP th the current time. +def read_to_rst(fname): + try: + import pypandoc + #print 'Warning in installation: For rst formatting in pypi, consider installing pypandoc for conversion' + return pypandoc.convert('README.md', 'rst') + except: + return read(fname) + +version_dummy = {} +exec(read('GPy/__version__.py'), version_dummy) +__version__ = version_dummy['__version__'] +del version_dummy + +#Mac OS X Clang doesn't support OpenMP at the current time. #This detects if we are building on a Mac def ismac(): platform = sys.platform @@ -23,7 +34,7 @@ def ismac(): if ismac(): compile_flags = [ '-O3', ] - link_args = [''] + link_args = [] else: compile_flags = [ '-fopenmp', '-O3', ] link_args = ['-lgomp'] @@ -48,9 +59,9 @@ ext_mods = [Extension(name='GPy.kern._src.stationary_cython', extra_compile_args=compile_flags)] setup(name = 'GPy', - version = version, + version = __version__, author = read('AUTHORS.txt'), - author_email = "james.hensman@gmail.com", + author_email = "gpy.authors@gmail.com", description = ("The Gaussian Process Toolbox"), license = "BSD 3-clause", keywords = "machine-learning gaussian-processes kernels", @@ -72,12 +83,13 @@ setup(name = 'GPy', package_dir={'GPy': 'GPy'}, package_data = {'GPy': ['defaults.cfg', 'installation.cfg', 'util/data_resources.json', - 'util/football_teams.json']}, + 'util/football_teams.json', + ]}, include_package_data = True, py_modules = ['GPy.__init__'], test_suite = 'GPy.testing', - long_description=read('README.md'), - install_requires=['numpy>=1.7', 'scipy>=0.16'], + long_description=read_to_rst('README.md'), + install_requires=['numpy>=1.7', 'scipy>=0.16', 'six'], extras_require = {'docs':['matplotlib >=1.3','Sphinx','IPython']}, classifiers=['License :: OSI Approved :: BSD License', 'Natural Language :: English',