Merge branch 'devel' of github.com:SheffieldML/GPy into devel

This commit is contained in:
Alan Saul 2015-10-04 12:28:10 +01:00
commit e1b1faa133
27 changed files with 331 additions and 94 deletions

View file

@ -1,10 +1,10 @@
sudo: false
os:
- osx
- linux
# - osx
language: python
#language: python
#addons:
# apt:
@ -14,28 +14,52 @@ language: python
# - libatlas-base-dev
# - liblapack-dev
python:
- 2.7
- 3.3
- 3.4
before_install:
- wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
- chmod +x miniconda.sh
- ./miniconda.sh -b
- export PATH=/home/travis/miniconda/bin:$PATH
# - conda update --yes conda
install:
- conda install --yes python=$TRAVIS_PYTHON_VERSION numpy=1.9 scipy=0.16 nose pip six
- pip install .
script:
- cd $HOME
- mkdir empty
- cd empty
- nosetests GPy.testing
cache:
directories:
- $HOME/.cache/pip
- $HOME/download/
- $HOME/install/
env:
- PYTHON_VERSION=2.7
- PYTHON_VERSION=3.5
before_install:
- export CONDA_CACHED=1
- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
export OS=Linux;
elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
export OS=MacOSX;
else
echo "OS not supported yet";
exit 1;
fi;
- if [[ $PYTHON_VERSION == "2.7" ]]; then
export MINICONDA=Miniconda;
elif [[ $PYTHON_VERSION == 3* ]]; then
export MINICONDA=Miniconda3;
else
echo "Could not find python version";
exit 1;
fi;
- if [ ! -d $HOME/download/ ]; then mkdir $HOME/download/; fi;
- if [ ! -d $HOME/install/ ]; then mkdir $HOME/install/; fi;
- export MINICONDA_FILE=$MINICONDA-latest-$OS-x86_64-$PYTHON_VERSION
- export MINCONDA_CACHE_FILE=$HOME/download/$MINICONDA_FILE.sh
- export MINICONDA_INSTALL=$HOME/install/$MINICONDA_FILE
- if [ ! -f $MINCONDA_CACHE_FILE ]; then
export CONDA_CACHED=0;
wget http://repo.continuum.io/miniconda/$MINICONDA-latest-$OS-x86_64.sh -O $MINCONDA_CACHE_FILE;
bash $MINCONDA_CACHE_FILE -b -p $MINICONDA_INSTALL;
fi;
- export PATH="$MINICONDA_INSTALL/bin:$PATH";
install:
- conda install --yes python=$PYTHON_VERSION numpy=1.9 scipy=0.16 nose pip six matplotlib;
- pip install codecov
- python setup.py develop
script:
- coverage run travis_tests.py
after_success:
- codecov

View file

@ -34,18 +34,24 @@ except:
def tests(verbose=10):
Tester(testing).test(verbose=verbose)
def load(file_path):
def load(file_or_path):
"""
Load a previously pickled model, using `m.pickle('path/to/file.pickle')`
:param file_name: path/to/file.pickle
"""
import cPickle as pickle
try:
with open(file_path, 'rb') as f:
import cPickle as pickle
if isinstance(file_or_path, basestring):
with open(file_or_path, 'rb') as f:
m = pickle.load(f)
else:
m = pickle.load(file_or_path)
except:
import pickle as pickle
with open(file_path, 'rb') as f:
import pickle
if isinstance(file_or_path, basestring):
with open(file_or_path, 'rb') as f:
m = pickle.load(f)
else:
m = pickle.load(file_or_path)
return m

View file

@ -368,9 +368,9 @@ class Model(Parameterized):
for nind, xind in zip(param_index, transformed_index):
xx = x.copy()
xx[xind] += step
f1 = self._objective(xx)
f1 = float(self._objective(xx))
xx[xind] -= 2.*step
f2 = self._objective(xx)
f2 = float(self._objective(xx))
#Avoid divide by zero, if any of the values are above 1e-15, otherwise both values are essentially
#the same
if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
@ -378,9 +378,9 @@ class Model(Parameterized):
else:
df_ratio = 1.0
df_unstable = df_ratio < df_tolerance
numerical_gradient = (f1 - f2) / (2 * step)
numerical_gradient = (f1 - f2) / (2. * step)
if np.all(gradient[xind] == 0): ratio = (f1 - f2) == gradient[xind]
else: ratio = (f1 - f2) / (2 * step * gradient[xind])
else: ratio = (f1 - f2) / (2. * step * gradient[xind])
difference = np.abs(numerical_gradient - gradient[xind])
if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
@ -422,7 +422,7 @@ class Model(Parameterized):
to_print.append(super(Model, self)._repr_html_())
return "\n".join(to_print)
def __str__(self):
def __str__(self, VT100=True):
model_details = [['Name', self.name],
['Log-likelihood', '{}'.format(float(self.log_likelihood()))],
["Number of Parameters", '{}'.format(self.size)],
@ -432,6 +432,6 @@ class Model(Parameterized):
from operator import itemgetter
max_len = reduce(lambda a, b: max(len(b[0]), a), model_details, 0)
to_print = [""] + ["{0:{l}} : {1}".format(name, detail, l=max_len) for name, detail in model_details] + ["Parameters:"]
to_print.append(super(Model, self).__str__())
to_print.append(super(Model, self).__str__(VT100=VT100))
return "\n".join(to_print)

View file

@ -227,8 +227,8 @@ class Nameable(Gradcheckable):
Make an object nameable inside the hierarchy.
"""
def __init__(self, name, *a, **kw):
super(Nameable, self).__init__(*a, **kw)
self._name = name or self.__class__.__name__
super(Nameable, self).__init__(*a, **kw)
@property
def name(self):

View file

@ -405,7 +405,7 @@ class Parameterized(Parameterizable):
</style>"""
return style + '\n' + '<table class="tg">' + '\n'.format(sep).join(to_print) + '\n</table>'
def __str__(self, header=True):
def __str__(self, header=True, VT100=True):
name = adjust_name_for_printing(self.name) + "."
constrs = self._constraints_str;
ts = self._ties_str
@ -416,7 +416,10 @@ class Parameterized(Parameterizable):
cl = max([len(str(x)) if x else 0 for x in constrs + ["Constraint"]])
tl = max([len(str(x)) if x else 0 for x in ts + ["Tied to"]])
pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]])
if VT100:
format_spec = " \033[1m{{name:<{0}s}}\033[0;0m | {{desc:>{1}s}} | {{const:^{2}s}} | {{pri:^{3}s}} | {{t:^{4}s}}".format(nl, sl, cl, pl, tl)
else:
format_spec = " {{name:<{0}s}} | {{desc:>{1}s}} | {{const:^{2}s}} | {{pri:^{3}s}} | {{t:^{4}s}}".format(nl, sl, cl, pl, tl)
to_print = []
for n, d, c, t, p in zip(names, desc, constrs, ts, prirs):
to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))

View file

@ -42,10 +42,11 @@ class SparseGP(GP):
def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, X_variance=None, inference_method=None,
name='sparse gp', Y_metadata=None, normalizer=False):
#pick a sensible inference method
if inference_method is None:
if isinstance(likelihood, likelihoods.Gaussian):
inference_method = var_dtc.VarDTC(limit=1 if not self.missing_data else Y.shape[1])
inference_method = var_dtc.VarDTC(limit=1)
else:
#inference_method = ??
raise NotImplementedError("what to do what to do?")

View file

@ -144,6 +144,33 @@ class opt_lbfgsb(Optimizer):
if opt_result[2]['warnflag']==2:
self.status = 'Error' + str(opt_result[2]['task'])
class opt_bfgs(Optimizer):
    """
    BFGS optimizer backed by the Scipy implementation
    (:func:`scipy.optimize.fmin_bfgs`).
    """
    def __init__(self, *args, **kwargs):
        Optimizer.__init__(self, *args, **kwargs)
        self.opt_name = "BFGS (Scipy implementation)"

    def opt(self, f_fp=None, f=None, fp=None):
        """
        Run the optimizer

        :param f_fp: callable returning (objective, gradient) at a point;
            used once at the end to record the final objective value
        :param f: objective function
        :param fp: gradient of the objective function
        """
        # Status strings indexed by fmin_bfgs's warnflag (opt_result[6]).
        rcstrings = ['', 'Maximum number of iterations exceeded', 'Gradient and/or function calls not changing']
        opt_dict = {}
        if self.xtol is not None:
            print("WARNING: bfgs doesn't have an xtol arg, so I'm going to ignore it")
        if self.ftol is not None:
            print("WARNING: bfgs doesn't have an ftol arg, so I'm going to ignore it")
        if self.gtol is not None:
            # BUGFIX: fmin_bfgs takes `gtol`, not `pgtol` (`pgtol` belongs to
            # fmin_l_bfgs_b); passing `pgtol` raises a TypeError.
            opt_dict['gtol'] = self.gtol

        opt_result = optimize.fmin_bfgs(f, self.x_init, fp, disp=self.messages,
                                        maxiter=self.max_iters, full_output=True, **opt_dict)
        self.x_opt = opt_result[0]
        self.f_opt = f_fp(self.x_opt)[0]
        self.funct_eval = opt_result[4]
        self.status = rcstrings[opt_result[6]]
class opt_simplex(Optimizer):
def __init__(self, *args, **kwargs):
Optimizer.__init__(self, *args, **kwargs)
@ -255,6 +282,7 @@ def get_optimizer(f_min):
optimizers = {'fmin_tnc': opt_tnc,
'simplex': opt_simplex,
'lbfgsb': opt_lbfgsb,
'org-bfgs': opt_bfgs,
'scg': opt_SCG,
'adadelta':Opt_Adadelta}

View file

@ -32,7 +32,7 @@ class PSICOMP_GH(PSICOMP):
self.Xs = ObsAr(np.empty((self.degree,)+qX.mean.shape))
mu, S = qX.mean.values, qX.variance.values
S_sq = np.sqrt(S)
for i in xrange(self.degree):
for i in range(self.degree):
self.Xs[i] = self.locs[i]*S_sq+mu
return self.Xs
@ -46,7 +46,7 @@ class PSICOMP_GH(PSICOMP):
psi0 = np.zeros((N,))
psi1 = np.zeros((N,M))
psi2 = np.zeros((N,M,M)) if return_psi2_n else np.zeros((M,M))
for i in xrange(self.degree):
for i in range(self.degree):
if self.cache_K:
X = Xs[i]
else:
@ -74,7 +74,7 @@ class PSICOMP_GH(PSICOMP):
dZ = np.zeros_like(Z)
dmu = np.zeros_like(mu)
dS = np.zeros_like(S)
for i in xrange(self.degree):
for i in range(self.degree):
if self.cache_K:
X = Xs[i]
else:

View file

@ -7,6 +7,8 @@ from .stationary import Stationary
from .psi_comp import PSICOMP_RBF
from .psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU
from ...util.config import *
from ...core import Param
from GPy.core.parameterization.transformations import Logexp
class RBF(Stationary):
"""
@ -18,12 +20,17 @@ class RBF(Stationary):
"""
_support_GPU = True
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False):
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False, inv_l=False):
super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
if self.useGPU:
self.psicomp = PSICOMP_RBF_GPU()
else:
self.psicomp = PSICOMP_RBF()
self.use_invLengthscale = inv_l
if inv_l:
self.unlink_parameter(self.lengthscale)
self.inv_l = Param('inv_lengthscale',1./self.lengthscale**2, Logexp())
self.link_parameter(self.inv_l)
def K_of_r(self, r):
return self.variance * np.exp(-0.5 * r**2)
@ -48,6 +55,10 @@ class RBF(Stationary):
assert self.input_dim == 1 #TODO: higher dim spectra?
return self.variance*np.sqrt(2*np.pi)*self.lengthscale*np.exp(-self.lengthscale*2*omega**2/2)
def parameters_changed(self):
if self.use_invLengthscale: self.lengthscale[:] = 1./np.sqrt(self.inv_l+1e-200)
super(RBF,self).parameters_changed()
#---------------------------------------#
# PSI statistics #
#---------------------------------------#
@ -68,6 +79,8 @@ class RBF(Stationary):
dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[:2]
self.variance.gradient = dL_dvar
self.lengthscale.gradient = dL_dlengscale
if self.use_invLengthscale:
self.inv_l.gradient = dL_dlengscale*(self.lengthscale**3/-2.)
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2]
@ -75,3 +88,10 @@ class RBF(Stationary):
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]
def update_gradients_diag(self, dL_dKdiag, X):
super(RBF,self).update_gradients_diag(dL_dKdiag, X)
if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)
def update_gradients_full(self, dL_dK, X, X2=None):
super(RBF,self).update_gradients_full(dL_dK, X, X2)
if self.use_invLengthscale: self.inv_l.gradient =self.lengthscale.gradient*(self.lengthscale**3/-2.)

View file

@ -25,7 +25,7 @@ class Linear(Mapping):
"""
def __init__(self, input_dim, output_dim, name='linmap'):
Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name)
super(Linear, self).__init__(input_dim=input_dim, output_dim=output_dim, name=name)
self.A = Param('A', np.random.randn(self.input_dim, self.output_dim))
self.link_parameter(self.A)

View file

@ -20,10 +20,10 @@ class GPClassification(GP):
"""
def __init__(self, X, Y, kernel=None,Y_metadata=None):
def __init__(self, X, Y, kernel=None,Y_metadata=None, mean_function=None):
if kernel is None:
kernel = kern.RBF(X.shape[1])
likelihood = likelihoods.Bernoulli()
GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, inference_method=EP(), name='gp_classification')
GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, inference_method=EP(), mean_function=mean_function, name='gp_classification')

View file

@ -361,7 +361,7 @@ class SkewChecker(HessianChecker):
#Check every block individually (for ease)
check_passed = [False]*numeric_hess.shape[2]
for block_ind in xrange(numeric_hess.shape[2]):
for block_ind in range(numeric_hess.shape[2]):
#Unless super_plot is set, just plot the first one
p = True if (plot and block_ind == numeric_hess.shape[2]-1) or super_plot else False
if verbose:

View file

@ -342,3 +342,60 @@ class MRD(BayesianGPLVMMiniBatch):
self.kern = self.bgplvms[0].kern
self.likelihood = self.bgplvms[0].likelihood
self.parameters_changed()
def factorize_space(self, threshold=0.005, printOut=False, views=None):
    """
    Given a trained MRD model, look at the optimized ARD weights (lengthscales)
    and decide which parts of the latent space are shared across views and
    which are private to a view, according to a threshold.

    Each view's sensitivities are normalized so that the maximum value is 1
    before the threshold is applied.

    :param threshold: relative sensitivity below which a latent dimension is
        considered switched off for a view
    :param printOut: if True, print the shared and private dimensions found
    :param views: optional pair ``([observed view indices], inferred view
        index)``; if None, defaults to view 0 observed / view 1 inferred and
        requires exactly two views
    :returns: ``(sharedDims, privateDims)`` — a sorted list of dimensions
        active in every view, and a list (one entry per view) of sorted lists
        of dimensions active only in that view
    """
    M = len(self.bgplvms)
    if views is None:
        # The default setting only covers the two-view case; pass `views`
        # explicitly for more views.
        # BUGFIX: was `M is not 2` — identity comparison against an int
        # literal; use value equality instead.
        if M != 2:
            raise NotImplementedError("Not implemented for M > 2")
        obsMod = [0]
        infMod = 1
    else:
        obsMod = views[0]
        infMod = views[1]

    # Normalized ARD sensitivities per observed view (max scaled to 1).
    # WARNING: the trailing [0] assumes that, for an additive kernel, the ARD
    # kernel is the first part.
    scObs = []
    for v in obsMod:
        s = np.atleast_2d(self.bgplvms[v].kern.input_sensitivity(summarize=False))[0]
        # Divide out-of-place so the kernel's returned array is not mutated.
        scObs.append(s / np.max(s))
    scInf = np.atleast_2d(self.bgplvms[infMod].kern.input_sensitivity(summarize=False))[0]
    scInf = scInf / np.max(scInf)

    # Latent dimensions each view retains after thresholding.
    retainedScales = [None] * (len(obsMod) + 1)
    for i, v in enumerate(obsMod):
        retainedScales[v] = [k for k in np.where(scObs[i] > threshold)[0]]
    retainedScales[infMod] = [k for k in np.where(scInf > threshold)[0]]

    # Shared dimensions are those active in every view.
    sharedDims = set(retainedScales[obsMod[0]]).intersection(retainedScales[infMod])
    for v in obsMod[1:]:
        sharedDims = sharedDims.intersection(retainedScales[v])

    # Private dimensions: active in a view but not shared.
    privateDims = [sorted(set(r).difference(sharedDims)) for r in retainedScales]
    sharedDims = sorted(sharedDims)

    if printOut:
        print('# Shared dimensions: ' + str(sharedDims))
        for i in range(len(retainedScales)):
            print('# Private dimensions model ' + str(i) + ':' + str(privateDims[i]))
    return sharedDims, privateDims

View file

@ -2,12 +2,12 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from latent_space_visualizations.controllers.imshow_controller import ImshowController,ImAnnotateController
from .latent_space_visualizations.controllers.imshow_controller import ImshowController,ImAnnotateController
from ...core.parameterization.variational import VariationalPosterior
from .base_plots import x_frame2D
import itertools
try:
import Tango
from . import Tango
from matplotlib.cm import get_cmap
from matplotlib import pyplot as pb
from matplotlib import cm

View file

@ -3,7 +3,7 @@
import numpy as np
from matplotlib import pyplot as pb
import Tango
from . import Tango
from matplotlib.textpath import TextPath
from matplotlib.transforms import offset_copy
from .base_plots import ax_default

View file

@ -1 +1 @@
import controllers
from .import controllers

View file

@ -1 +1 @@
import axis_event_controller, imshow_controller
from . import axis_event_controller, imshow_controller

View file

@ -3,7 +3,7 @@ Created on 24 Jul 2013
@author: maxz
'''
from axis_event_controller import BufferedAxisChangedController
from .axis_event_controller import BufferedAxisChangedController
import itertools
import numpy

View file

@ -3,11 +3,11 @@
import numpy as np
try:
import Tango
from . import Tango
from matplotlib import pyplot as pb
except:
pass
from base_plots import x_frame1D, x_frame2D
from .base_plots import x_frame1D, x_frame2D
def plot_mapping(self, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, samples=0, fignum=None, ax=None, fixed_inputs=[], linecol=Tango.colorsHex['darkBlue']):

View file

@ -230,10 +230,16 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
#set the limits of the plot to some sensible values
try:
ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper))
if ymin != ymax:
ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
except:
# do nothing
# No training data on model
pass
#add inducing inputs (if a sparse model is used)
if hasattr(model,"Z"):

View file

@ -5,7 +5,7 @@ The module plotting results for SSGPLVM
import pylab
from ...models import SSGPLVM
from img_plots import plot_2D_images
from .img_plots import plot_2D_images
class SSGPLVM_plot(object):
def __init__(self,model, imgsize):

View file

@ -4,7 +4,7 @@ import GPy
import numpy as np
import matplotlib as mpl
import time
from GPy.core.parameterization.variational import VariationalPosterior
from ...core.parameterization.variational import VariationalPosterior
try:
import visual
visual_available = True

View file

@ -6,7 +6,7 @@ import numpy as np
import GPy
class MFtests(unittest.TestCase):
def simple_mean_function():
def test_simple_mean_function(self):
"""
The simplest possible mean function. No parameters, just a simple Sinusoid.
"""

View file

@ -14,9 +14,9 @@ class TestModel(GPy.core.Model):
"""
A simple GPy model with one parameter.
"""
def __init__(self):
def __init__(self, theta=1.):
GPy.core.Model.__init__(self, 'test_model')
theta = GPy.core.Param('theta', 1.)
theta = GPy.core.Param('theta', theta)
self.link_parameter(theta)
def log_likelihood(self):
@ -34,7 +34,7 @@ class RVTransformationTestCase(unittest.TestCase):
# The PDF of the transformed variables
p_phi = lambda phi : np.exp(-m._objective_grads(phi)[0])
# To the empirical PDF of:
theta_s = prior.rvs(1e6)
theta_s = prior.rvs(1e5)
phi_s = trans.finv(theta_s)
# which is essentially a kernel density estimation
kde = st.gaussian_kde(phi_s)
@ -55,15 +55,31 @@ class RVTransformationTestCase(unittest.TestCase):
# END OF PLOT
# The following test cannot be very accurate
self.assertTrue(np.linalg.norm(pdf_phi - kde(phi)) / np.linalg.norm(kde(phi)) <= 1e-1)
# Check the gradients at a few random points
for i in range(5):
m.theta = theta_s[i]
self.assertTrue(m.checkgrad(verbose=True))
def _test_grad(self, trans):
np.random.seed(1234)
m = TestModel(np.random.uniform(.5, 1.5, 20))
prior = GPy.priors.LogGaussian(.5, 0.1)
m.theta.set_prior(prior)
m.theta.constrain(trans)
m.randomize()
print(m)
self.assertTrue(m.checkgrad(1))
def test_Logexp(self):
self._test_trans(GPy.constraints.Logexp())
@unittest.skip("Gradient not checking right, @jameshensman what is going on here?")
def test_Logexp_grad(self):
self._test_grad(GPy.constraints.Logexp())
def test_Exponent(self):
self._test_trans(GPy.constraints.Exponent())
@unittest.skip("Gradient not checking right, @jameshensman what is going on here?")
def test_Exponent_grad(self):
self._test_grad(GPy.constraints.Exponent())
if __name__ == '__main__':
unittest.main()

View file

@ -1,6 +1,5 @@
# GPy
A Gaussian processes framework in Python.
* [GPy homepage](http://sheffieldml.github.io/GPy/)
@ -8,8 +7,21 @@ A Gaussian processes framework in Python.
* [User mailing list](https://lists.shef.ac.uk/sympa/subscribe/gpy-users)
* [Online documentation](https://gpy.readthedocs.org/en/latest/)
* [Unit tests (Travis-CI)](https://travis-ci.org/SheffieldML/GPy)
* [![licence](https://img.shields.io/badge/licence-BSD-blue.svg)](http://opensource.org/licenses/BSD-3-Clause)
#### Continuous integration
| | Travis-CI | Codecov | Readthedocs |
| ---: | :--: | :---: | :---: |
| **master:** | [![master](https://travis-ci.org/SheffieldML/GPy.svg?branch=master)](https://travis-ci.org/SheffieldML/GPy) | [![codecov.io](http://codecov.io/github/SheffieldML/GPy/coverage.svg?branch=master)](http://codecov.io/github/SheffieldML/GPy?branch=master) | [![mdocs](https://img.shields.io/badge/docs-master-blue.svg?style=flat)](http://gpy.readthedocs.org/en/master/) |
| **devel:** | [![devel](https://travis-ci.org/SheffieldML/GPy.svg?branch=devel)](https://travis-ci.org/SheffieldML/GPy) | [![codecov.io](http://codecov.io/github/SheffieldML/GPy/coverage.svg?branch=devel)](http://codecov.io/github/SheffieldML/GPy?branch=devel) | [![ddocs](https://img.shields.io/badge/docs-devel-blue.svg?style=flat)](http://gpy.readthedocs.org/en/devel/) |
### Supported Platforms:
[<img src="https://www.python.org/static/community_logos/python-logo-generic.svg" height="40">](https://www.python.org/) [<img src="https://upload.wikimedia.org/wikipedia/commons/5/5f/Windows_logo_-_2012.svg" height="40">](http://www.microsoft.com/en-gb/windows) [<img src="https://upload.wikimedia.org/wikipedia/commons/8/8e/OS_X-Logo.svg" height="40">](http://www.apple.com/osx/) [<img src="https://upload.wikimedia.org/wikipedia/commons/3/35/Tux.svg" height="40">](https://en.wikipedia.org/wiki/List_of_Linux_distributions)
----
Python 2.7, 3.3 and higher
Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GPy.png)
### Citation
@ -20,9 +32,9 @@ Continuous integration status: ![CI status](https://travis-ci.org/SheffieldML/GP
year = {2012--2015}
}
### Pronunciation
### Pronunciation: dʒí páj
We like to pronounce it 'Gee-pie'.
We like to pronounce it 'g-pie'.
### Getting started: installing with pip
@ -39,12 +51,9 @@ although enthought currently (as of 8th Sep. 2015) does not support scipy 0.16.
If you'd like to install from source, or want to contribute to the project (e.g. by sending pull requests via github), read on.
### Python 3 Compatibility
Work is underway to make GPy run on Python 3.
### Troubleshooting installation problems
* All tests in the testsuite now run on Python3.
To see this for yourself, in Ubuntu 14.04, you can do
If you're having trouble installing GPy via `pip install GPy` here is a probable solution:
git clone https://github.com/mikecroucher/GPy.git
cd GPy
@ -52,19 +61,11 @@ To see this for yourself, in Ubuntu 14.04, you can do
python3 setup.py build_ext --inplace
nosetests3 GPy/testing
nosetests3 is Ubuntu's way of referring to the Python 3 version of nosetests. You install it with
sudo apt-get install python3-nose
The command `python3 setup.py build_ext --inplace` builds the Cython extensions. If it doesn't work, you may need to install this:
sudo apt-get install python3-dev
* Test coverage is less than 100% so it is expected that there is still more work to be done. We need more tests and examples to try out.
* All weave functions not covered by the test suite are *simply commented out*. Can add equivalents later as test functions become available
* A set of benchmarks would be useful!
### Direct downloads
[![PyPI version](https://badge.fury.io/py/GPy.svg)](https://pypi.python.org/pypi/GPy) [![source](https://img.shields.io/badge/download-source-green.svg)](https://github.com/SheffieldML/GPy/releases/latest)
[![Windows](https://img.shields.io/badge/download-windows-orange.svg)](https://github.com/SheffieldML/GPy/releases/latest)
[![MacOSX](https://img.shields.io/badge/download-macosx-blue.svg)](https://github.com/SheffieldML/GPy/releases/latest)
### Ubuntu hackers
@ -165,6 +166,8 @@ or from within IPython
Current support for the GPy software is coming through the following projects.
* [EU FP7-HEALTH Project Ref 305626](http://radiant-project.eu) "RADIANT: Rapid Development and Distribution of Statistical Tools for High-Throughput Sequencing Data"
* [EU FP7-PEOPLE Project Ref 316861](http://staffwww.dcs.shef.ac.uk/people/N.Lawrence/projects/mlpm/) "MLPM2012: Machine Learning for Personalized Medicine"
* MRC Special Training Fellowship "Bayesian models of expression in the transcriptome for clinical RNA-seq"

View file

@ -1,5 +1,39 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Copyright (c) 2014, James Hensman, Max Zwiessele
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramax nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from __future__ import print_function
import os
import sys

39
travis_tests.py Normal file
View file

@ -0,0 +1,39 @@
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramax nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
#!/usr/bin/env python
import matplotlib
matplotlib.use('svg')
import nose
nose.main('GPy', defaultTest='GPy/testing')