[updates] merged update structure

This commit is contained in:
Max Zwiessele 2014-09-22 09:29:48 +01:00
commit bccd8e094a
36 changed files with 190 additions and 107 deletions

View file

@ -51,7 +51,7 @@ class GP(Model):
assert Y.ndim == 2
logger.info("initializing Y")
if normalizer is None:
if normalizer is True:
self.normalizer = MeanNorm()
elif normalizer is False:
self.normalizer = None

View file

@ -213,6 +213,7 @@ class Model(Parameterized):
def optimize(self, optimizer=None, start=None, **kwargs):
"""
Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.
kwargs are passed to the optimizer. They can be:
:param max_f_eval: maximum number of function evaluations
@ -222,7 +223,15 @@ class Model(Parameterized):
:param optimizer: which optimizer to use (defaults to self.preferred optimizer)
:type optimizer: string
TODO: valid args
Valid optimizers are:
- 'scg': scaled conjugate gradient method, recommended for stability.
See also GPy.inference.optimization.scg
- 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc)
- 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin),
- 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b),
- 'sgd': stochastic gradient descent (see scipy.optimize.sgd). For experts only!
"""
if self.is_fixed:
raise RuntimeError, "Cannot optimize, when everything is fixed"

View file

@ -3,6 +3,7 @@
import numpy as np
from sparse_gp import SparseGP
from numpy.linalg.linalg import LinAlgError
from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch
import logging
@ -42,10 +43,10 @@ class SparseGP_MPI(SparseGP):
assert isinstance(inference_method, VarDTC_minibatch), 'inference_method has to support MPI!'
super(SparseGP_MPI, self).__init__(X, Y, Z, kernel, likelihood, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=normalizer)
self.updates = False
self.add_parameter(self.X, index=0)
self.update_model(False)
self.link_parameter(self.X, index=0)
if variational_prior is not None:
self.add_parameter(variational_prior)
self.link_parameter(variational_prior)
# self.X.fix()
self.mpi_comm = mpi_comm
@ -58,7 +59,8 @@ class SparseGP_MPI(SparseGP):
self.Y_local = self.Y[N_start:N_end]
print 'MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range)
mpi_comm.Bcast(self.param_array, root=0)
self.updates = True
self.update_model(True)
def __getstate__(self):
dc = super(SparseGP_MPI, self).__getstate__()
@ -83,10 +85,6 @@ class SparseGP_MPI(SparseGP):
if self._IN_OPTIMIZATION_ and self.mpi_comm.rank==0:
self.mpi_comm.Bcast(np.int32(1),root=0)
self.mpi_comm.Bcast(p, root=0)
from ..util.debug import checkFinite
checkFinite(p, 'optimizer_array')
SparseGP.optimizer_array.fset(self,p)
def optimize(self, optimizer=None, start=None, **kwargs):
@ -102,7 +100,13 @@ class SparseGP_MPI(SparseGP):
while True:
self.mpi_comm.Bcast(flag,root=0)
if flag==1:
self.optimizer_array = x
try:
self.optimizer_array = x
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
elif flag==-1:
break
else:

View file

@ -5,9 +5,13 @@
"""
Gaussian Processes classification
"""
import pylab as pb
import GPy
try:
import pylab as pb
except:
pass
default_seed = 10000
def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):

View file

@ -1,5 +1,8 @@
import numpy as np
import pylab as pb
try:
import pylab as pb
except:
pass
import GPy
pb.ion()
pb.close('all')

View file

@ -1,7 +1,10 @@
import GPy
import numpy as np
import matplotlib.pyplot as plt
from GPy.util import datasets
try:
import matplotlib.pyplot as plt
except:
pass
def student_t_approx(optimize=True, plot=True):
"""

View file

@ -4,7 +4,10 @@
"""
Gaussian Processes regression examples
"""
import pylab as pb
try:
import pylab as pb
except:
pass
import numpy as np
import GPy

View file

@ -1,7 +1,10 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import pylab as pb
try:
import pylab as pb
except:
pass
import numpy as np
import GPy

View file

@ -6,8 +6,11 @@
Code of Tutorials
"""
import pylab as pb
pb.ion()
try:
import pylab as pb
pb.ion()
except:
pass
import numpy as np
import GPy

View file

@ -124,6 +124,7 @@ class vDTC(object):
v, _ = dtrtrs(L, tmp, lower=1, trans=1)
tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
P = tdot(tmp.T)
stop
#compute log marginal
log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \

View file

@ -2,7 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from posterior import Posterior
from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs
from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri,pdinv
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
import numpy as np
@ -144,6 +144,7 @@ class VarDTC_minibatch(LatentFunctionInference):
"""
num_data, output_dim = Y.shape
input_dim = Z.shape[0]
if self.mpi_comm != None:
num_data_all = np.array(num_data,dtype=np.int32)
self.mpi_comm.Allreduce([np.int32(num_data), MPI.INT], [num_data_all, MPI.INT])
@ -166,31 +167,18 @@ class VarDTC_minibatch(LatentFunctionInference):
# Compute Common Components
#======================================================================
from ...util.debug import checkFullRank
Kmm = kern.K(Z).copy()
diag.add(Kmm, self.const_jitter)
r1 = checkFullRank(Kmm,name='Kmm')
Lm = jitchol(Kmm)
KmmInv,Lm,LmInv,_ = pdinv(Kmm)
LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right')
LmInvPsi2LmInvT = LmInv.dot(psi2_full).dot(LmInv.T)
Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT
r2 = checkFullRank(Lambda,name='Lambda')
if (not r1) or (not r2):
raise
LL = jitchol(Lambda)
LL = np.dot(Lm,LL)
b,_ = dtrtrs(LL, psi1Y_full.T)
LInv,LL,LLInv,logdet_L = pdinv(Lambda)
b = LLInv.dot(LmInv.dot(psi1Y_full.T))
bbt = np.square(b).sum()
v,_ = dtrtrs(LL.T,b,lower=False)
vvt = np.einsum('md,od->mo',v,v)
v = LmInv.T.dot(LLInv.T.dot(b))
Psi2LLInvT = dtrtrs(LL,psi2_full)[0].T
LmInvPsi2LLInvT= dtrtrs(Lm,Psi2LLInvT)[0]
KmmInvPsi2LLInvT = dtrtrs(Lm,LmInvPsi2LLInvT,trans=True)[0]
KmmInvPsi2P = dtrtrs(LL,KmmInvPsi2LLInvT.T, trans=True)[0].T
dL_dpsi2R = (output_dim*KmmInvPsi2P - vvt)/2. # dL_dpsi2 with R inside psi2
dL_dpsi2R = LmInv.T.dot(-LLInv.T.dot(tdot(b)+output_dim*np.eye(input_dim)).dot(LLInv)+output_dim*np.eye(input_dim)).dot(LmInv)/2.
# Cache intermediate results
self.midRes['dL_dpsi2R'] = dL_dpsi2R
@ -203,20 +191,20 @@ class VarDTC_minibatch(LatentFunctionInference):
logL_R = -np.log(beta).sum()
else:
logL_R = -num_data*np.log(beta)
logL = -(output_dim*(num_data*log_2_pi+logL_R+psi0_full-np.trace(LmInvPsi2LmInvT))+YRY_full-bbt)/2.-output_dim*(-np.log(np.diag(Lm)).sum()+np.log(np.diag(LL)).sum())
logL = -(output_dim*(num_data*log_2_pi+logL_R+psi0_full-np.trace(LmInvPsi2LmInvT))+YRY_full-bbt)/2.-output_dim*logdet_L/2.
#======================================================================
# Compute dL_dKmm
#======================================================================
dL_dKmm = -(output_dim*np.einsum('md,od->mo',KmmInvPsi2LLInvT,KmmInvPsi2LLInvT) + vvt)/2.
dL_dKmm = dL_dpsi2R - output_dim*KmmInv.dot(psi2_full).dot(KmmInv)/2.
#======================================================================
# Compute the Posterior distribution of inducing points p(u|Y)
#======================================================================
if not self.Y_speedup or het_noise:
post = Posterior(woodbury_inv=KmmInvPsi2P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm)
post = Posterior(woodbury_inv=LmInv.T.dot(np.eye(input_dim)-LInv).dot(LmInv), woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm)
else:
post = None
@ -341,13 +329,7 @@ def update_gradients(model, mpi_comm=None):
Y = model.Y_local
X = model.X[model.N_range[0]:model.N_range[1]]
try:
model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, X, model.Z, model.likelihood, Y)
except Exception:
if model.mpi_comm is None or model.mpi_comm.rank==0:
import time
model.pickle('model_'+str(int(time.time()))+'.pickle')
raise
model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, X, model.Z, model.likelihood, Y)
het_noise = model.likelihood.variance.size > 1

View file

@ -10,11 +10,11 @@ class Hierarchical(Kernpart):
A kernel part which can represent a hierarchy of independence: a generalisation of independent_outputs
"""
def __init__(self,parts):
def __init__(self,parts,name='hierarchy'):
self.levels = len(parts)
self.input_dim = parts[0].input_dim + 1
self.num_params = np.sum([k.num_params for k in parts])
self.name = 'hierarchy'
self.name = name
self.parts = parts
self.param_starts = np.hstack((0,np.cumsum([k.num_params for k in self.parts[:-1]])))

View file

@ -20,8 +20,6 @@ class RBF(Stationary):
_support_GPU = True
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False):
super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
self.weave_options = {}
self.group_spike_prob = False
self.psicomp = PSICOMP_RBF()
if self.useGPU:
self.psicomp = PSICOMP_RBF_GPU()

View file

@ -171,7 +171,8 @@ class Stationary(Kern):
#the lower memory way with a loop
ret = np.empty(X.shape, dtype=np.float64)
[np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q]) for q in xrange(self.input_dim)]
for q in xrange(self.input_dim):
np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q])
ret /= self.lengthscale**2
return ret
@ -309,6 +310,19 @@ class Matern52(Stationary):
class ExpQuad(Stationary):
"""
The Exponentiated quadratic covariance function.
.. math::
k(r) = \sigma^2 \exp \bigg(- \\frac{1}{2} r^2 \bigg)
notes::
- Yes, this is exactly the same as the RBF covariance function, but the
RBF implementation also has some features for doing variational kernels
(the psi-statistics).
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='ExpQuad'):
super(ExpQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)

View file

@ -3,14 +3,10 @@
import numpy as np
from scipy import weave
from kern import Kern
from ...util.linalg import tdot
from ...util.misc import param_to_array
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from ...util.caching import Cache_this
from ...core.parameterization import variational
from ...util.config import *
class TruncLinear(Kern):

View file

@ -3,8 +3,6 @@
import numpy as np
import pylab as pb
import sys, pdb
from ..core import GP
from ..models import GPLVM
from ..mappings import *

View file

@ -3,7 +3,6 @@
import numpy as np
import pylab as pb
from .. import kern
from ..core import GP, Param
from ..likelihoods import Gaussian
@ -55,7 +54,7 @@ class GPLVM(GP):
#J = np.zeros((X.shape[0],X.shape[1],self.output_dim))
J = self.jacobian(X)
for i in range(X.shape[0]):
target[i]=np.sqrt(pb.det(np.dot(J[i,:,:],np.transpose(J[i,:,:]))))
target[i]=np.sqrt(np.linalg.det(np.dot(J[i,:,:],np.transpose(J[i,:,:]))))
return target
def plot(self):
@ -63,6 +62,7 @@ class GPLVM(GP):
pb.scatter(self.likelihood.Y[:, 0], self.likelihood.Y[:, 1], 40, self.X[:, 0].copy(), linewidth=0, cmap=pb.cm.jet) # @UndefinedVariable
Xnew = np.linspace(self.X.min(), self.X.max(), 200)[:, None]
mu, _ = self.predict(Xnew)
import pylab as pb
pb.plot(mu[:, 0], mu[:, 1], 'k', linewidth=1.5)
def plot_latent(self, labels=None, which_indices=None,

View file

@ -3,13 +3,8 @@
import numpy as np
import pylab as pb
import sys, pdb
import sys
from GPy.models.sparse_gp_regression import SparseGPRegression
from GPy.models.gplvm import GPLVM
# from .. import kern
# from ..core import model
# from ..util.linalg import pdinv, PCA
class SparseGPLVM(SparseGPRegression):
"""

View file

@ -1,4 +1,7 @@
# Copyright (c) 2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import matplot_dep
try:
import matplot_dep
except (ImportError, NameError):
print 'Fail to load GPy.plotting.matplot_dep.'

View file

@ -2,8 +2,11 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import Tango
import pylab as pb
try:
import Tango
import pylab as pb
except:
pass
import numpy as np
def ax_default(fignum, ax):

View file

@ -1,12 +1,16 @@
import pylab as pb
import numpy as np
from latent_space_visualizations.controllers.imshow_controller import ImshowController,ImAnnotateController
from ...util.misc import param_to_array
from ...core.parameterization.variational import VariationalPosterior
from .base_plots import x_frame2D
import itertools
import Tango
from matplotlib.cm import get_cmap
try:
import Tango
from matplotlib.cm import get_cmap
import pylab as pb
except:
pass
def most_significant_input_dimensions(model, which_indices):
"""

View file

@ -1,8 +1,10 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import pylab as pb
import sys
try:
import pylab as pb
except:
pass
#import numpy as np
#import Tango
#from base_plots import gpplot, x_frame1D, x_frame2D

View file

@ -100,9 +100,7 @@ def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False, filtering=Non
return ax
def plot(kernel, x=None, plot_limits=None, which_parts='all', resolution=None, *args, **kwargs):
if which_parts == 'all':
which_parts = [True] * kernel.size
def plot(kernel, x=None, plot_limits=None, resolution=None, *args, **kwargs):
if kernel.input_dim == 1:
if x is None:
x = np.zeros((1, 1))
@ -133,7 +131,7 @@ def plot(kernel, x=None, plot_limits=None, which_parts='all', resolution=None, *
assert x.size == 2, "The size of the fixed variable x is not 2"
x = x.reshape((1, 2))
if plot_limits == None:
if plot_limits is None:
xmin, xmax = (x - 5).flatten(), (x + 5).flatten()
elif len(plot_limits) == 2:
xmin, xmax = plot_limits
@ -142,12 +140,10 @@ def plot(kernel, x=None, plot_limits=None, which_parts='all', resolution=None, *
resolution = resolution or 51
xx, yy = np.mgrid[xmin[0]:xmax[0]:1j * resolution, xmin[1]:xmax[1]:1j * resolution]
xg = np.linspace(xmin[0], xmax[0], resolution)
yg = np.linspace(xmin[1], xmax[1], resolution)
Xnew = np.vstack((xx.flatten(), yy.flatten())).T
Kx = kernel.K(Xnew, x, which_parts)
Kx = kernel.K(Xnew, x)
Kx = Kx.reshape(resolution, resolution).T
pb.contour(xg, yg, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) # @UndefinedVariable
pb.contour(xx, xx, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) # @UndefinedVariable
pb.xlim(xmin[0], xmax[0])
pb.ylim(xmin[1], xmax[1])
pb.xlabel("x1")

View file

@ -1,9 +1,12 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import pylab as pb
import numpy as np
import Tango
try:
import Tango
import pylab as pb
except:
pass
from base_plots import x_frame1D, x_frame2D

View file

@ -1,13 +1,14 @@
import numpy as np
import pylab as pb
import matplotlib.patches as patches
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
#from matplotlib import cm
try:
import pylab as pb
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
#from matplotlib import cm
pb.ion()
except:
pass
import re
pb.ion()
def plot(shape_records,facecolor='w',edgecolor='k',linewidths=.5, ax=None,xlims=None,ylims=None):
"""
Plot the geometry of a shapefile

View file

@ -1,9 +1,12 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import pylab as pb
try:
import Tango
import pylab as pb
except:
pass
import numpy as np
import Tango
from base_plots import gpplot, x_frame1D, x_frame2D
from ...util.misc import param_to_array
from ...models.gp_coregionalized_regression import GPCoregionalizedRegression

View file

@ -3,7 +3,10 @@
import numpy as np
import pylab as pb
try:
import pylab as pb
except:
pass
def univariate_plot(prior):

View file

@ -6,7 +6,6 @@ import pylab
from ...models import SSGPLVM
from img_plots import plot_2D_images
from ...util.misc import param_to_array
class SSGPLVM_plot(object):
def __init__(self,model, imgsize):

View file

@ -215,7 +215,10 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb
if verbose:
print("Checking gradients of Kdiag(X) wrt X.")
try:
result = Kern_check_dKdiag_dX(kern, X=X).checkgrad(verbose=verbose)
testmodel = Kern_check_dKdiag_dX(kern, X=X)
if fixed_X_dims is not None:
testmodel.X[:,fixed_X_dims].fix()
result = testmodel.checkgrad(verbose=verbose)
except NotImplementedError:
result=True
if verbose:
@ -346,6 +349,7 @@ class KernelTestsNonContinuous(unittest.TestCase):
kern = GPy.kern.IndependentOutputs(k, -1, name='ind_split')
self.assertTrue(check_kernel_gradient_functions(kern, X=self.X, X2=self.X2, verbose=verbose, fixed_X_dims=-1))
def test_ODE_UY(self):
kern = GPy.kern.ODE_UY(2, active_dims=[0, self.D])
X = self.X[self.X[:,-1]!=2]

View file

@ -143,8 +143,9 @@ class ParameterizedTest(unittest.TestCase):
def test_randomize(self):
ps = self.test1.param.view(np.ndarray).copy()
self.test1.param[2:5].fix()
self.test1.param.randomize()
self.assertFalse(np.all(ps==self.test1.param))
self.assertFalse(np.all(ps==self.test1.param),str(ps)+str(self.test1.param))
def test_fixing_randomize_parameter_handling(self):
self.rbf.fix(warning=True)
@ -164,10 +165,8 @@ class ParameterizedTest(unittest.TestCase):
def test_fixing_optimize(self):
self.testmodel.kern.lengthscale.fix()
val = float(self.testmodel.kern.lengthscale)
val2 = float(self.testmodel.kern.variance)
self.testmodel.randomize()
self.assertEqual(val, self.testmodel.kern.lengthscale)
self.assertNotEqual(val2, self.testmodel.kern.variance)
def test_add_parameter_in_hierarchy(self):
from GPy.core import Param

View file

@ -2,7 +2,6 @@ import csv
import os
import copy
import numpy as np
import pylab as pb
import GPy
import scipy.io
import cPickle as pickle
@ -346,6 +345,7 @@ def football_data(season='1314', data_set='football_data'):
data_resources[data_set_season]['files'] = [files]
if not data_available(data_set_season):
download_data(data_set_season)
import pylab as pb
for file in reversed(files):
filename = os.path.join(data_path, data_set_season, file)
# rewrite files removing blank rows.

View file

@ -5,8 +5,11 @@ Created on 10 Sep 2012
@copyright: Max Zwiessele 2012
'''
import numpy
import pylab
import matplotlib
try:
import pylab
import matplotlib
except:
pass
from numpy.linalg.linalg import LinAlgError
class pca(object):
@ -88,13 +91,15 @@ class pca(object):
def plot_2d(self, X, labels=None, s=20, marker='o',
dimensions=(0, 1), ax=None, colors=None,
fignum=None, cmap=matplotlib.cm.jet, # @UndefinedVariable
fignum=None, cmap=None, # @UndefinedVariable
** kwargs):
"""
Plot dimensions `dimensions` with given labels against each other in
PC space. Labels can be any sequence of labels of dimensions X.shape[0].
Labels can be drawn with a subsequent call to legend()
"""
if cmap is None:
cmap = matplotlib.cm.jet
if ax is None:
fig = pylab.figure(fignum)
ax = fig.add_subplot(111)

View file

@ -84,6 +84,14 @@ GPy.testing.prior_tests module
:undoc-members:
:show-inheritance:
GPy.testing.tie_tests module
----------------------------
.. automodule:: GPy.testing.tie_tests
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------

View file

@ -19,6 +19,9 @@ You may also be interested by some examples in the GPy/examples folder.
Contents:
.. toctree::
:maxdepth: 2
installation
GPy

31
doc/installation.rst Normal file
View file

@ -0,0 +1,31 @@
==============
Installation
==============
Linux
============
Windows
======================
One easy way to get a Python distribution with the required packages is to use the Anaconda environment from Continuum Analytics.
* Download and install the free version of Anaconda according to your operating system from `their website <https://store.continuum.io>`_.
* Open a (new) terminal window:
* Navigate to Applications/Accessories/cmd, or
* open *anaconda Command Prompt* from windows *start*
You should now be able to launch a Python interpreter by typing *ipython* in the terminal. In the ipython prompt, you can check your installation by importing the libraries we will need later:
::
$ import numpy
$ import pylab
To install the latest version of GPy, *git* is required. A *git* client on Windows can be found `here <http://git-scm.com/download/win>`_. It is recommended to install with the option "*Use Git from the Windows Command Prompt*". Then, GPy can be installed with the following command
::
pip install git+https://github.com/SheffieldML/GPy.git@devel
MacOSX
===================================

View file

@ -24,9 +24,9 @@ setup(name = 'GPy',
package_data = {'GPy': ['defaults.cfg', 'installation.cfg', 'util/data_resources.json', 'util/football_teams.json']},
py_modules = ['GPy.__init__'],
long_description=read('README.md'),
install_requires=['numpy>=1.6', 'scipy>=0.9','matplotlib>=1.1', 'nose'],
install_requires=['numpy>=1.6', 'scipy>=0.9'],
extras_require = {
'docs':['Sphinx', 'ipython'],
'docs':['matplotlib>=1.1','Sphinx','ipython'],
},
classifiers=[
"License :: OSI Approved :: BSD License"],