Mirror of https://github.com/SheffieldML/GPy.git

Commit bccd8e094a: [updates] merged update structure
36 changed files with 190 additions and 107 deletions
@@ -51,7 +51,7 @@ class GP(Model):
         assert Y.ndim == 2
         logger.info("initializing Y")

-        if normalizer is None:
+        if normalizer is True:
             self.normalizer = MeanNorm()
         elif normalizer is False:
             self.normalizer = None
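The net effect is that normalization becomes strictly opt-in. A hedged sketch, assuming the normalizer keyword is forwarded by the model constructors as in current GPy (data illustrative):

    import numpy as np
    import GPy

    X = np.random.rand(20, 1)
    Y = np.sin(X) + np.random.randn(20, 1) * 0.05
    m_plain = GPy.models.GPRegression(X, Y, normalizer=False)  # self.normalizer = None
    m_norm = GPy.models.GPRegression(X, Y, normalizer=True)    # self.normalizer = MeanNorm()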
@@ -213,6 +213,7 @@ class Model(Parameterized):
     def optimize(self, optimizer=None, start=None, **kwargs):
         """
         Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.

         kwargs are passed to the optimizer. They can be:

         :param max_f_eval: maximum number of function evaluations
@@ -222,7 +223,15 @@ class Model(Parameterized):
         :param optimizer: which optimizer to use (defaults to self.preferred_optimizer)
         :type optimizer: string

-        TODO: valid args
+        Valid optimizers are:
+          - 'scg': scaled conjugate gradient method, recommended for stability
+            (see also GPy.inference.optimization.scg)
+          - 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc)
+          - 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin)
+          - 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b)
+          - 'sgd': stochastic gradient descent. For experts only!

         """
         if self.is_fixed:
             raise RuntimeError, "Cannot optimize, when everything is fixed"
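With the docstring now enumerating the optimizers, a usage sketch (model and data illustrative; kwargs such as max_f_eval are forwarded to the chosen optimizer, per the docstring above):

    import numpy as np
    import GPy

    X = np.linspace(0, 10, 50)[:, None]
    Y = np.sin(X) + np.random.randn(50, 1) * 0.1
    m = GPy.models.GPRegression(X, Y)
    m.optimize(optimizer='scg', max_f_eval=1000)  # 'scg' is the stability-recommended choice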
@@ -3,6 +3,7 @@

 import numpy as np
 from sparse_gp import SparseGP
+from numpy.linalg.linalg import LinAlgError
 from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch

 import logging

@@ -42,10 +43,10 @@ class SparseGP_MPI(SparseGP):
         assert isinstance(inference_method, VarDTC_minibatch), 'inference_method has to support MPI!'

         super(SparseGP_MPI, self).__init__(X, Y, Z, kernel, likelihood, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=normalizer)
-        self.updates = False
-        self.add_parameter(self.X, index=0)
+        self.update_model(False)
+        self.link_parameter(self.X, index=0)
         if variational_prior is not None:
-            self.add_parameter(variational_prior)
+            self.link_parameter(variational_prior)
         # self.X.fix()

         self.mpi_comm = mpi_comm

@@ -58,7 +59,8 @@ class SparseGP_MPI(SparseGP):
             self.Y_local = self.Y[N_start:N_end]
             print 'MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range)
             mpi_comm.Bcast(self.param_array, root=0)
-        self.updates = True
+        self.update_model(True)

     def __getstate__(self):
         dc = super(SparseGP_MPI, self).__getstate__()

@@ -83,10 +85,6 @@ class SparseGP_MPI(SparseGP):
         if self._IN_OPTIMIZATION_ and self.mpi_comm.rank==0:
             self.mpi_comm.Bcast(np.int32(1),root=0)
             self.mpi_comm.Bcast(p, root=0)
-
-        from ..util.debug import checkFinite
-        checkFinite(p, 'optimizer_array')

         SparseGP.optimizer_array.fset(self,p)

     def optimize(self, optimizer=None, start=None, **kwargs):

@@ -102,7 +100,13 @@ class SparseGP_MPI(SparseGP):
         while True:
             self.mpi_comm.Bcast(flag,root=0)
             if flag==1:
-                self.optimizer_array = x
+                try:
+                    self.optimizer_array = x
+                    self._fail_count = 0
+                except (LinAlgError, ZeroDivisionError, ValueError):
+                    if self._fail_count >= self._allowed_failures:
+                        raise
+                    self._fail_count += 1
             elif flag==-1:
                 break
             else:
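The worker loop above waits on integer flags broadcast from rank 0: 1 means a new parameter vector follows, -1 means optimization is finished. A hedged standalone sketch of that control pattern with mpi4py (flag semantics are assumptions read off the diff):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    flag = np.zeros(1, dtype=np.int32)
    if comm.rank == 0:
        flag[0] = -1                  # done optimizing: tell every worker to stop
        comm.Bcast(flag, root=0)
    else:
        while True:
            comm.Bcast(flag, root=0)  # block until rank 0 broadcasts an instruction
            if flag[0] == -1:
                break                 # mirror of the 'elif flag==-1: break' branch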
@@ -5,9 +5,13 @@
 """
 Gaussian Processes classification
 """
-import pylab as pb
 import GPy

+try:
+    import pylab as pb
+except:
+    pass

 default_seed = 10000

 def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
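This guarded-import idiom recurs in most of the hunks below: plotting backends become optional, so headless installs still import cleanly. A minimal standalone sketch, slightly tightened to catch only ImportError and to fail loudly at call time rather than import time (names illustrative):

    try:
        import pylab as pb            # optional plotting backend
    except ImportError:
        pb = None

    def plot_data(x, y):
        if pb is None:
            raise RuntimeError("plotting requires pylab/matplotlib")
        pb.plot(x, y)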
@@ -1,5 +1,8 @@
 import numpy as np
-import pylab as pb
+try:
+    import pylab as pb
+except:
+    pass
 import GPy
 pb.ion()
 pb.close('all')

@@ -1,7 +1,10 @@
 import GPy
 import numpy as np
-import matplotlib.pyplot as plt
 from GPy.util import datasets
+try:
+    import matplotlib.pyplot as plt
+except:
+    pass

 def student_t_approx(optimize=True, plot=True):
     """

@@ -4,7 +4,10 @@
 """
 Gaussian Processes regression examples
 """
-import pylab as pb
+try:
+    import pylab as pb
+except:
+    pass
 import numpy as np
 import GPy

@@ -1,7 +1,10 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

-import pylab as pb
+try:
+    import pylab as pb
+except:
+    pass
 import numpy as np
 import GPy

@@ -6,8 +6,11 @@
 Code of Tutorials
 """

-import pylab as pb
-pb.ion()
+try:
+    import pylab as pb
+    pb.ion()
+except:
+    pass
 import numpy as np
 import GPy
@@ -124,6 +124,7 @@ class vDTC(object):
         v, _ = dtrtrs(L, tmp, lower=1, trans=1)
         tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
         P = tdot(tmp.T)
+        stop

         #compute log marginal
         log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

 from posterior import Posterior
-from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs
+from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, pdinv
 from ...util import diag
 from ...core.parameterization.variational import VariationalPosterior
 import numpy as np

@@ -144,6 +144,7 @@ class VarDTC_minibatch(LatentFunctionInference):
         """

         num_data, output_dim = Y.shape
+        input_dim = Z.shape[0]
         if self.mpi_comm != None:
             num_data_all = np.array(num_data,dtype=np.int32)
             self.mpi_comm.Allreduce([np.int32(num_data), MPI.INT], [num_data_all, MPI.INT])
@@ -166,31 +167,18 @@ class VarDTC_minibatch(LatentFunctionInference):
         # Compute Common Components
         #======================================================================

-        from ...util.debug import checkFullRank
-
         Kmm = kern.K(Z).copy()
         diag.add(Kmm, self.const_jitter)
-        r1 = checkFullRank(Kmm,name='Kmm')
-        Lm = jitchol(Kmm)
-        LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right')
+        KmmInv, Lm, LmInv, _ = pdinv(Kmm)
+        LmInvPsi2LmInvT = LmInv.dot(psi2_full).dot(LmInv.T)
         Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT
-        r2 = checkFullRank(Lambda,name='Lambda')
-        if (not r1) or (not r2):
-            raise
-        LL = jitchol(Lambda)
-        LL = np.dot(Lm,LL)
-        b,_ = dtrtrs(LL, psi1Y_full.T)
+        LInv, LL, LLInv, logdet_L = pdinv(Lambda)
+        b = LLInv.dot(LmInv.dot(psi1Y_full.T))
         bbt = np.square(b).sum()
-        v,_ = dtrtrs(LL.T,b,lower=False)
-        vvt = np.einsum('md,od->mo',v,v)
-
-        Psi2LLInvT = dtrtrs(LL,psi2_full)[0].T
-        LmInvPsi2LLInvT= dtrtrs(Lm,Psi2LLInvT)[0]
-        KmmInvPsi2LLInvT = dtrtrs(Lm,LmInvPsi2LLInvT,trans=True)[0]
-        KmmInvPsi2P = dtrtrs(LL,KmmInvPsi2LLInvT.T, trans=True)[0].T
-
-        dL_dpsi2R = (output_dim*KmmInvPsi2P - vvt)/2. # dL_dpsi2 with R inside psi2
+        v = LmInv.T.dot(LLInv.T.dot(b))
+        dL_dpsi2R = LmInv.T.dot(-LLInv.T.dot(tdot(b)+output_dim*np.eye(input_dim)).dot(LLInv)+output_dim*np.eye(input_dim)).dot(LmInv)/2.

         # Cache intermediate results
         self.midRes['dL_dpsi2R'] = dL_dpsi2R
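pdinv does the work that the removed jitchol/dtrtrs choreography did by hand. A hedged sketch of its return convention as read off the diff ('KmmInv,Lm,LmInv,_ = pdinv(Kmm)' and the logdet_L usage below):

    import numpy as np
    from GPy.util.linalg import pdinv

    A = np.array([[4., 1.], [1., 3.]])
    Ainv, L, Linv, logdet = pdinv(A)
    assert np.allclose(L.dot(L.T), A)                    # L: lower Cholesky factor of A
    assert np.allclose(Ainv, Linv.T.dot(Linv))           # Linv: inverse of L
    assert np.isclose(logdet, np.log(np.linalg.det(A)))  # full log-determinant of A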
@@ -203,20 +191,20 @@ class VarDTC_minibatch(LatentFunctionInference):
             logL_R = -np.log(beta).sum()
         else:
             logL_R = -num_data*np.log(beta)
-        logL = -(output_dim*(num_data*log_2_pi+logL_R+psi0_full-np.trace(LmInvPsi2LmInvT))+YRY_full-bbt)/2.-output_dim*(-np.log(np.diag(Lm)).sum()+np.log(np.diag(LL)).sum())
+        logL = -(output_dim*(num_data*log_2_pi+logL_R+psi0_full-np.trace(LmInvPsi2LmInvT))+YRY_full-bbt)/2.-output_dim*logdet_L/2.

         #======================================================================
         # Compute dL_dKmm
         #======================================================================

-        dL_dKmm = -(output_dim*np.einsum('md,od->mo',KmmInvPsi2LLInvT,KmmInvPsi2LLInvT) + vvt)/2.
+        dL_dKmm = dL_dpsi2R - output_dim*KmmInv.dot(psi2_full).dot(KmmInv)/2.

         #======================================================================
         # Compute the Posterior distribution of inducing points p(u|Y)
         #======================================================================

         if not self.Y_speedup or het_noise:
-            post = Posterior(woodbury_inv=KmmInvPsi2P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm)
+            post = Posterior(woodbury_inv=LmInv.T.dot(np.eye(input_dim)-LInv).dot(LmInv), woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm)
         else:
             post = None

@@ -341,13 +329,7 @@ def update_gradients(model, mpi_comm=None):
     Y = model.Y_local
     X = model.X[model.N_range[0]:model.N_range[1]]

-    try:
-        model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, X, model.Z, model.likelihood, Y)
-    except Exception:
-        if model.mpi_comm is None or model.mpi_comm.rank==0:
-            import time
-            model.pickle('model_'+str(int(time.time()))+'.pickle')
-        raise
+    model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, X, model.Z, model.likelihood, Y)

     het_noise = model.likelihood.variance.size > 1
@@ -10,11 +10,11 @@ class Hierarchical(Kernpart):
     A kernel part which can represent a hierarchy of independence: a generalisation of independent_outputs

     """
-    def __init__(self,parts):
+    def __init__(self,parts,name='hierarchy'):
         self.levels = len(parts)
         self.input_dim = parts[0].input_dim + 1
         self.num_params = np.sum([k.num_params for k in parts])
-        self.name = 'hierarchy'
+        self.name = name
         self.parts = parts

         self.param_starts = np.hstack((0,np.cumsum([k.num_params for k in self.parts[:-1]])))
@@ -20,8 +20,6 @@ class RBF(Stationary):
     _support_GPU = True
     def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf', useGPU=False):
         super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=useGPU)
-        self.weave_options = {}
-        self.group_spike_prob = False
         self.psicomp = PSICOMP_RBF()
         if self.useGPU:
             self.psicomp = PSICOMP_RBF_GPU()
@@ -171,7 +171,8 @@ class Stationary(Kern):

         #the lower memory way with a loop
         ret = np.empty(X.shape, dtype=np.float64)
-        [np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q]) for q in xrange(self.input_dim)]
+        for q in xrange(self.input_dim):
+            np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q])
         ret /= self.lengthscale**2

         return ret
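The rewrite replaces a list comprehension used purely for side effects with an explicit loop; behaviour is unchanged because np.sum writes into the preallocated column via out=. The same pattern on toy shapes (range stands in for the Python 2 xrange of the original):

    import numpy as np

    tmp = np.random.rand(3, 4)
    X, X2 = np.random.rand(3, 2), np.random.rand(4, 2)
    ret = np.empty(X.shape, dtype=np.float64)
    for q in range(X.shape[1]):
        # (3,1) minus (1,4) broadcasts to (3,4); the row sums land in ret[:, q]
        np.sum(tmp * (X[:, q][:, None] - X2[:, q][None, :]), axis=1, out=ret[:, q])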
@@ -309,6 +310,19 @@ class Matern52(Stationary):


 class ExpQuad(Stationary):
+    """
+    The Exponentiated quadratic covariance function.
+
+    .. math::
+
+       k(r) = \sigma^2 \exp \bigg(- \frac{1}{2} r^2 \bigg)
+
+    notes::
+     - Yes, this is exactly the same as the RBF covariance function, but the
+       RBF implementation also has some features for doing variational kernels
+       (the psi-statistics).
+
+    """
     def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='ExpQuad'):
         super(ExpQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
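If the note is taken at face value, ExpQuad and RBF must produce identical covariance matrices. A hedged check (assuming GPy.kern.ExpQuad is exported at the package level, as in later GPy releases):

    import numpy as np
    import GPy

    X = np.random.rand(5, 2)
    assert np.allclose(GPy.kern.ExpQuad(2).K(X), GPy.kern.RBF(2).K(X))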
@@ -3,14 +3,10 @@


 import numpy as np
-from scipy import weave
 from kern import Kern
-from ...util.linalg import tdot
-from ...util.misc import param_to_array
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.caching import Cache_this
-from ...core.parameterization import variational
 from ...util.config import *

 class TruncLinear(Kern):
@@ -3,8 +3,6 @@


 import numpy as np
-import pylab as pb
-import sys, pdb
 from ..core import GP
 from ..models import GPLVM
 from ..mappings import *

@@ -3,7 +3,6 @@


 import numpy as np
-import pylab as pb
 from .. import kern
 from ..core import GP, Param
 from ..likelihoods import Gaussian
@@ -55,7 +54,7 @@ class GPLVM(GP):
         #J = np.zeros((X.shape[0],X.shape[1],self.output_dim))
         J = self.jacobian(X)
         for i in range(X.shape[0]):
-            target[i]=np.sqrt(pb.det(np.dot(J[i,:,:],np.transpose(J[i,:,:]))))
+            target[i]=np.sqrt(np.linalg.det(np.dot(J[i,:,:],np.transpose(J[i,:,:]))))
         return target

     def plot(self):

@@ -63,6 +62,7 @@ class GPLVM(GP):
         pb.scatter(self.likelihood.Y[:, 0], self.likelihood.Y[:, 1], 40, self.X[:, 0].copy(), linewidth=0, cmap=pb.cm.jet) # @UndefinedVariable
         Xnew = np.linspace(self.X.min(), self.X.max(), 200)[:, None]
         mu, _ = self.predict(Xnew)
+        import pylab as pb
         pb.plot(mu[:, 0], mu[:, 1], 'k', linewidth=1.5)

     def plot_latent(self, labels=None, which_indices=None,
@@ -3,13 +3,8 @@


 import numpy as np
-import pylab as pb
-import sys, pdb
+import sys
 from GPy.models.sparse_gp_regression import SparseGPRegression
-from GPy.models.gplvm import GPLVM
-# from .. import kern
-# from ..core import model
-# from ..util.linalg import pdinv, PCA

 class SparseGPLVM(SparseGPRegression):
     """
@@ -1,4 +1,7 @@
 # Copyright (c) 2014, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

-import matplot_dep
+try:
+    import matplot_dep
+except (ImportError, NameError):
+    print 'Fail to load GPy.plotting.matplot_dep.'
@@ -2,8 +2,11 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)


-import Tango
-import pylab as pb
+try:
+    import Tango
+    import pylab as pb
+except:
+    pass
 import numpy as np

 def ax_default(fignum, ax):
@@ -1,12 +1,16 @@
-import pylab as pb
 import numpy as np
 from latent_space_visualizations.controllers.imshow_controller import ImshowController,ImAnnotateController
 from ...util.misc import param_to_array
 from ...core.parameterization.variational import VariationalPosterior
 from .base_plots import x_frame2D
 import itertools
-import Tango
-from matplotlib.cm import get_cmap
+try:
+    import Tango
+    from matplotlib.cm import get_cmap
+    import pylab as pb
+except:
+    pass

 def most_significant_input_dimensions(model, which_indices):
     """
@@ -1,8 +1,10 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

-import pylab as pb
-import sys
+try:
+    import pylab as pb
+except:
+    pass
 #import numpy as np
 #import Tango
 #from base_plots import gpplot, x_frame1D, x_frame2D
@@ -100,9 +100,7 @@ def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False, filtering=Non
     return ax


-def plot(kernel, x=None, plot_limits=None, which_parts='all', resolution=None, *args, **kwargs):
-    if which_parts == 'all':
-        which_parts = [True] * kernel.size
+def plot(kernel, x=None, plot_limits=None, resolution=None, *args, **kwargs):
     if kernel.input_dim == 1:
         if x is None:
             x = np.zeros((1, 1))

@@ -133,7 +131,7 @@ def plot(kernel, x=None, plot_limits=None, resolution=None, *args, **kwargs):
         assert x.size == 2, "The size of the fixed variable x is not 2"
         x = x.reshape((1, 2))

-        if plot_limits == None:
+        if plot_limits is None:
             xmin, xmax = (x - 5).flatten(), (x + 5).flatten()
         elif len(plot_limits) == 2:
             xmin, xmax = plot_limits

@@ -142,12 +140,10 @@ def plot(kernel, x=None, plot_limits=None, resolution=None, *args, **kwargs):

         resolution = resolution or 51
         xx, yy = np.mgrid[xmin[0]:xmax[0]:1j * resolution, xmin[1]:xmax[1]:1j * resolution]
-        xg = np.linspace(xmin[0], xmax[0], resolution)
-        yg = np.linspace(xmin[1], xmax[1], resolution)
         Xnew = np.vstack((xx.flatten(), yy.flatten())).T
-        Kx = kernel.K(Xnew, x, which_parts)
+        Kx = kernel.K(Xnew, x)
         Kx = Kx.reshape(resolution, resolution).T
-        pb.contour(xg, yg, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) # @UndefinedVariable
+        pb.contour(xx, yy, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs) # @UndefinedVariable
         pb.xlim(xmin[0], xmax[0])
         pb.ylim(xmin[1], xmax[1])
         pb.xlabel("x1")
@@ -1,9 +1,12 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

-import pylab as pb
 import numpy as np
-import Tango
+try:
+    import Tango
+    import pylab as pb
+except:
+    pass
 from base_plots import x_frame1D, x_frame2D

@@ -1,13 +1,14 @@
 import numpy as np
-import pylab as pb
-import matplotlib.patches as patches
-from matplotlib.patches import Polygon
-from matplotlib.collections import PatchCollection
-#from matplotlib import cm
+try:
+    import pylab as pb
+    from matplotlib.patches import Polygon
+    from matplotlib.collections import PatchCollection
+    #from matplotlib import cm
+    pb.ion()
+except:
+    pass
 import re

-pb.ion()

 def plot(shape_records,facecolor='w',edgecolor='k',linewidths=.5, ax=None,xlims=None,ylims=None):
     """
     Plot the geometry of a shapefile
@@ -1,9 +1,12 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

-import pylab as pb
+try:
+    import Tango
+    import pylab as pb
+except:
+    pass
 import numpy as np
-import Tango
 from base_plots import gpplot, x_frame1D, x_frame2D
 from ...util.misc import param_to_array
 from ...models.gp_coregionalized_regression import GPCoregionalizedRegression
@@ -3,7 +3,10 @@


 import numpy as np
-import pylab as pb
+try:
+    import pylab as pb
+except:
+    pass


 def univariate_plot(prior):
@@ -6,7 +6,6 @@ import pylab

 from ...models import SSGPLVM
 from img_plots import plot_2D_images
-from ...util.misc import param_to_array

 class SSGPLVM_plot(object):
     def __init__(self,model, imgsize):
@@ -215,7 +215,10 @@ def check_kernel_gradient_functions(kern, X=None, X2=None, output_ind=None, verb
     if verbose:
         print("Checking gradients of Kdiag(X) wrt X.")
     try:
-        result = Kern_check_dKdiag_dX(kern, X=X).checkgrad(verbose=verbose)
+        testmodel = Kern_check_dKdiag_dX(kern, X=X)
+        if fixed_X_dims is not None:
+            testmodel.X[:,fixed_X_dims].fix()
+        result = testmodel.checkgrad(verbose=verbose)
     except NotImplementedError:
         result=True
     if verbose:

@@ -346,6 +349,7 @@ class KernelTestsNonContinuous(unittest.TestCase):
         kern = GPy.kern.IndependentOutputs(k, -1, name='ind_split')
         self.assertTrue(check_kernel_gradient_functions(kern, X=self.X, X2=self.X2, verbose=verbose, fixed_X_dims=-1))
+
     def test_ODE_UY(self):
         kern = GPy.kern.ODE_UY(2, active_dims=[0, self.D])
         X = self.X[self.X[:,-1]!=2]
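The test now pins selected input dimensions before running the gradient checker. The same fix-then-checkgrad pattern on a public model class, as a hedged sketch (GPLVM chosen for illustration because its X is a parameter; the helper Kern_check_dKdiag_dX above is test-internal):

    import numpy as np
    import GPy

    Y = np.random.randn(20, 5)
    m = GPy.models.GPLVM(Y, input_dim=2)
    m.X[:, 1:2].fix()     # pin one latent column, as testmodel.X[:,fixed_X_dims].fix() does
    assert m.checkgrad()  # the numeric-vs-analytic check now covers only free parameters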
@@ -143,8 +143,9 @@ class ParameterizedTest(unittest.TestCase):

     def test_randomize(self):
         ps = self.test1.param.view(np.ndarray).copy()
+        self.test1.param[2:5].fix()
         self.test1.param.randomize()
-        self.assertFalse(np.all(ps==self.test1.param))
+        self.assertFalse(np.all(ps==self.test1.param),str(ps)+str(self.test1.param))

     def test_fixing_randomize_parameter_handling(self):
         self.rbf.fix(warning=True)

@@ -164,10 +165,8 @@ class ParameterizedTest(unittest.TestCase):
     def test_fixing_optimize(self):
         self.testmodel.kern.lengthscale.fix()
         val = float(self.testmodel.kern.lengthscale)
-        val2 = float(self.testmodel.kern.variance)
         self.testmodel.randomize()
         self.assertEqual(val, self.testmodel.kern.lengthscale)
-        self.assertNotEqual(val2, self.testmodel.kern.variance)

     def test_add_parameter_in_hierarchy(self):
         from GPy.core import Param
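Both test changes encode the same contract: randomize() must leave fixed parameters untouched while moving the free ones. A hedged sketch of that contract on a kernel:

    import GPy

    k = GPy.kern.RBF(1)
    k.lengthscale.fix()
    before = float(k.lengthscale)
    k.randomize()                           # only free parameters are resampled
    assert float(k.lengthscale) == before   # the fixed lengthscale must not move
    print k.variance                        # free, so it (almost surely) changed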
@@ -2,7 +2,6 @@ import csv
 import os
 import copy
 import numpy as np
-import pylab as pb
 import GPy
 import scipy.io
 import cPickle as pickle

@@ -346,6 +345,7 @@ def football_data(season='1314', data_set='football_data'):
     data_resources[data_set_season]['files'] = [files]
     if not data_available(data_set_season):
         download_data(data_set_season)
+        import pylab as pb
     for file in reversed(files):
         filename = os.path.join(data_path, data_set_season, file)
         # rewrite files removing blank rows.
@@ -5,8 +5,11 @@ Created on 10 Sep 2012
 @copyright: Max Zwiessele 2012
 '''
 import numpy
-import pylab
-import matplotlib
+try:
+    import pylab
+    import matplotlib
+except:
+    pass
 from numpy.linalg.linalg import LinAlgError

 class pca(object):
@@ -88,13 +91,15 @@ class pca(object):

     def plot_2d(self, X, labels=None, s=20, marker='o',
                 dimensions=(0, 1), ax=None, colors=None,
-                fignum=None, cmap=matplotlib.cm.jet, # @UndefinedVariable
+                fignum=None, cmap=None, # @UndefinedVariable
                 ** kwargs):
         """
         Plot dimensions `dimensions` with given labels against each other in
         PC space. Labels can be any sequence of labels of dimensions X.shape[0].
         Labels can be drawn with a subsequent call to legend()
         """
+        if cmap is None:
+            cmap = matplotlib.cm.jet
         if ax is None:
             fig = pylab.figure(fignum)
             ax = fig.add_subplot(111)
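Moving the default out of the signature means matplotlib is only touched when the method is called, not when the module is imported (signature defaults are evaluated at import time). The idiom in isolation, names illustrative:

    try:
        import matplotlib
    except ImportError:
        matplotlib = None

    def choose_cmap(cmap=None):
        if cmap is None:                  # resolve the real default at call time,
            cmap = matplotlib.cm.jet      # so importing never requires matplotlib
        return cmap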
@@ -84,6 +84,14 @@ GPy.testing.prior_tests module
    :undoc-members:
    :show-inheritance:

+GPy.testing.tie_tests module
+----------------------------
+
+.. automodule:: GPy.testing.tie_tests
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
 Module contents
 ---------------
@@ -19,6 +19,9 @@ You may also be interested by some examples in the GPy/examples folder.
 Contents:

 .. toctree::
+   :maxdepth: 2
+
+   installation
    GPy

doc/installation.rst (new file, 31 lines)

@@ -0,0 +1,31 @@
+==============
+ Installation
+==============
+
+
+Linux
+============
+
+
+Windows
+======================
+One easy way to get a Python distribution with the required packages is to use the Anaconda environment from Continuum Analytics.
+
+* Download and install the free version of Anaconda according to your operating system from `their website <https://store.continuum.io>`_.
+* Open a (new) terminal window:
+
+  * Navigate to Applications/Accessories/cmd, or
+  * open *anaconda Command Prompt* from windows *start*
+
+You should now be able to launch a Python interpreter by typing *ipython* in the terminal. In the ipython prompt, you can check your installation by importing the libraries we will need later:
+::
+    $ import numpy
+    $ import pylab
+
+To install the latest version of GPy, *git* is required. A *git* client on Windows can be found `here <http://git-scm.com/download/win>`_. It is recommended to install with the option "*Use Git from the Windows Command Prompt*". Then, GPy can be installed with the following command:
+::
+    pip install git+https://github.com/SheffieldML/GPy.git@devel
+
+MacOSX
+===================================
setup.py

@@ -24,9 +24,9 @@ setup(name = 'GPy',
       package_data = {'GPy': ['defaults.cfg', 'installation.cfg', 'util/data_resources.json', 'util/football_teams.json']},
       py_modules = ['GPy.__init__'],
       long_description=read('README.md'),
-      install_requires=['numpy>=1.6', 'scipy>=0.9','matplotlib>=1.1', 'nose'],
+      install_requires=['numpy>=1.6', 'scipy>=0.9'],
       extras_require = {
-        'docs':['Sphinx', 'ipython'],
+        'docs':['matplotlib>=1.1','Sphinx','ipython'],
        },
       classifiers=[
       "License :: OSI Approved :: BSD License"],
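With matplotlib demoted from a hard requirement to an extra, plotting support becomes opt-in at install time. A hedged sketch of the resulting commands (the extra is named 'docs' per the diff above; quoting may be needed in some shells):

    pip install GPy            # pulls in numpy and scipy only
    pip install GPy[docs]      # additionally pulls matplotlib, Sphinx, ipython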