diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index fc76ad68..9a199faa 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -108,9 +108,15 @@ class GP(Model):
# The predictive variable to be used to predict using the posterior object's
# woodbury_vector and woodbury_inv is defined as predictive_variable
+ # as long as the posterior has the right woodbury entries.
+ # It is the input variable used for the covariance between
+ # X_star and the posterior of the GP.
# This is usually just a link to self.X (full GP) or self.Z (sparse GP).
# Make sure to name this variable and the predict functions will "just work"
- # as long as the posterior has the right woodbury entries.
+ # In maths the predictive variable is:
+ # K_{xx} - K_{xp}W_{pp}^{-1}K_{px}
+ # W_{pp} := \texttt{Woodbury inv}
+ # p := _predictive_variable
self._predictive_variable = self.X
@@ -205,7 +211,7 @@ class GP(Model):
if kern is None:
kern = self.kern
- Kx = kern.K(self.X, Xnew)
+ Kx = kern.K(self._predictive_variable, Xnew)
mu = np.dot(Kx.T, self.posterior.woodbury_vector)
if len(mu.shape)==1:
mu = mu.reshape(-1,1)
@@ -213,7 +219,7 @@ class GP(Model):
Kxx = kern.K(Xnew)
if self.posterior.woodbury_inv.ndim == 2:
var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
- elif self.posterior.woodbury_inv.ndim == 3:
+ elif self.posterior.woodbury_inv.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],Kxx.shape[1],self.posterior.woodbury_inv.shape[2]))
from ..util.linalg import mdot
for i in range(var.shape[2]):
@@ -223,7 +229,7 @@ class GP(Model):
Kxx = kern.Kdiag(Xnew)
if self.posterior.woodbury_inv.ndim == 2:
var = (Kxx - np.sum(np.dot(self.posterior.woodbury_inv.T, Kx) * Kx, 0))[:,None]
- elif self.posterior.woodbury_inv.ndim == 3:
+ elif self.posterior.woodbury_inv.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],self.posterior.woodbury_inv.shape[2]))
for i in range(var.shape[1]):
var[:, i] = (Kxx - (np.sum(np.dot(self.posterior.woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
@@ -364,11 +370,15 @@ class GP(Model):
var_jac = dK2_dXdX - np.einsum('qim,miq->iq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
return var_jac
- if self.posterior.woodbury_inv.ndim == 3:
- var_jac = []
- for d in range(self.posterior.woodbury_inv.shape[2]):
- var_jac.append(compute_cov_inner(self.posterior.woodbury_inv[:, :, d]))
- var_jac = np.concatenate(var_jac)
+ if self.posterior.woodbury_inv.ndim == 3: # Missing data:
+ if full_cov:
+ var_jac = np.empty((Xnew.shape[0],Xnew.shape[0],Xnew.shape[1],self.output_dim))
+ for d in range(self.posterior.woodbury_inv.shape[2]):
+ var_jac[:, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
+ else:
+ var_jac = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim))
+ for d in range(self.posterior.woodbury_inv.shape[2]):
+ var_jac[:, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
else:
var_jac = compute_cov_inner(self.posterior.woodbury_inv)
return mean_jac, var_jac
@@ -391,10 +401,11 @@ class GP(Model):
mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False)
mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)
+ Sigma = np.zeros(mumuT.shape)
if var_jac.ndim == 3:
- Sigma = np.einsum('iqd,ipd->iqp', var_jac, var_jac)
+ Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = var_jac.sum(-1)
else:
- Sigma = self.output_dim*np.einsum('iq,ip->iqp', var_jac, var_jac)
+ Sigma[(slice(None), )+np.diag_indices(Xnew.shape[1], 2)] = self.output_dim*var_jac
G = 0.
if mean:
G += mumuT
@@ -412,8 +423,13 @@ class GP(Model):
"""
G = self.predict_wishard_embedding(Xnew, kern, mean, covariance)
from ..util.linalg import jitchol
- return np.array([np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :])))))) for n in range(Xnew.shape[0])])
- #return np.array([np.sqrt(np.linalg.det(G[n, :, :])) for n in range(Xnew.shape[0])])
+ mag = np.empty(Xnew.shape[0])
+ for n in range(Xnew.shape[0]):
+ try:
+ mag[n] = np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :]))))))
+ except:
+ mag[n] = np.sqrt(np.linalg.det(G[n, :, :]))
+ return mag
def posterior_samples_f(self,X,size=10, full_cov=True):
"""
diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py
index 112af0fa..5aa95695 100644
--- a/GPy/core/parameterization/parameterized.py
+++ b/GPy/core/parameterization/parameterized.py
@@ -74,7 +74,7 @@ class Parameterized(Parameterizable):
# Metaclass for parameters changed after init.
# This makes sure, that parameters changed will always be called after __init__
# **Never** call parameters_changed() yourself
- #This is ignored in Python 3 -- you need to put the meta class in the function definition.
+ #This is ignored in Python 3 -- you need to put the meta class in the function definition.
#__metaclass__ = ParametersChangedMeta
#The six module is used to support both Python 2 and 3 simultaneously
#===========================================================================
@@ -316,7 +316,7 @@ class Parameterized(Parameterizable):
param[:] = val; return
except AttributeError:
pass
- object.__setattr__(self, name, val);
+ return object.__setattr__(self, name, val);
#===========================================================================
# Pickling
diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py
index 01a1f44b..6d6633cb 100644
--- a/GPy/core/parameterization/transformations.py
+++ b/GPy/core/parameterization/transformations.py
@@ -488,7 +488,7 @@ class Logistic(Transformation):
return instance()
newfunc = super(Transformation, cls).__new__
if newfunc is object.__new__:
- o = newfunc(cls)
+ o = newfunc(cls)
else:
o = newfunc(cls, lower, upper, *args, **kwargs)
cls._instances.append(weakref.ref(o))
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index e227625d..9d2d6068 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -49,7 +49,7 @@ class SparseGP(GP):
else:
#inference_method = ??
raise NotImplementedError("what to do what to do?")
- print("defaulting to ", inference_method, "for latent function inference")
+ print(("defaulting to ", inference_method, "for latent function inference"))
self.Z = Param('inducing inputs', Z)
self.num_inducing = Z.shape[0]
@@ -128,29 +128,30 @@ class SparseGP(GP):
if kern is None: kern = self.kern
if not isinstance(Xnew, VariationalPosterior):
- Kx = kern.K(self._predictive_variable, Xnew)
- mu = np.dot(Kx.T, self.posterior.woodbury_vector)
- if full_cov:
- Kxx = kern.K(Xnew)
- if self.posterior.woodbury_inv.ndim == 2:
- var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
- elif self.posterior.woodbury_inv.ndim == 3:
- var = np.empty((Kxx.shape[0],Kxx.shape[1],self.posterior.woodbury_inv.shape[2]))
- for i in range(var.shape[2]):
- var[:, :, i] = (Kxx - mdot(Kx.T, self.posterior.woodbury_inv[:, :, i], Kx))
- var = var
- else:
- Kxx = kern.Kdiag(Xnew)
- if self.posterior.woodbury_inv.ndim == 2:
- var = (Kxx - np.sum(np.dot(self.posterior.woodbury_inv.T, Kx) * Kx, 0))[:,None]
- elif self.posterior.woodbury_inv.ndim == 3:
- var = np.empty((Kxx.shape[0],self.posterior.woodbury_inv.shape[2]))
- for i in range(var.shape[1]):
- var[:, i] = (Kxx - (np.sum(np.dot(self.posterior.woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
- var = var
- #add in the mean function
- if self.mean_function is not None:
- mu += self.mean_function.f(Xnew)
+ # Kx = kern.K(self._predictive_variable, Xnew)
+ # mu = np.dot(Kx.T, self.posterior.woodbury_vector)
+ # if full_cov:
+ # Kxx = kern.K(Xnew)
+ # if self.posterior.woodbury_inv.ndim == 2:
+ # var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
+ # elif self.posterior.woodbury_inv.ndim == 3:
+ # var = np.empty((Kxx.shape[0],Kxx.shape[1],self.posterior.woodbury_inv.shape[2]))
+ # for i in range(var.shape[2]):
+ # var[:, :, i] = (Kxx - mdot(Kx.T, self.posterior.woodbury_inv[:, :, i], Kx))
+ # var = var
+ # else:
+ # Kxx = kern.Kdiag(Xnew)
+ # if self.posterior.woodbury_inv.ndim == 2:
+ # var = (Kxx - np.sum(np.dot(self.posterior.woodbury_inv.T, Kx) * Kx, 0))[:,None]
+ # elif self.posterior.woodbury_inv.ndim == 3:
+ # var = np.empty((Kxx.shape[0],self.posterior.woodbury_inv.shape[2]))
+ # for i in range(var.shape[1]):
+ # var[:, i] = (Kxx - (np.sum(np.dot(self.posterior.woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
+ # var = var
+ # #add in the mean function
+ # if self.mean_function is not None:
+ # mu += self.mean_function.f(Xnew)
+ mu, var = super(SparseGP, self)._raw_predict(Xnew, full_cov, kern)
else:
psi0_star = kern.psi0(self._predictive_variable, Xnew)
psi1_star = kern.psi1(self._predictive_variable, Xnew)
@@ -159,7 +160,7 @@ class SparseGP(GP):
mu = np.dot(psi1_star, la) # TODO: dimensions?
if full_cov:
- raise NotImplementedError, "Full covariance for Sparse GP predicted with uncertain inputs not implemented yet."
+ raise NotImplementedError("Full covariance for Sparse GP predicted with uncertain inputs not implemented yet.")
var = np.empty((Xnew.shape[0], la.shape[1], la.shape[1]))
di = np.diag_indices(la.shape[1])
else:
diff --git a/GPy/core/svgp.py b/GPy/core/svgp.py
index 6c6ed3cc..b87fd493 100644
--- a/GPy/core/svgp.py
+++ b/GPy/core/svgp.py
@@ -1,5 +1,5 @@
# Copyright (c) 2014, James Hensman, Alex Matthews
-# Distributed under the terms of the GNU General public License, see LICENSE.txt
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..util import choleskies
diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py
index 00a2c2b0..2f089141 100644
--- a/GPy/inference/latent_function_inference/laplace.py
+++ b/GPy/inference/latent_function_inference/laplace.py
@@ -171,7 +171,7 @@ class Laplace(LatentFunctionInference):
#define the objective function (to be maximised)
def obj(Ki_f, f):
ll = -0.5*np.sum(np.dot(Ki_f.T, f)) + np.sum(likelihood.logpdf(f, Y, Y_metadata=Y_metadata))
- print ll
+ print(ll)
if np.isnan(ll):
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
return -np.inf
diff --git a/GPy/inference/optimization/stochastics.py b/GPy/inference/optimization/stochastics.py
index 0fc488a2..902c4290 100644
--- a/GPy/inference/optimization/stochastics.py
+++ b/GPy/inference/optimization/stochastics.py
@@ -40,7 +40,7 @@ class SparseGPMissing(StochasticStorage):
bdict = {}
#For N > 1000 array2string default crops
opt = np.get_printoptions()
- np.set_printoptions(threshold='nan')
+ np.set_printoptions(threshold=np.inf)
for d in range(self.Y.shape[1]):
inan = np.isnan(self.Y)[:, d]
arr_str = np.array2string(inan, np.inf, 0, True, '', formatter={'bool':lambda x: '1' if x else '0'})
@@ -74,7 +74,7 @@ class SparseGPStochastics(StochasticStorage):
bdict = {}
if self.missing_data:
opt = np.get_printoptions()
- np.set_printoptions(threshold='nan')
+ np.set_printoptions(threshold=np.inf)
for d in self.d:
inan = np.isnan(self.Y[:, d])
arr_str = np.array2string(inan,np.inf, 0,True, '',formatter={'bool':lambda x: '1' if x else '0'})
diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py
index ff5d49d3..924694e9 100644
--- a/GPy/kern/_src/kern.py
+++ b/GPy/kern/_src/kern.py
@@ -70,6 +70,9 @@ class Kern(Parameterized):
"""
Compute the kernel function.
+ .. math::
+ K_{ij} = k(X_i, X_j)
+
:param X: the first set of inputs to the kernel
:param X2: (optional) the second set of arguments to the kernel. If X2
is None, this is passed throgh to the 'part' object, which
@@ -77,22 +80,64 @@ class Kern(Parameterized):
"""
raise NotImplementedError
def Kdiag(self, X):
+ """
+ The diagonal of the kernel matrix K
+
+ .. math::
+ Kdiag_{i} = k(X_i, X_i)
+ """
raise NotImplementedError
def psi0(self, Z, variational_posterior):
+ """
+ .. math::
+ \psi_0 = \sum_{i=0}^{n}E_{q(X)}[k(X_i, X_i)]
+ """
return self.psicomp.psicomputations(self, Z, variational_posterior)[0]
def psi1(self, Z, variational_posterior):
+ """
+ .. math::
+ \psi_1^{n,m} = E_{q(X)}[k(X_n, Z_m)]
+ """
return self.psicomp.psicomputations(self, Z, variational_posterior)[1]
def psi2(self, Z, variational_posterior):
+ """
+ .. math::
+ \psi_2^{m,m'} = \sum_{i=0}^{n}E_{q(X)}[ k(Z_m, X_i) k(X_i, Z_{m'})]
+ """
return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=False)[2]
def psi2n(self, Z, variational_posterior):
+ """
+ .. math::
+ \psi_2^{n,m,m'} = E_{q(X)}[ k(Z_m, X_n) k(X_n, Z_{m'})]
+
+ Thus, we do not sum out n, compared to psi2
+ """
return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]
def gradients_X(self, dL_dK, X, X2):
+ """
+ .. math::
+
+ \\frac{\partial L}{\partial X} = \\frac{\partial L}{\partial K}\\frac{\partial K}{\partial X}
+ """
raise NotImplementedError
+ def gradients_X_X2(self, dL_dK, X, X2):
+ return self.gradients_X(dL_dK, X, X2), self.gradients_X(dL_dK.T, X2, X)
def gradients_XX(self, dL_dK, X, X2):
+ """
+ .. math::
+
+ \\frac{\partial^2 L}{\partial X\partial X_2} = \\frac{\partial L}{\partial K}\\frac{\partial^2 K}{\partial X\partial X_2}
+ """
raise(NotImplementedError, "This is the second derivative of K wrt X and X2, and not implemented for this kernel")
def gradients_XX_diag(self, dL_dKdiag, X):
+ """
+ The diagonal of the second derivative w.r.t. X and X2
+ """
raise(NotImplementedError, "This is the diagonal of the second derivative of K wrt X and X2, and not implemented for this kernel")
def gradients_X_diag(self, dL_dKdiag, X):
+ """
+ The diagonal of the derivative w.r.t. X
+ """
raise NotImplementedError
def update_gradients_diag(self, dL_dKdiag, X):
@@ -108,11 +153,17 @@ class Kern(Parameterized):
Set the gradients of all parameters when doing inference with
uncertain inputs, using expectations of the kernel.
- The esential maths is
+ The essential maths is
- dL_d{theta_i} = dL_dpsi0 * dpsi0_d{theta_i} +
- dL_dpsi1 * dpsi1_d{theta_i} +
- dL_dpsi2 * dpsi2_d{theta_i}
+ .. math::
+
+ \\frac{\partial L}{\partial \\theta_i} & = \\frac{\partial L}{\partial \psi_0}\\frac{\partial \psi_0}{\partial \\theta_i}\\
+ & \quad + \\frac{\partial L}{\partial \psi_1}\\frac{\partial \psi_1}{\partial \\theta_i}\\
+ & \quad + \\frac{\partial L}{\partial \psi_2}\\frac{\partial \psi_2}{\partial \\theta_i}
+
+ Thus, we push the different derivatives through the gradients of the psi
+ statistics. Be sure to set the gradients for all kernel
+ parameters here.
"""
dtheta = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[0]
self.gradient[:] = dtheta
diff --git a/GPy/kern/_src/kernel_slice_operations.py b/GPy/kern/_src/kernel_slice_operations.py
index 8c06d0c0..719d6b56 100644
--- a/GPy/kern/_src/kernel_slice_operations.py
+++ b/GPy/kern/_src/kernel_slice_operations.py
@@ -1,7 +1,11 @@
'''
Created on 11 Mar 2014
-@author: maxz
+@author: @mzwiessele
+
+This module provides a meta class for the kernels. The meta class is for
+slicing the inputs (X, X2) for the kernels, before K (or any other method involving X)
+gets called. The `active_dims` of a kernel decide which dimensions the kernel works on.
'''
from ...core.parameterization.parameterized import ParametersChangedMeta
import numpy as np
@@ -19,6 +23,7 @@ class KernCallsViaSlicerMeta(ParametersChangedMeta):
put_clean(dct, 'update_gradients_full', _slice_update_gradients_full)
put_clean(dct, 'update_gradients_diag', _slice_update_gradients_diag)
put_clean(dct, 'gradients_X', _slice_gradients_X)
+ put_clean(dct, 'gradients_X_X2', _slice_gradients_X)
put_clean(dct, 'gradients_XX', _slice_gradients_XX)
put_clean(dct, 'gradients_XX_diag', _slice_gradients_X_diag)
put_clean(dct, 'gradients_X_diag', _slice_gradients_X_diag)
diff --git a/GPy/kern/_src/linear.py b/GPy/kern/_src/linear.py
index 1a9793b1..0a582ac8 100644
--- a/GPy/kern/_src/linear.py
+++ b/GPy/kern/_src/linear.py
@@ -17,7 +17,7 @@ class Linear(Kern):
.. math::
- k(x,y) = \sum_{i=1}^input_dim \sigma^2_i x_iy_i
+ k(x,y) = \sum_{i=1}^{\\text{input_dim}} \sigma^2_i x_iy_i
:param input_dim: the number of input dimensions
:type input_dim: int
diff --git a/GPy/kern/_src/mlp.py b/GPy/kern/_src/mlp.py
index b65fb2e0..c495b77b 100644
--- a/GPy/kern/_src/mlp.py
+++ b/GPy/kern/_src/mlp.py
@@ -5,6 +5,7 @@ from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
import numpy as np
+from ...util.linalg import tdot
from ...util.caching import Cache_this
four_over_tau = 2./np.pi
@@ -40,6 +41,7 @@ class MLP(Kern):
self.link_parameters(self.variance, self.weight_variance, self.bias_variance)
+ @Cache_this(limit=20, ignore_args=())
def K(self, X, X2=None):
if X2 is None:
X_denom = np.sqrt(self._comp_prod(X)+1.)
@@ -51,6 +53,7 @@ class MLP(Kern):
XTX = self._comp_prod(X,X2)/X_denom[:,None]/X2_denom[None,:]
return self.variance*four_over_tau*np.arcsin(XTX)
+ @Cache_this(limit=20, ignore_args=())
def Kdiag(self, X):
"""Compute the diagonal of the covariance matrix for X."""
X_prod = self._comp_prod(X)
@@ -73,6 +76,10 @@ class MLP(Kern):
"""Derivative of the covariance matrix with respect to X"""
return self._comp_grads(dL_dK, X, X2)[3]
+ def gradients_X_X2(self, dL_dK, X, X2):
+ """Derivative of the covariance matrix with respect to X"""
+ return self._comp_grads(dL_dK, X, X2)[3:]
+
def gradients_X_diag(self, dL_dKdiag, X):
"""Gradient of diagonal of covariance with respect to X"""
return self._comp_grads_diag(dL_dKdiag, X)[3]
diff --git a/GPy/kern/_src/psi_comp/gaussherm.py b/GPy/kern/_src/psi_comp/gaussherm.py
index afbca545..8e54e6a0 100644
--- a/GPy/kern/_src/psi_comp/gaussherm.py
+++ b/GPy/kern/_src/psi_comp/gaussherm.py
@@ -80,8 +80,9 @@ class PSICOMP_GH(PSICOMP):
dL_dkfu = (dL_dpsi1+ 2.*Kfu.dot(dL_dpsi2))*self.weights[i]
kern.update_gradients_full(dL_dkfu, X, Z)
dtheta += kern.gradient
- dX += kern.gradients_X(dL_dkfu, X, Z)
- dZ += kern.gradients_X(dL_dkfu.T, Z, X)
+ dX_i, dZ_i = kern.gradients_X_X2(dL_dkfu, X, Z)
+ dX += dX_i
+ dZ += dZ_i
dmu += dX
dS += dX*self.locs[i]/(2.*S_sq)
kern.gradient[:] = dtheta_old
diff --git a/GPy/kern/_src/psi_comp/rbf_psi_comp.py b/GPy/kern/_src/psi_comp/rbf_psi_comp.py
index 892eb1a0..735a354d 100644
--- a/GPy/kern/_src/psi_comp/rbf_psi_comp.py
+++ b/GPy/kern/_src/psi_comp/rbf_psi_comp.py
@@ -6,12 +6,6 @@ import numpy as np
from GPy.util.caching import Cacher
def psicomputations(variance, lengthscale, Z, variational_posterior, return_psi2_n=False):
- """
- Z - MxQ
- mu - NxQ
- S - NxQ
- gamma - NxQ
- """
# here are the "statistics" for psi0, psi1 and psi2
# Produced intermediate results:
# _psi1 NxM
@@ -26,12 +20,6 @@ def psicomputations(variance, lengthscale, Z, variational_posterior, return_psi2
return psi0, psi1, psi2
def __psi1computations(variance, lengthscale, Z, mu, S):
- """
- Z - MxQ
- mu - NxQ
- S - NxQ
- gamma - NxQ
- """
# here are the "statistics" for psi1
# Produced intermediate results:
# _psi1 NxM
@@ -46,12 +34,6 @@ def __psi1computations(variance, lengthscale, Z, mu, S):
return _psi1
def __psi2computations(variance, lengthscale, Z, mu, S):
- """
- Z - MxQ
- mu - NxQ
- S - NxQ
- gamma - NxQ
- """
# here are the "statistics" for psi2
# Produced intermediate results:
# _psi2 MxM
@@ -86,13 +68,6 @@ def psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscal
return dL_dvar, dL_dlengscale, dL_dZ, dL_dmu, dL_dS
def _psi1compDer(dL_dpsi1, variance, lengthscale, Z, mu, S):
- """
- dL_dpsi1 - NxM
- Z - MxQ
- mu - NxQ
- S - NxQ
- gamma - NxQ
- """
# here are the "statistics" for psi1
# Produced intermediate results: dL_dparams w.r.t. psi1
# _dL_dvariance 1
@@ -118,13 +93,6 @@ def _psi1compDer(dL_dpsi1, variance, lengthscale, Z, mu, S):
return _dL_dvar, _dL_dl, _dL_dZ, _dL_dmu, _dL_dS
def _psi2compDer(dL_dpsi2, variance, lengthscale, Z, mu, S):
- """
- Z - MxQ
- mu - NxQ
- S - NxQ
- gamma - NxQ
- dL_dpsi2 - MxM
- """
# here are the "statistics" for psi2
# Produced the derivatives w.r.t. psi2:
# _dL_dvariance 1
diff --git a/GPy/kern/_src/psi_comp/rbf_psi_gpucomp.py b/GPy/kern/_src/psi_comp/rbf_psi_gpucomp.py
index 03c4c8af..b3de8363 100644
--- a/GPy/kern/_src/psi_comp/rbf_psi_gpucomp.py
+++ b/GPy/kern/_src/psi_comp/rbf_psi_gpucomp.py
@@ -7,13 +7,6 @@ from ....util.caching import Cache_this
from . import PSICOMP_RBF
from ....util import gpu_init
-try:
- import pycuda.gpuarray as gpuarray
- from pycuda.compiler import SourceModule
- from ....util.linalg_gpu import sum_axis
-except:
- pass
-
gpu_code = """
// define THREADNUM
@@ -242,6 +235,10 @@ gpu_code = """
class PSICOMP_RBF_GPU(PSICOMP_RBF):
def __init__(self, threadnum=256, blocknum=30, GPU_direct=False):
+ from pycuda.compiler import SourceModule
+ from ....util.gpu_init import initGPU
+ initGPU()
+
self.GPU_direct = GPU_direct
self.gpuCache = None
@@ -264,7 +261,8 @@ class PSICOMP_RBF_GPU(PSICOMP_RBF):
memo[id(self)] = s
return s
- def _initGPUCache(self, N, M, Q):
+ def _initGPUCache(self, N, M, Q):
+ import pycuda.gpuarray as gpuarray
if self.gpuCache == None:
self.gpuCache = {
'l_gpu' :gpuarray.empty((Q,),np.float64,order='F'),
@@ -320,13 +318,14 @@ class PSICOMP_RBF_GPU(PSICOMP_RBF):
def get_dimensions(self, Z, variational_posterior):
return variational_posterior.mean.shape[0], Z.shape[0], Z.shape[1]
- @Cache_this(limit=1, ignore_args=(0,))
- def psicomputations(self, variance, lengthscale, Z, variational_posterior):
+ @Cache_this(limit=5, ignore_args=(0,))
+ def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
"""
Z - MxQ
mu - NxQ
S - NxQ
"""
+ variance, lengthscale = kern.variance, kern.lengthscale
N,M,Q = self.get_dimensions(Z, variational_posterior)
self._initGPUCache(N,M,Q)
self.sync_params(lengthscale, Z, variational_posterior.mean, variational_posterior.variance)
@@ -355,8 +354,10 @@ class PSICOMP_RBF_GPU(PSICOMP_RBF):
else:
return psi0, psi1_gpu.get(), psi2_gpu.get()
- @Cache_this(limit=1, ignore_args=(0,1,2,3))
- def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
+ @Cache_this(limit=5, ignore_args=(0,2,3,4))
+ def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+ variance, lengthscale = kern.variance, kern.lengthscale
+ from ....util.linalg_gpu import sum_axis
ARD = (len(lengthscale)!=1)
N,M,Q = self.get_dimensions(Z, variational_posterior)
diff --git a/GPy/kern/_src/psi_comp/sslinear_psi_comp.py b/GPy/kern/_src/psi_comp/sslinear_psi_comp.py
index d431cd61..7e9b4fdc 100644
--- a/GPy/kern/_src/psi_comp/sslinear_psi_comp.py
+++ b/GPy/kern/_src/psi_comp/sslinear_psi_comp.py
@@ -9,7 +9,7 @@ from ....util.linalg import tdot
import numpy as np
-def psicomputations(variance, Z, variational_posterior):
+def psicomputations(variance, Z, variational_posterior, return_psi2_n=False):
"""
Compute psi-statistics for ss-linear kernel
"""
diff --git a/GPy/kern/_src/psi_comp/ssrbf_psi_gpucomp.py b/GPy/kern/_src/psi_comp/ssrbf_psi_gpucomp.py
index 1a9d2058..46f4a06e 100644
--- a/GPy/kern/_src/psi_comp/ssrbf_psi_gpucomp.py
+++ b/GPy/kern/_src/psi_comp/ssrbf_psi_gpucomp.py
@@ -6,14 +6,7 @@ The module for psi-statistics for RBF kernel for Spike-and-Slab GPLVM
import numpy as np
from ....util.caching import Cache_this
from . import PSICOMP_RBF
-from ....util import gpu_init
-try:
- import pycuda.gpuarray as gpuarray
- from pycuda.compiler import SourceModule
- from ....util.linalg_gpu import sum_axis
-except:
- pass
gpu_code = """
// define THREADNUM
@@ -292,6 +285,11 @@ gpu_code = """
class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
def __init__(self, threadnum=128, blocknum=15, GPU_direct=False):
+
+ from pycuda.compiler import SourceModule
+ from ....util.gpu_init import initGPU
+ initGPU()
+
self.GPU_direct = GPU_direct
self.gpuCache = None
@@ -314,7 +312,8 @@ class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
memo[id(self)] = s
return s
- def _initGPUCache(self, N, M, Q):
+ def _initGPUCache(self, N, M, Q):
+ import pycuda.gpuarray as gpuarray
if self.gpuCache == None:
self.gpuCache = {
'l_gpu' :gpuarray.empty((Q,),np.float64,order='F'),
@@ -377,12 +376,13 @@ class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
return variational_posterior.mean.shape[0], Z.shape[0], Z.shape[1]
@Cache_this(limit=1, ignore_args=(0,))
- def psicomputations(self, variance, lengthscale, Z, variational_posterior):
+ def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
"""
Z - MxQ
mu - NxQ
S - NxQ
"""
+ variance, lengthscale = kern.variance, kern.lengthscale
N,M,Q = self.get_dimensions(Z, variational_posterior)
self._initGPUCache(N,M,Q)
self.sync_params(lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob)
@@ -409,8 +409,10 @@ class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
else:
return psi0, psi1_gpu.get(), psi2_gpu.get()
- @Cache_this(limit=1, ignore_args=(0,1,2,3))
- def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
+ @Cache_this(limit=1, ignore_args=(0,2,3,4))
+ def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+ variance, lengthscale = kern.variance, kern.lengthscale
+ from ....util.linalg_gpu import sum_axis
ARD = (len(lengthscale)!=1)
N,M,Q = self.get_dimensions(Z, variational_posterior)
diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py
index ab1ec282..d5f26798 100644
--- a/GPy/kern/_src/stationary.py
+++ b/GPy/kern/_src/stationary.py
@@ -25,13 +25,16 @@ class Stationary(Kern):
Stationary covariance fucntion depend only on r, where r is defined as
- r = \sqrt{ \sum_{q=1}^Q (x_q - x'_q)^2 }
+ .. math::
+ r(x, x') = \\sqrt{ \\sum_{q=1}^Q (x_q - x'_q)^2 }
The covariance function k(x, x' can then be written k(r).
In this implementation, r is scaled by the lengthscales parameter(s):
- r = \sqrt{ \sum_{q=1}^Q \frac{(x_q - x'_q)^2}{\ell_q^2} }.
+ .. math::
+
+ r(x, x') = \\sqrt{ \\sum_{q=1}^Q \\frac{(x_q - x'_q)^2}{\ell_q^2} }.
By default, there's only one lengthscale: seaprate lengthscales for each
dimension can be enables by setting ARD=True.
@@ -39,11 +42,12 @@ class Stationary(Kern):
To implement a stationary covariance function using this class, one need
only define the covariance function k(r), and it derivative.
- ...
- def K_of_r(self, r):
- return foo
- def dK_dr(self, r):
- return bar
+ ```
+ def K_of_r(self, r):
+ return foo
+ def dK_dr(self, r):
+ return bar
+ ```
The lengthscale(s) and variance parameters are added to the structure automatically.
@@ -128,7 +132,8 @@ class Stationary(Kern):
"""
Efficiently compute the scaled distance, r.
- r = \sqrt( \sum_{q=1}^Q (x_q - x'q)^2/l_q^2 )
+        .. math::
+ r = \sqrt( \sum_{q=1}^Q (x_q - x'q)^2/l_q^2 )
Note that if thre is only one lengthscale, l comes outside the sum. In
this case we compute the unscaled distance first (in a separate
@@ -321,7 +326,7 @@ class OU(Stationary):
.. math::
- k(r) = \\sigma^2 \exp(- r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }
+        k(r) = \\sigma^2 \exp(- r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^{\\text{input_dim}} \\frac{(x_i-y_i)^2}{\ell_i^2} }
"""
@@ -341,7 +346,7 @@ class Matern32(Stationary):
.. math::
- k(r) = \\sigma^2 (1 + \\sqrt{3} r) \exp(- \sqrt{3} r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^input_dim \\frac{(x_i-y_i)^2}{\ell_i^2} }
+ k(r) = \\sigma^2 (1 + \\sqrt{3} r) \exp(- \sqrt{3} r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^{\\text{input_dim}} \\frac{(x_i-y_i)^2}{\ell_i^2} }
"""
@@ -388,7 +393,7 @@ class Matern52(Stationary):
.. math::
k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r)
- """
+ """
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat52'):
super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
diff --git a/GPy/kern/_src/trunclinear.py b/GPy/kern/_src/trunclinear.py
index 8c48f134..af90c4a5 100644
--- a/GPy/kern/_src/trunclinear.py
+++ b/GPy/kern/_src/trunclinear.py
@@ -15,7 +15,7 @@ class TruncLinear(Kern):
.. math::
- k(x,y) = \sum_{i=1}^input_dim \sigma^2_i \max(0, x_iy_i - \simga_q)
+ k(x,y) = \sum_{i=1}^input_dim \sigma^2_i \max(0, x_iy_i - \sigma_q)
:param input_dim: the number of input dimensions
:type input_dim: int
@@ -54,7 +54,7 @@ class TruncLinear(Kern):
self.delta = Param('delta', delta)
self.add_parameter(self.variances)
self.add_parameter(self.delta)
-
+
@Cache_this(limit=2)
def K(self, X, X2=None):
XX = self.variances*self._product(X, X2)
@@ -114,7 +114,7 @@ class TruncLinear_inf(Kern):
.. math::
- k(x,y) = \sum_{i=1}^input_dim \sigma^2_i \max(0, x_iy_i - \simga_q)
+ k(x,y) = \sum_{i=1}^input_dim \sigma^2_i \max(0, x_iy_i - \sigma_q)
:param input_dim: the number of input dimensions
:type input_dim: int
@@ -148,8 +148,8 @@ class TruncLinear_inf(Kern):
self.variances = Param('variances', variances, Logexp())
self.add_parameter(self.variances)
-
-
+
+
# @Cache_this(limit=2)
def K(self, X, X2=None):
tmp = self._product(X, X2)
diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py
index 424a7f5a..e1299f73 100644
--- a/GPy/likelihoods/gaussian.py
+++ b/GPy/likelihoods/gaussian.py
@@ -48,7 +48,7 @@ class Gaussian(Likelihood):
def betaY(self,Y,Y_metadata=None):
#TODO: ~Ricardo this does not live here
- raise RuntimeError, "Please notify the GPy developers, this should not happen"
+ raise RuntimeError("Please notify the GPy developers, this should not happen")
return Y/self.gaussian_variance(Y_metadata)
def gaussian_variance(self, Y_metadata=None):
diff --git a/GPy/likelihoods/link_functions.py b/GPy/likelihoods/link_functions.py
index 3d753395..4947fdb8 100644
--- a/GPy/likelihoods/link_functions.py
+++ b/GPy/likelihoods/link_functions.py
@@ -2,6 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
+import scipy
from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf
import scipy as sp
from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times
@@ -67,7 +68,7 @@ class Probit(GPTransformation):
.. math::
g(f) = \\Phi^{-1} (mu)
-
+
"""
def transf(self,f):
return std_norm_cdf(f)
@@ -140,7 +141,7 @@ class Log_ex_1(GPTransformation):
"""
def transf(self,f):
- return np.log1p(safe_exp(f))
+ return scipy.special.log1p(safe_exp(f))
def dtransf_df(self,f):
ef = safe_exp(f)
diff --git a/GPy/models/gp_kronecker_gaussian_regression.py b/GPy/models/gp_kronecker_gaussian_regression.py
index 434661d2..5b2fb41c 100644
--- a/GPy/models/gp_kronecker_gaussian_regression.py
+++ b/GPy/models/gp_kronecker_gaussian_regression.py
@@ -1,5 +1,5 @@
# Copyright (c) 2014, James Hensman, Alan Saul
-# Distributed under the terms of the GNU General public License, see LICENSE.txt
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core.model import Model
diff --git a/GPy/models/gp_regression.py b/GPy/models/gp_regression.py
index 7266ae92..157c0dc8 100644
--- a/GPy/models/gp_regression.py
+++ b/GPy/models/gp_regression.py
@@ -26,12 +26,12 @@ class GPRegression(GP):
"""
- def __init__(self, X, Y, kernel=None, Y_metadata=None, normalizer=None, noise_var=1.):
+ def __init__(self, X, Y, kernel=None, Y_metadata=None, normalizer=None, noise_var=1., mean_function=None):
if kernel is None:
kernel = kern.RBF(X.shape[1])
-
+
likelihood = likelihoods.Gaussian(variance=noise_var)
- super(GPRegression, self).__init__(X, Y, kernel, likelihood, name='GP regression', Y_metadata=Y_metadata, normalizer=normalizer)
+ super(GPRegression, self).__init__(X, Y, kernel, likelihood, name='GP regression', Y_metadata=Y_metadata, normalizer=normalizer, mean_function=mean_function)
diff --git a/GPy/models/gp_var_gauss.py b/GPy/models/gp_var_gauss.py
index dc35b0d9..6cce8640 100644
--- a/GPy/models/gp_var_gauss.py
+++ b/GPy/models/gp_var_gauss.py
@@ -1,5 +1,5 @@
# Copyright (c) 2014, James Hensman, Alan Saul
-# Distributed under the terms of the GNU General public License, see LICENSE.txt
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import GP
diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py
index d4f4f564..17d42e5a 100644
--- a/GPy/models/gplvm.py
+++ b/GPy/models/gplvm.py
@@ -36,8 +36,10 @@ class GPLVM(GP):
likelihood = Gaussian()
super(GPLVM, self).__init__(X, Y, kernel, likelihood, name='GPLVM')
+
self.X = Param('latent_mean', X)
self.link_parameter(self.X, index=0)
+ self._predictive_variable = self.X
def parameters_changed(self):
super(GPLVM, self).parameters_changed()
diff --git a/GPy/plotting/matplot_dep/dim_reduction_plots.py b/GPy/plotting/matplot_dep/dim_reduction_plots.py
index a36f168d..f428537d 100644
--- a/GPy/plotting/matplot_dep/dim_reduction_plots.py
+++ b/GPy/plotting/matplot_dep/dim_reduction_plots.py
@@ -304,7 +304,7 @@ def plot_magnification(model, labels=None, which_indices=None,
view = ImshowController(ax, plot_function,
(xmin, ymin, xmax, ymax),
resolution, aspect=aspect, interpolation='bilinear',
- cmap=cm.gray)
+ cmap=cm.get_cmap('Greys'))
# make sure labels are in order of input:
ulabels = []
diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py
index 87ffd740..3a5a01d2 100644
--- a/GPy/plotting/matplot_dep/models_plots.py
+++ b/GPy/plotting/matplot_dep/models_plots.py
@@ -3,7 +3,7 @@
import numpy as np
from . import Tango
-from base_plots import gpplot, x_frame1D, x_frame2D,gperrors
+from .base_plots import gpplot, x_frame1D, x_frame2D,gperrors
from ...models.gp_coregionalized_regression import GPCoregionalizedRegression
from ...models.sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
from scipy import sparse
@@ -186,8 +186,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
#optionally plot some samples
if samples: #NOTE not tested with fixed_inputs
Ysim = model.posterior_samples(Xgrid, samples, Y_metadata=Y_metadata)
- print Ysim.shape
- print Xnew.shape
+ print(Ysim.shape)
+ print(Xnew.shape)
for yi in Ysim.T:
plots['posterior_samples'] = ax.plot(Xnew, yi[:,None], '#3300FF', linewidth=0.25)
#ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.
diff --git a/GPy/testing/cacher_tests.py b/GPy/testing/cacher_tests.py
new file mode 100644
index 00000000..60f79ba2
--- /dev/null
+++ b/GPy/testing/cacher_tests.py
@@ -0,0 +1,37 @@
+'''
+Created on 4 Sep 2015
+
+@author: maxz
+'''
+import unittest
+from GPy.util.caching import Cacher
+from pickle import PickleError
+
+
+class Test(unittest.TestCase):
+ def setUp(self):
+ def op(x):
+ return x
+ self.cache = Cacher(op, 1)
+
+ def test_pickling(self):
+ self.assertRaises(PickleError, self.cache.__getstate__)
+ self.assertRaises(PickleError, self.cache.__setstate__)
+
+ def test_copy(self):
+ tmp = self.cache.__deepcopy__()
+ assert(tmp.operation is self.cache.operation)
+ self.assertEqual(tmp.limit, self.cache.limit)
+
+ def test_reset(self):
+ self.cache.reset()
+ self.assertDictEqual(self.cache.cached_input_ids, {}, )
+ self.assertDictEqual(self.cache.cached_outputs, {}, )
+ self.assertDictEqual(self.cache.inputs_changed, {}, )
+
+ def test_name(self):
+ assert(self.cache.__name__ == self.cache.operation.__name__)
+
+if __name__ == "__main__":
+ #import sys;sys.argv = ['', 'Test.testName']
+ unittest.main()
\ No newline at end of file
diff --git a/GPy/testing/cython_tests.py b/GPy/testing/cython_tests.py
index 30e27fbb..c4bca5cd 100644
--- a/GPy/testing/cython_tests.py
+++ b/GPy/testing/cython_tests.py
@@ -6,13 +6,14 @@ from ..util.config import config
import unittest
try:
- from . import linalg_cython
+ from ..util import linalg_cython
+ from ..util import choleskies_cython
config.set('cython', 'working', 'True')
except ImportError:
config.set('cython', 'working', 'False')
"""
-These tests make sure that the opure python and cython codes work the same
+These tests make sure that the pure python and cython codes work the same
"""
@unittest.skipIf(not config.getboolean('cython', 'working'),"Cython modules have not been built on this machine")
@@ -67,8 +68,8 @@ class test_choleskies_backprop(np.testing.TestCase):
self.L = GPy.util.linalg.jitchol(A)
self.dL = np.random.randn(10,10)
def test(self):
- r1 = GPy.util.choleskies._backprop_gradient_pure(self.dL, self.L)
- r2 = GPy.util.choleskies.choleskies_cython.backprop_gradient(self.dL, self.L)
- r3 = GPy.util.choleskies.choleskies_cython.backprop_gradient_par_c(self.dL, self.L)
+ r1 = choleskies._backprop_gradient_pure(self.dL, self.L)
+ r2 = choleskies_cython.backprop_gradient(self.dL, self.L)
+ r3 = choleskies_cython.backprop_gradient_par_c(self.dL, self.L)
np.testing.assert_allclose(r1, r2)
np.testing.assert_allclose(r1, r3)
diff --git a/GPy/testing/gp_tests.py b/GPy/testing/gp_tests.py
new file mode 100644
index 00000000..63345c18
--- /dev/null
+++ b/GPy/testing/gp_tests.py
@@ -0,0 +1,99 @@
+'''
+Created on 4 Sep 2015
+
+@author: maxz
+'''
+import unittest
+import numpy as np, GPy
+from GPy.core.parameterization.variational import NormalPosterior
+
+class Test(unittest.TestCase):
+
+
+ def setUp(self):
+ np.random.seed(12345)
+ self.N = 20
+ self.N_new = 50
+ self.D = 1
+ self.X = np.random.uniform(-3., 3., (self.N, 1))
+ self.Y = np.sin(self.X) + np.random.randn(self.N, self.D) * 0.05
+ self.X_new = np.random.uniform(-3., 3., (self.N_new, 1))
+
+
+ def test_setxy_bgplvm(self):
+ k = GPy.kern.RBF(1)
+ m = GPy.models.BayesianGPLVM(self.Y, 2, kernel=k)
+ mu, var = m.predict(m.X)
+ X = m.X.copy()
+ Xnew = NormalPosterior(m.X.mean[:10].copy(), m.X.variance[:10].copy())
+ m.set_XY(Xnew, m.Y[:10])
+ assert(m.checkgrad())
+ m.set_XY(X, self.Y)
+ mu2, var2 = m.predict(m.X)
+ np.testing.assert_allclose(mu, mu2)
+ np.testing.assert_allclose(var, var2)
+
+ def test_setxy_gplvm(self):
+ k = GPy.kern.RBF(1)
+ m = GPy.models.GPLVM(self.Y, 2, kernel=k)
+ mu, var = m.predict(m.X)
+ X = m.X.copy()
+ Xnew = X[:10].copy()
+ m.set_XY(Xnew, m.Y[:10])
+ assert(m.checkgrad())
+ m.set_XY(X, self.Y)
+ mu2, var2 = m.predict(m.X)
+ np.testing.assert_allclose(mu, mu2)
+ np.testing.assert_allclose(var, var2)
+
+ def test_setxy_gp(self):
+ k = GPy.kern.RBF(1)
+ m = GPy.models.GPRegression(self.X, self.Y, kernel=k)
+ mu, var = m.predict(m.X)
+ X = m.X.copy()
+ m.set_XY(m.X[:10], m.Y[:10])
+ assert(m.checkgrad())
+ m.set_XY(X, self.Y)
+ mu2, var2 = m.predict(m.X)
+ np.testing.assert_allclose(mu, mu2)
+ np.testing.assert_allclose(var, var2)
+
+ def test_mean_function(self):
+ from GPy.core.parameterization.param import Param
+ from GPy.core.mapping import Mapping
+ class Parabola(Mapping):
+ def __init__(self, variance, degree=2, name='parabola'):
+ super(Parabola, self).__init__(1, 1, name)
+ self.variance = Param('variance', np.ones(degree+1) * variance)
+ self.degree = degree
+ self.link_parameter(self.variance)
+
+ def f(self, X):
+ p = self.variance[0] * np.ones(X.shape)
+ for i in range(1, self.degree+1):
+ p += self.variance[i] * X**(i)
+ return p
+
+ def gradients_X(self, dL_dF, X):
+ grad = np.zeros(X.shape)
+ for i in range(1, self.degree+1):
+ grad += (i) * self.variance[i] * X**(i-1)
+ return grad
+
+ def update_gradients(self, dL_dF, X):
+ for i in range(self.degree+1):
+ self.variance.gradient[i] = (dL_dF * X**(i)).sum(0)
+ X = np.linspace(-2, 2, 100)[:, None]
+ k = GPy.kern.RBF(1)
+ k.randomize()
+ p = Parabola(.3)
+ p.randomize()
+ Y = p.f(X) + np.random.multivariate_normal(np.zeros(X.shape[0]), k.K(X)+np.eye(X.shape[0])*1e-8)[:,None] + np.random.normal(0, .1, (X.shape[0], 1))
+ m = GPy.models.GPRegression(X, Y, mean_function=p)
+ m.randomize()
+ assert(m.checkgrad())
+ _ = m.predict(m.X)
+
+if __name__ == "__main__":
+ #import sys;sys.argv = ['', 'Test.testName']
+ unittest.main()
\ No newline at end of file
diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py
index ec005b6c..50a5aed8 100644
--- a/GPy/testing/kernel_tests.py
+++ b/GPy/testing/kernel_tests.py
@@ -11,7 +11,7 @@ from ..util.config import config
verbose = 0
try:
- from . import linalg_cython
+ from ..util import linalg_cython
config.set('cython', 'working', 'True')
except ImportError:
config.set('cython', 'working', 'False')
diff --git a/GPy/testing/link_function_tests.py b/GPy/testing/link_function_tests.py
index a4b631f8..9f41f736 100644
--- a/GPy/testing/link_function_tests.py
+++ b/GPy/testing/link_function_tests.py
@@ -1,5 +1,5 @@
import numpy as np
-import scipy as sp
+import scipy
from scipy.special import cbrt
from GPy.models import GradientChecker
_lim_val = np.finfo(np.float64).max
@@ -92,18 +92,18 @@ class LinkFunctionTests(np.testing.TestCase):
link = Log_ex_1()
lim_of_inf = _lim_val_exp
- np.testing.assert_almost_equal(np.log1p(np.exp(self.mid_f)), link.transf(self.mid_f))
- assert np.isinf(np.log1p(np.exp(np.log(self.f_upper_lim))))
+ np.testing.assert_almost_equal(scipy.special.log1p(np.exp(self.mid_f)), link.transf(self.mid_f))
+ assert np.isinf(scipy.special.log1p(np.exp(np.log(self.f_upper_lim))))
#Check the clipping works
np.testing.assert_almost_equal(link.transf(self.f_lower_lim), 0, decimal=5)
#Need to look at most significant figures here rather than the decimals
- np.testing.assert_approx_equal(link.transf(self.f_upper_lim), np.log1p(_lim_val), significant=5)
+ np.testing.assert_approx_equal(link.transf(self.f_upper_lim), scipy.special.log1p(_lim_val), significant=5)
self.check_overflow(link, lim_of_inf)
#Check that it would otherwise fail
beyond_lim_of_inf = lim_of_inf + 10.0
old_err_state = np.seterr(over='ignore')
- self.assertTrue(np.isinf(np.log1p(np.exp(beyond_lim_of_inf))))
+ self.assertTrue(np.isinf(scipy.special.log1p(np.exp(beyond_lim_of_inf))))
np.seterr(**old_err_state)
diff --git a/GPy/testing/misc_tests.py b/GPy/testing/misc_tests.py
index caf98874..a0e2d949 100644
--- a/GPy/testing/misc_tests.py
+++ b/GPy/testing/misc_tests.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import numpy as np
import scipy as sp
import GPy
@@ -18,8 +19,8 @@ class MiscTests(np.testing.TestCase):
assert np.isinf(np.exp(self._lim_val_exp + 1))
assert np.isfinite(GPy.util.misc.safe_exp(self._lim_val_exp + 1))
- print w
- print len(w)
+ print(w)
+ print(len(w))
assert len(w)==1 # should have one overflow warning
def test_safe_exp_lower(self):
diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py
index 648e1174..1cfe8b48 100644
--- a/GPy/testing/model_tests.py
+++ b/GPy/testing/model_tests.py
@@ -55,13 +55,59 @@ class MiscTests(unittest.TestCase):
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
+ q50n = m.predict_quantiles(m.X, (50,))
+ q50 = m2.predict_quantiles(m2.X, (50,))
+ np.testing.assert_allclose(q50n[0], (q50[0]*std)+mu)
+
+ def check_jacobian(self):
+ try:
+ import autograd.numpy as np, autograd as ag, GPy, matplotlib.pyplot as plt
+ from GPy.models import GradientChecker, GPRegression
+ except:
+ raise self.skipTest("autograd not available to check gradients")
+ def k(X, X2, alpha=1., lengthscale=None):
+ if lengthscale is None:
+ lengthscale = np.ones(X.shape[1])
+ exp = 0.
+ for q in range(X.shape[1]):
+ exp += ((X[:, [q]] - X2[:, [q]].T)/lengthscale[q])**2
+ #exp = np.sqrt(exp)
+ return alpha * np.exp(-.5*exp)
+ dk = ag.elementwise_grad(lambda x, x2: k(x, x2, alpha=ke.variance.values, lengthscale=ke.lengthscale.values))
+ dkdk = ag.elementwise_grad(dk, argnum=1)
+
+ ke = GPy.kern.RBF(1, ARD=True)
+ #ke.randomize()
+ ke.variance = .2#.randomize()
+ ke.lengthscale[:] = .5
+ ke.randomize()
+ X = np.linspace(-1, 1, 1000)[:,None]
+ X2 = np.array([[0.]]).T
+ np.testing.assert_allclose(ke.gradients_X([[1.]], X, X), dk(X, X))
+ np.testing.assert_allclose(ke.gradients_XX([[1.]], X, X).sum(0), dkdk(X, X))
+ np.testing.assert_allclose(ke.gradients_X([[1.]], X, X2), dk(X, X2))
+ np.testing.assert_allclose(ke.gradients_XX([[1.]], X, X2).sum(0), dkdk(X, X2))
+
+ m = GPRegression(self.X, self.Y)
+ def f(x):
+ m.X[:] = x
+ return m.log_likelihood()
+ def df(x):
+ m.X[:] = x
+ return m.kern.gradients_X(m.grad_dict['dL_dK'], X)
+ def ddf(x):
+ m.X[:] = x
+ return m.kern.gradients_XX(m.grad_dict['dL_dK'], X).sum(0)
+ gc = GradientChecker(f, df, self.X)
+ gc2 = GradientChecker(df, ddf, self.X)
+ assert(gc.checkgrad())
+ assert(gc2.checkgrad())
def test_sparse_raw_predict(self):
k = GPy.kern.RBF(1)
m = GPy.models.SparseGPRegression(self.X, self.Y, kernel=k)
m.randomize()
Z = m.Z[:]
- X = self.X[:]
# Not easy to check if woodbury_inv is correct in itself as it requires a large derivation and expression
Kinv = m.posterior.woodbury_inv
@@ -147,11 +193,24 @@ class MiscTests(unittest.TestCase):
m = BayesianGPLVMMiniBatch(Ymissing, Q, init="random", num_inducing=num_inducing,
kernel=k, missing_data=True)
assert(m.checkgrad())
+ mul, varl = m.predict(m.X)
k = kern.RBF(Q, ARD=True) + kern.White(Q, np.exp(-2)) # + kern.bias(Q)
- m = BayesianGPLVMMiniBatch(Ymissing, Q, init="random", num_inducing=num_inducing,
+ m2 = BayesianGPLVMMiniBatch(Ymissing, Q, init="random", num_inducing=num_inducing,
kernel=k, missing_data=True)
assert(m.checkgrad())
+ m2.kern.rbf.lengthscale[:] = 1e6
+ m2.X[:] = m.X.param_array
+ m2.likelihood[:] = m.likelihood[:]
+ m2.kern.white[:] = m.kern.white[:]
+ mu, var = m.predict(m.X)
+ np.testing.assert_allclose(mul, mu)
+ np.testing.assert_allclose(varl, var)
+
+ q50 = m.predict_quantiles(m.X, (50,))
+ np.testing.assert_allclose(mul, q50[0])
+
+
def test_likelihood_replicate_kern(self):
m = GPy.models.GPRegression(self.X, self.Y)
diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py
index 0fb129ff..3ee93f2b 100644
--- a/GPy/testing/parameterized_tests.py
+++ b/GPy/testing/parameterized_tests.py
@@ -248,10 +248,16 @@ class ParameterizedTest(unittest.TestCase):
m.randomize()
self.assertEqual(m.p1, val)
+ def test_checkgrad(self):
+ assert(self.testmodel.kern.checkgrad())
+ assert(self.testmodel.kern.lengthscale.checkgrad())
+ assert(self.testmodel.likelihood.lengthscale.checkgrad())
+
def test_printing(self):
print(self.test1)
print(self.param)
print(self.test1[''])
+ print(self.testmodel.hierarchy_name(False))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_add_parameter']
diff --git a/GPy/testing/run_coverage.sh b/GPy/testing/run_coverage.sh
index 6b6e8cb2..f2e52230 100755
--- a/GPy/testing/run_coverage.sh
+++ b/GPy/testing/run_coverage.sh
@@ -1 +1 @@
-nosetests . --with-coverage --cover-html --cover-html-dir=coverage --cover-package=GPy --cover-erase
+nosetests . --with-coverage --logging-level=INFO --cover-html --cover-html-dir=coverage --cover-package=GPy --cover-erase
diff --git a/GPy/util/__init__.py b/GPy/util/__init__.py
index a21dc84e..6919f1a8 100644
--- a/GPy/util/__init__.py
+++ b/GPy/util/__init__.py
@@ -15,6 +15,5 @@ from . import caching
from . import diag
from . import initialization
from . import multioutput
-from . import linalg_gpu
from . import parallel
diff --git a/GPy/util/choleskies.py b/GPy/util/choleskies.py
index ca055e08..e245d988 100644
--- a/GPy/util/choleskies.py
+++ b/GPy/util/choleskies.py
@@ -1,5 +1,5 @@
# Copyright James Hensman and Max Zwiessele 2014, 2015
-# Licensed under the GNU GPL version 3.0
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from . import linalg
diff --git a/GPy/util/erfcx.py b/GPy/util/erfcx.py
deleted file mode 100644
index f42e49f3..00000000
--- a/GPy/util/erfcx.py
+++ /dev/null
@@ -1,63 +0,0 @@
-## Copyright (C) 2010 Soren Hauberg
-##
-## Copyright James Hensman 2011
-##
-## This program is free software; you can redistribute it and/or modify it
-## under the terms of the GNU General Public License as published by
-## the Free Software Foundation; either version 3 of the License, or (at
-## your option) any later version.
-##
-## This program is distributed in the hope that it will be useful, but
-## WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-## General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program; see the file COPYING. If not, see
-## .
-
-import numpy as np
-
-def erfcx (arg):
- arg = np.atleast_1d(arg)
- assert(np.all(np.isreal(arg)),"erfcx: input must be real")
-
- ## Get precision dependent thresholds -- or not :p
- xneg = -26.628;
- xmax = 2.53e+307;
-
- ## Allocate output
- result = np.zeros (arg.shape)
-
- ## Find values where erfcx can be evaluated
- idx_neg = (arg < xneg);
- idx_max = (arg > xmax);
- idx = ~(idx_neg | idx_max);
-
- arg = arg [idx];
-
- ## Perform the actual computation
- t = 3.97886080735226 / (np.abs (arg) + 3.97886080735226);
- u = t - 0.5;
- y = (((((((((u * 0.00127109764952614092 + 1.19314022838340944e-4) * u \
- - 0.003963850973605135) * u - 8.70779635317295828e-4) * u + \
- 0.00773672528313526668) * u + 0.00383335126264887303) * u - \
- 0.0127223813782122755) * u - 0.0133823644533460069) * u + \
- 0.0161315329733252248) * u + 0.0390976845588484035) * u + \
- 0.00249367200053503304;
- y = ((((((((((((y * u - 0.0838864557023001992) * u - \
- 0.119463959964325415) * u + 0.0166207924969367356) * u + \
- 0.357524274449531043) * u + 0.805276408752910567) * u + \
- 1.18902982909273333) * u + 1.37040217682338167) * u + \
- 1.31314653831023098) * u + 1.07925515155856677) * u + \
- 0.774368199119538609) * u + 0.490165080585318424) * u + \
- 0.275374741597376782) * t;
-
- y [arg < 0] = 2 * np.exp (arg [arg < 0]**2) - y [arg < 0];
-
- ## Put the results back into something with the same size is the original input
- result [idx] = y;
- result [idx_neg] = np.inf;
- ## result (idx_max) = 0; # not needed as we initialise with zeros
- return(result)
-
diff --git a/GPy/util/gpu_init.py b/GPy/util/gpu_init.py
index 26dff0b3..0c496db3 100644
--- a/GPy/util/gpu_init.py
+++ b/GPy/util/gpu_init.py
@@ -16,33 +16,27 @@ try:
except:
pass
-try:
- if MPI_enabled and MPI.COMM_WORLD.size>1:
- from .parallel import get_id_within_node
- gpuid = get_id_within_node()
- import pycuda.driver
- pycuda.driver.init()
- if gpuid>=pycuda.driver.Device.count():
- print('['+MPI.Get_processor_name()+'] more processes than the GPU numbers!')
- #MPI.COMM_WORLD.Abort()
- raise
- gpu_device = pycuda.driver.Device(gpuid)
- gpu_context = gpu_device.make_context()
- gpu_initialized = True
- else:
- import pycuda.autoinit
- gpu_initialized = True
-except:
- pass
-try:
- from scikits.cuda import cublas
- import scikits.cuda.linalg as culinalg
- culinalg.init()
- cublas_handle = cublas.cublasCreate()
-except:
- pass
+def initGPU():
+ try:
+ if MPI_enabled and MPI.COMM_WORLD.size>1:
+ from .parallel import get_id_within_node
+ gpuid = get_id_within_node()
+ import pycuda.driver
+ pycuda.driver.init()
+ if gpuid>=pycuda.driver.Device.count():
+ print('['+MPI.Get_processor_name()+'] more processes than the GPU numbers!')
+ raise
+ gpu_device = pycuda.driver.Device(gpuid)
+ gpu_context = gpu_device.make_context()
+ gpu_initialized = True
+ else:
+ import pycuda.autoinit
+ gpu_initialized = True
+ except:
+ pass
+
def closeGPU():
if gpu_context is not None:
gpu_context.detach()
diff --git a/GPy/util/linalg_gpu.py b/GPy/util/linalg_gpu.py
index cba09dd3..db1c5317 100644
--- a/GPy/util/linalg_gpu.py
+++ b/GPy/util/linalg_gpu.py
@@ -61,12 +61,5 @@ try:
except:
pass
-try:
- import scikits.cuda.linalg as culinalg
- from scikits.cuda import cublas
- from scikits.cuda.cula import culaExceptions
-except:
- pass
-
diff --git a/GPy/util/ln_diff_erfs.py b/GPy/util/ln_diff_erfs.py
index c1137283..77610c63 100644
--- a/GPy/util/ln_diff_erfs.py
+++ b/GPy/util/ln_diff_erfs.py
@@ -2,12 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
#Only works for scipy 0.12+
-try:
- from scipy.special import erfcx, erf
-except ImportError:
- from scipy.special import erf
- from .erfcx import erfcx
-
+from scipy.special import erfcx, erf
import numpy as np
def ln_diff_erfs(x1, x2, return_sign=False):
@@ -17,7 +12,7 @@ def ln_diff_erfs(x1, x2, return_sign=False):
:param x2 : argument of the negative erf
:type x2: ndarray
:return: tuple containing (log(abs(erf(x1) - erf(x2))), sign(erf(x1) - erf(x2)))
-
+
Based on MATLAB code that was written by Antti Honkela and modified by David Luengo and originally derived from code by Neil Lawrence.
"""
x1 = np.require(x1).real
@@ -26,7 +21,7 @@ def ln_diff_erfs(x1, x2, return_sign=False):
x1 = np.reshape(x1, (1, 1))
if x2.size==1:
x2 = np.reshape(x2, (1, 1))
-
+
if x1.shape==x2.shape:
v = np.zeros_like(x1)
else:
@@ -36,7 +31,7 @@ def ln_diff_erfs(x1, x2, return_sign=False):
v = np.zeros(x1.shape)
else:
raise ValueError("This function does not broadcast unless provided with a scalar.")
-
+
if x1.size == 1:
x1 = np.tile(x1, x2.shape)
@@ -92,7 +87,7 @@ def ln_diff_erfs(x1, x2, return_sign=False):
v[flags] = np.log(erfcx(-_x1)
-erfcx(-_x2)*np.exp(_x1**2
-_x2**2))-_x1**2
-
+
# TODO: switch back on log of zero warnings.
if return_sign:
diff --git a/GPy/util/parallel.py b/GPy/util/parallel.py
index 880dae58..a2211945 100644
--- a/GPy/util/parallel.py
+++ b/GPy/util/parallel.py
@@ -39,3 +39,22 @@ def divide_data(datanum, rank, size):
size = datanum/size
offset = size*rank+residue
return offset, offset+size, datanum_list
+
+def optimize_parallel(model, optimizer=None, messages=True, max_iters=1000, outpath='.', interval=100, name=None):
+ from math import ceil
+ from datetime import datetime
+ import os
+ if name is None: name = model.name
+ stop = 0
+ for iter in range(int(ceil(float(max_iters)/interval))):
+ model.optimize(optimizer=optimizer, messages= True if messages and model.mpi_comm.rank==model.mpi_root else False, max_iters=interval)
+ if model.mpi_comm.rank==model.mpi_root:
+ timenow = datetime.now()
+ timestr = timenow.strftime('%Y:%m:%d_%H:%M:%S')
+ model.save(os.path.join(outpath, name+'_'+timestr+'.h5'))
+ opt = model.optimization_runs[-1]
+ if opt.funct_eval
+ImportError: No module named svigp
+/home/maxz/Documents/gpy/doc/GPy.core.rst:65: WARNING: autodoc: failed to import module u'GPy.core.symbolic'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ File "/home/maxz/Documents/gpy/GPy/core/symbolic.py", line 10, in
from sympy.utilities.lambdify import lambdastr, _imp_namespace, _get_namespace
ImportError: No module named lambdify
-/home/alans/Work/GPy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Indexable.unset_priors:1: WARNING: Inline emphasis start-string without end-string.
-/home/alans/Work/GPy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Nameable.hierarchy_name:4: WARNING: Field list ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Observable.notify_observers:5: SEVERE: Unexpected section title or transition.
-
-^^^^^^^^^^^^^^^^
-/home/alans/Work/GPy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Observable.notify_observers:6: WARNING: Definition list ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Parameterizable.traverse:1: WARNING: Inline emphasis start-string without end-string.
-/home/alans/Work/GPy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Parameterizable.traverse:1: WARNING: Inline strong start-string without end-string.
-/home/alans/Work/GPy/GPy/core/parameterization/parameterized.py:docstring of GPy.core.parameterization.parameterized.Parameterized:18: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/GPy/core/parameterization/parameterized.py:docstring of GPy.core.parameterization.parameterized.Parameterized:20: WARNING: Block quote ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/core/parameterization/ties_and_remappings.py:docstring of GPy.core.parameterization.ties_and_remappings.Tie:18: SEVERE: Unexpected section title or transition.
+/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Indexable.unset_priors:1: WARNING: Inline emphasis start-string without end-string.
+/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Nameable.hierarchy_name:4: WARNING: Field list ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Parameterizable.traverse:1: WARNING: Inline emphasis start-string without end-string.
+/home/maxz/Documents/gpy/GPy/core/parameterization/parameter_core.py:docstring of GPy.core.parameterization.parameter_core.Parameterizable.traverse:1: WARNING: Inline strong start-string without end-string.
+/home/maxz/Documents/gpy/GPy/core/parameterization/parameterized.py:docstring of GPy.core.parameterization.parameterized.Parameterized:18: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/GPy/core/parameterization/parameterized.py:docstring of GPy.core.parameterization.parameterized.Parameterized:20: WARNING: Block quote ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/core/parameterization/ties_and_remappings.py:docstring of GPy.core.parameterization.ties_and_remappings.Tie:18: SEVERE: Unexpected section title or transition.
================================
-/home/alans/Work/GPy/GPy/kern/_src/coregionalize.py:docstring of GPy.kern._src.coregionalize.Coregionalize:5: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/doc/GPy.kern._src.rst:73: WARNING: autodoc: failed to import module u'GPy.kern._src.hierarchical'; the following exception was raised:
+/home/maxz/Documents/gpy/doc/GPy.examples.rst:50: WARNING: autodoc: failed to import module u'GPy.examples.stochastic'; the following exception was raised:
Traceback (most recent call last):
- File "/home/alans/anaconda/envs/GPy/lib/python2.7/site-packages/sphinx/ext/autodoc.py", line 335, in import_object
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
__import__(self.modname)
- File "/home/alans/Work/GPy/GPy/kern/_src/hierarchical.py", line 4, in
- from kernpart import Kernpart
-ImportError: No module named kernpart
-/home/alans/Work/GPy/GPy/kern/_src/independent_outputs.py:docstring of GPy.kern._src.independent_outputs.IndependentOutputs:9: WARNING: Field list ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:12: WARNING: Block quote ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:22: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:23: WARNING: Block quote ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/doc/GPy.kern._src.rst:177: WARNING: autodoc: failed to import module u'GPy.kern._src.symbolic'; the following exception was raised:
+ImportError: No module named stochastic
+/home/maxz/Documents/gpy/doc/GPy.examples.rst:58: WARNING: autodoc: failed to import module u'GPy.examples.tutorials'; the following exception was raised:
Traceback (most recent call last):
- File "/home/alans/anaconda/envs/GPy/lib/python2.7/site-packages/sphinx/ext/autodoc.py", line 335, in import_object
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
__import__(self.modname)
- File "/home/alans/Work/GPy/GPy/kern/_src/symbolic.py", line 5, in
+ImportError: No module named tutorials
+/home/maxz/Documents/gpy/doc/GPy.inference.latent_function_inference.rst:82: WARNING: autodoc: failed to import module u'GPy.inference.latent_function_inference.var_dtc_gpu'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ImportError: No module named var_dtc_gpu
+/home/maxz/Documents/gpy/doc/GPy.inference.optimization.rst:42: WARNING: autodoc: failed to import module u'GPy.inference.optimization.sgd'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ImportError: No module named sgd
+/home/maxz/Documents/gpy/GPy/kern/_src/coregionalize.py:docstring of GPy.kern._src.coregionalize.Coregionalize:5: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/doc/GPy.kern._src.rst:73: WARNING: autodoc: failed to import module u'GPy.kern._src.hierarchical'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ImportError: No module named hierarchical
+/home/maxz/Documents/gpy/GPy/kern/_src/independent_outputs.py:docstring of GPy.kern._src.independent_outputs.IndependentOutputs:9: WARNING: Field list ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:24: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:22: WARNING: Inline literal start-string without end-string.
+/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:25: WARNING: Block quote ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Definition list ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Inline literal start-string without end-string.
+/home/maxz/Documents/gpy/GPy/kern/_src/stationary.py:docstring of GPy.kern._src.stationary.Stationary:27: WARNING: Inline interpreted text or phrase reference start-string without end-string.
+/home/maxz/Documents/gpy/doc/GPy.kern._src.rst:177: WARNING: autodoc: failed to import module u'GPy.kern._src.symbolic'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ File "/home/maxz/Documents/gpy/GPy/kern/_src/symbolic.py", line 5, in
from ...core.symbolic import Symbolic_core
- File "/home/alans/Work/GPy/GPy/core/symbolic.py", line 10, in
+ File "/home/maxz/Documents/gpy/GPy/core/symbolic.py", line 10, in
from sympy.utilities.lambdify import lambdastr, _imp_namespace, _get_namespace
ImportError: No module named lambdify
-/home/alans/Work/GPy/GPy/models/gp_kronecker_gaussian_regression.py:docstring of GPy.models.gp_kronecker_gaussian_regression.GPKroneckerGaussianRegression:13: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/GPy/models/gp_kronecker_gaussian_regression.py:docstring of GPy.models.gp_kronecker_gaussian_regression.GPKroneckerGaussianRegression:18: WARNING: Block quote ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/models/gp_var_gauss.py:docstring of GPy.models.gp_var_gauss.GPVariationalGaussianApproximation:9: WARNING: Definition list ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:32: WARNING: Field list ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:32: WARNING: Inline interpreted text or phrase reference start-string without end-string.
-/home/alans/Work/GPy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:34: WARNING: Definition list ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/models/sparse_gp_minibatch.py:docstring of GPy.models.sparse_gp_minibatch.SparseGPMiniBatch:2: WARNING: Block quote ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/plotting/matplot_dep/netpbmfile.py:docstring of GPy.plotting.matplot_dep.netpbmfile.imread:6: SEVERE: Unexpected section title.
+/home/maxz/Documents/gpy/GPy/models/gp_kronecker_gaussian_regression.py:docstring of GPy.models.gp_kronecker_gaussian_regression.GPKroneckerGaussianRegression:13: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/GPy/models/gp_kronecker_gaussian_regression.py:docstring of GPy.models.gp_kronecker_gaussian_regression.GPKroneckerGaussianRegression:18: WARNING: Block quote ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/doc/GPy.models.rst:66: WARNING: autodoc: failed to import module u'GPy.models.gp_multioutput_regression'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ImportError: No module named gp_multioutput_regression
+/home/maxz/Documents/gpy/GPy/models/gp_var_gauss.py:docstring of GPy.models.gp_var_gauss.GPVariationalGaussianApproximation:9: WARNING: Definition list ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:32: WARNING: Field list ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:32: WARNING: Inline interpreted text or phrase reference start-string without end-string.
+/home/maxz/Documents/gpy/GPy/models/mrd.py:docstring of GPy.models.mrd.MRD:34: WARNING: Definition list ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/doc/GPy.models.rst:138: WARNING: autodoc: failed to import module u'GPy.models.sparse_gp_multioutput_regression'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ImportError: No module named sparse_gp_multioutput_regression
+/home/maxz/Documents/gpy/doc/GPy.models.rst:178: WARNING: autodoc: failed to import module u'GPy.models.svigp_regression'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ImportError: No module named svigp_regression
+/home/maxz/Documents/gpy/GPy/plotting/matplot_dep/netpbmfile.py:docstring of GPy.plotting.matplot_dep.netpbmfile.imread:6: SEVERE: Unexpected section title.
Examples
--------
-/home/alans/Work/GPy/GPy/plotting/matplot_dep/netpbmfile.py:docstring of GPy.plotting.matplot_dep.netpbmfile.imsave:4: SEVERE: Unexpected section title.
+/home/maxz/Documents/gpy/GPy/plotting/matplot_dep/netpbmfile.py:docstring of GPy.plotting.matplot_dep.netpbmfile.imsave:4: SEVERE: Unexpected section title.
Examples
--------
-/home/alans/Work/GPy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_checkgrad:6: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_checkgrad:7: WARNING: Block quote ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_partial:7: WARNING: Definition list ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_partial:9: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/GPy/util/datasets.py:docstring of GPy.util.datasets.hapmap3:7: WARNING: Block quote ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/doc/GPy.util.rst:2: SEVERE: Duplicate ID: "module-GPy.util.diag".
-/home/alans/Work/GPy/GPy/util/netpbmfile.py:docstring of GPy.util.netpbmfile.imread:6: SEVERE: Unexpected section title.
+/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_checkgrad:6: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_checkgrad:7: WARNING: Block quote ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_partial:7: WARNING: Definition list ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/testing/likelihood_tests.py:docstring of GPy.testing.likelihood_tests.dparam_partial:9: ERROR: Unexpected indentation.
+docstring of GPy.util.datasets.hapmap3:7: WARNING: Block quote ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/doc/GPy.util.rst:74: WARNING: autodoc: failed to import module u'GPy.util.erfcx'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ImportError: No module named erfcx
+/home/maxz/Documents/gpy/doc/GPy.util.rst:146: WARNING: autodoc: failed to import module u'GPy.util.mpi'; the following exception was raised:
+Traceback (most recent call last):
+ File "/home/maxz/anaconda/lib/python2.7/site-packages/Sphinx-1.3.1-py2.7.egg/sphinx/ext/autodoc.py", line 385, in import_object
+ __import__(self.modname)
+ImportError: No module named mpi
+/home/maxz/Documents/gpy/GPy/util/netpbmfile.py:docstring of GPy.util.netpbmfile.imread:6: SEVERE: Unexpected section title.
Examples
--------
-/home/alans/Work/GPy/GPy/util/netpbmfile.py:docstring of GPy.util.netpbmfile.imsave:4: SEVERE: Unexpected section title.
+/home/maxz/Documents/gpy/GPy/util/netpbmfile.py:docstring of GPy.util.netpbmfile.imsave:4: SEVERE: Unexpected section title.
Examples
--------
-/home/alans/Work/GPy/doc/GPy.util.rst:2: SEVERE: Duplicate ID: "module-GPy.util.subarray_and_sorting".
-/home/alans/Work/GPy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:8: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:11: SEVERE: Unexpected section title.
+/home/maxz/Documents/gpy/doc/GPy.util.rst:2: SEVERE: Duplicate ID: "module-GPy.util.subarray_and_sorting".
+/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:8: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:11: SEVERE: Unexpected section title.
Examples:
=========
-/home/alans/Work/GPy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:19: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:21: WARNING: Block quote ends without a blank line; unexpected unindent.
-/home/alans/Work/GPy/doc/installation.rst:22: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/doc/installation.rst:27: ERROR: Unexpected indentation.
-/home/alans/Work/GPy/doc/tuto_creating_new_kernels.rst:58: WARNING: Inline literal start-string without end-string.
-/home/alans/Work/GPy/doc/tuto_creating_new_models.rst:24: ERROR: Unknown target name: "parameterized".
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:83: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:19: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/GPy/util/subarray_and_sorting.py:docstring of GPy.util.subarray_and_sorting.common_subarrays:21: WARNING: Block quote ends without a blank line; unexpected unindent.
+/home/maxz/Documents/gpy/GPy/util/block_matrices.py:docstring of GPy.util.block_matrices.block_dot:3: ERROR: Undefined substitution referenced: "A11.B11|B12.B12".
+/home/maxz/Documents/gpy/GPy/util/block_matrices.py:docstring of GPy.util.block_matrices.block_dot:3: ERROR: Undefined substitution referenced: "A21.B21|A22.B22".
+/home/maxz/Documents/gpy/doc/installation.rst:22: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/doc/installation.rst:27: ERROR: Unexpected indentation.
+/home/maxz/Documents/gpy/doc/tuto_creating_new_kernels.rst:58: WARNING: Inline literal start-string without end-string.
+/home/maxz/Documents/gpy/doc/tuto_creating_new_models.rst:24: ERROR: Unknown target name: "parameterized".
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:83: WARNING: Title underline too short.
Interacting with Parameters:
=======================
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:83: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:83: WARNING: Title underline too short.
Interacting with Parameters:
=======================
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:109: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:109: WARNING: Title underline too short.
Regular expressions
----------------
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:164: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:164: WARNING: Title underline too short.
Setting and fetching parameters `parameter_array`
------------------------------------------
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:164: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:164: WARNING: Title underline too short.
Setting and fetching parameters `parameter_array`
------------------------------------------
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:220: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:220: WARNING: Title underline too short.
Getting the model parameter's gradients
============================
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:220: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:220: WARNING: Title underline too short.
Getting the model parameter's gradients
============================
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:236: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:236: WARNING: Title underline too short.
Adjusting the model's constraints
================================
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:236: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:236: WARNING: Title underline too short.
Adjusting the model's constraints
================================
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:287: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:287: WARNING: Title underline too short.
Available Constraints
==============
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:287: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:287: WARNING: Title underline too short.
Available Constraints
==============
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:299: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:299: WARNING: Title underline too short.
Tying Parameters
============
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:299: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:299: WARNING: Title underline too short.
Tying Parameters
============
-/home/alans/Work/GPy/doc/tuto_parameterized.rst:3: WARNING: Title overline too short.
+/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:3: WARNING: Title overline too short.
*******************
Parameterization handling
*******************
-/home/alans/Work/GPy/doc/tuto_parameterized.rst:10: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:10: WARNING: Title underline too short.
Parameter handles
==============
-/home/alans/Work/GPy/doc/tuto_parameterized.rst:16: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:16: WARNING: Title underline too short.
:py:class:`~GPy.core.parameterization.parameterized.Parameterized`
==========
-/home/alans/Work/GPy/doc/tuto_parameterized.rst:16: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:16: WARNING: Title underline too short.
:py:class:`~GPy.core.parameterization.parameterized.Parameterized`
==========
-/home/alans/Work/GPy/doc/tuto_parameterized.rst:21: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:21: WARNING: Title underline too short.
:py:class:`~GPy.core.parameterization.param.Param`
===========
-/home/alans/Work/GPy/doc/tuto_parameterized.rst:21: WARNING: Title underline too short.
+/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:21: WARNING: Title underline too short.
:py:class:`~GPy.core.parameterization.param.Param`
===========
-/home/alans/Work/GPy/doc/installation.rst:: WARNING: document isn't included in any toctree
-/home/alans/Work/GPy/doc/kernel_implementation.rst:: WARNING: document isn't included in any toctree
-/home/alans/Work/GPy/doc/modules.rst:: WARNING: document isn't included in any toctree
-/home/alans/Work/GPy/doc/tuto_GP_regression.rst:: WARNING: document isn't included in any toctree
-/home/alans/Work/GPy/doc/tuto_creating_new_kernels.rst:: WARNING: document isn't included in any toctree
-/home/alans/Work/GPy/doc/tuto_creating_new_models.rst:: WARNING: document isn't included in any toctree
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:: WARNING: document isn't included in any toctree
-/home/alans/Work/GPy/doc/tuto_kernel_overview.rst:: WARNING: document isn't included in any toctree
-/home/alans/Work/GPy/doc/tuto_parameterized.rst:: WARNING: document isn't included in any toctree
-WARNING: dvipng command 'dvipng' cannot be run (needed for math display), check the pngmath_dvipng setting
-/home/alans/Work/GPy/doc/tuto_interacting_with_models.rst:336: WARNING: undefined label: creating_new_kernels (if the link has no caption the label must precede a section header)
-WARNING: html_static_path entry u'/home/alans/Work/GPy/doc/_static' does not exist
+/home/maxz/Documents/gpy/doc/installation.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/kernel_implementation.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/modules.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/tuto_GP_regression.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/tuto_creating_new_kernels.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/tuto_creating_new_models.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/tuto_kernel_overview.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/tuto_parameterized.rst:: WARNING: document isn't included in any toctree
+/home/maxz/Documents/gpy/doc/tuto_interacting_with_models.rst:336: WARNING: undefined label: creating_new_kernels (if the link has no caption the label must precede a section header)