Fixed docstring warnings - could still be mistakes

This commit is contained in:
James McMurray 2013-09-20 13:38:20 +01:00
parent c36a6b341c
commit be3880c0bd
20 changed files with 261 additions and 144 deletions

View file

@ -17,6 +17,7 @@ def rbf_inv(input_dim,variance=1., inv_lengthscale=None,ARD=False):
:type lengthscale: float
:param ARD: Auto Relevance Determination (one lengthscale per dimension)
:type ARD: Boolean
"""
part = parts.rbf_inv.RBFInv(input_dim,variance,inv_lengthscale,ARD)
return kern(input_dim, [part])
@ -33,6 +34,7 @@ def rbf(input_dim,variance=1., lengthscale=None,ARD=False):
:type lengthscale: float
:param ARD: Auto Relevance Determination (one lengthscale per dimension)
:type ARD: Boolean
"""
part = parts.rbf.RBF(input_dim,variance,lengthscale,ARD)
return kern(input_dim, [part])
@ -41,11 +43,13 @@ def linear(input_dim,variances=None,ARD=False):
"""
Construct a linear kernel.
Arguments
---------
input_dimD (int), obligatory
variances (np.ndarray)
ARD (boolean)
:param input_dim: dimensionality of the kernel, obligatory
:type input_dim: int
:param variances:
:type variances: np.ndarray
:param ARD: Auto Relevance Determination (one lengthscale per dimension)
:type ARD: Boolean
"""
part = parts.linear.Linear(input_dim,variances,ARD)
return kern(input_dim, [part])
@ -64,12 +68,14 @@ def mlp(input_dim,variance=1., weight_variance=None,bias_variance=100.,ARD=False
:type bias_variance: float
:param ARD: Auto Relevance Determination (allows for ARD version of covariance)
:type ARD: Boolean
"""
part = parts.mlp.MLP(input_dim,variance,weight_variance,bias_variance,ARD)
return kern(input_dim, [part])
def gibbs(input_dim,variance=1., mapping=None):
"""
Gibbs and MacKay non-stationary covariance function.
.. math::
@ -124,6 +130,7 @@ def poly(input_dim,variance=1., weight_variance=None,bias_variance=1.,degree=2,
:type degree: int
:param ARD: Auto Relevance Determination (allows for ARD version of covariance)
:type ARD: Boolean
"""
part = parts.poly.POLY(input_dim,variance,weight_variance,bias_variance,degree,ARD)
return kern(input_dim, [part])
@ -132,10 +139,11 @@ def white(input_dim,variance=1.):
"""
Construct a white kernel.
Arguments
---------
input_dimD (int), obligatory
variance (float)
:param input_dim: dimensionality of the kernel, obligatory
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
part = parts.white.White(input_dim,variance)
return kern(input_dim, [part])
@ -153,6 +161,7 @@ def exponential(input_dim,variance=1., lengthscale=None, ARD=False):
:type lengthscale: float
:param ARD: Auto Relevance Determination (one lengthscale per dimension)
:type ARD: Boolean
"""
part = parts.exponential.Exponential(input_dim,variance, lengthscale, ARD)
return kern(input_dim, [part])
@ -169,6 +178,7 @@ def Matern32(input_dim,variance=1., lengthscale=None, ARD=False):
:type lengthscale: float
:param ARD: Auto Relevance Determination (one lengthscale per dimension)
:type ARD: Boolean
"""
part = parts.Matern32.Matern32(input_dim,variance, lengthscale, ARD)
return kern(input_dim, [part])
@ -185,6 +195,7 @@ def Matern52(input_dim, variance=1., lengthscale=None, ARD=False):
:type lengthscale: float
:param ARD: Auto Relevance Determination (one lengthscale per dimension)
:type ARD: Boolean
"""
part = parts.Matern52.Matern52(input_dim, variance, lengthscale, ARD)
return kern(input_dim, [part])
@ -193,10 +204,11 @@ def bias(input_dim, variance=1.):
"""
Construct a bias kernel.
Arguments
---------
input_dim (int), obligatory
variance (float)
:param input_dim: dimensionality of the kernel, obligatory
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
part = parts.bias.Bias(input_dim, variance)
return kern(input_dim, [part])
@ -204,10 +216,15 @@ def bias(input_dim, variance=1.):
def finite_dimensional(input_dim, F, G, variances=1., weights=None):
"""
Construct a finite dimensional kernel.
input_dim: int - the number of input dimensions
F: np.array of functions with shape (n,) - the n basis functions
G: np.array with shape (n,n) - the Gram matrix associated to F
variances : np.ndarray with shape (n,)
:param input_dim: the number of input dimensions
:type input_dim: int
:param F: np.array of functions with shape (n,) - the n basis functions
:type F: np.array
:param G: np.array with shape (n,n) - the Gram matrix associated to F
:type G: np.array
:param variances: np.ndarray with shape (n,)
:type variances: np.ndarray
"""
part = parts.finite_dimensional.FiniteDimensional(input_dim, F, G, variances, weights)
return kern(input_dim, [part])
@ -220,6 +237,7 @@ def spline(input_dim, variance=1.):
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
part = parts.spline.Spline(input_dim, variance)
return kern(input_dim, [part])
@ -232,6 +250,7 @@ def Brownian(input_dim, variance=1.):
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
part = parts.Brownian.Brownian(input_dim, variance)
return kern(input_dim, [part])
@ -285,6 +304,7 @@ def periodic_exponential(input_dim=1, variance=1., lengthscale=None, period=2 *
:type period: float
:param n_freq: the number of frequencies considered for the periodic subspace
:type n_freq: int
"""
part = parts.periodic_exponential.PeriodicExponential(input_dim, variance, lengthscale, period, n_freq, lower, upper)
return kern(input_dim, [part])
@ -303,6 +323,7 @@ def periodic_Matern32(input_dim, variance=1., lengthscale=None, period=2 * np.pi
:type period: float
:param n_freq: the number of frequencies considered for the periodic subspace
:type n_freq: int
"""
part = parts.periodic_Matern32.PeriodicMatern32(input_dim, variance, lengthscale, period, n_freq, lower, upper)
return kern(input_dim, [part])
@ -321,6 +342,7 @@ def periodic_Matern52(input_dim, variance=1., lengthscale=None, period=2 * np.pi
:type period: float
:param n_freq: the number of frequencies considered for the periodic subspace
:type n_freq: int
"""
part = parts.periodic_Matern52.PeriodicMatern52(input_dim, variance, lengthscale, period, n_freq, lower, upper)
return kern(input_dim, [part])
@ -334,6 +356,7 @@ def prod(k1,k2,tensor=False):
:param tensor: The kernels are either multiply as functions defined on the same input space (default) or on the product of the input spaces
:type tensor: Boolean
:rtype: kernel object
"""
part = parts.prod.Prod(k1, k2, tensor)
return kern(part.input_dim, [part])
@ -349,10 +372,12 @@ def symmetric(k):
def coregionalize(num_outputs,W_columns=1, W=None, kappa=None):
"""
Coregionlization matrix B, of the form:
.. math::
\mathbf{B} = \mathbf{W}\mathbf{W}^\top + kappa \mathbf{I}
An intrinsic/linear coregionalization kernel of the form
An intrinsic/linear coregionalization kernel of the form:
.. math::
k_2(x, y)=\mathbf{B} k(x, y)
@ -422,7 +447,7 @@ def independent_outputs(k):
def hierarchical(k):
"""
TODO THis can't be right! Construct a kernel with independent outputs from an existing kernel
TODO This can't be right! Construct a kernel with independent outputs from an existing kernel
"""
# for sl in k.input_slices:
# assert (sl.start is None) and (sl.stop is None), "cannot adjust input slices! (TODO)"
@ -440,7 +465,8 @@ def build_lcm(input_dim, num_outputs, kernel_list = [], W_columns=1,W=None,kappa
:param W_columns: number tuples of the corregionalization parameters 'coregion_W'
:type W_columns: integer
..Note the kernels dimensionality is overwritten to fit input_dim
.. note:: the kernel's dimensionality is overwritten to fit input_dim
"""
for k in kernel_list:

View file

@ -78,13 +78,15 @@ class kern(Parameterized):
def plot_ARD(self, fignum=None, ax=None, title='', legend=False):
"""If an ARD kernel is present, it bar-plots the ARD parameters,
"""If an ARD kernel is present, it bar-plots the ARD parameters.
:param fignum: figure number of the plot
:param ax: matplotlib axis to plot on
:param title:
title of the plot,
pass '' to not print a title
pass None for a generic title
"""
if ax is None:
fig = pb.figure(fignum)
@ -175,8 +177,10 @@ class kern(Parameterized):
def add(self, other, tensor=False):
"""
Add another kernel to this one. Both kernels are defined on the same _space_
:param other: the other kernel to be added
:type other: GPy.kern
"""
if tensor:
D = self.input_dim + other.input_dim
@ -223,6 +227,7 @@ class kern(Parameterized):
:type other: GPy.kern
:param tensor: whether or not to use the tensor space (default is false).
:type tensor: bool
"""
K1 = self.copy()
K2 = other.copy()
@ -321,6 +326,7 @@ class kern(Parameterized):
:type X: np.ndarray (num_samples x input_dim)
:param X2: Observed data inputs (optional, defaults to X)
:type X2: np.ndarray (num_inducing x input_dim)
"""
assert X.shape[1] == self.input_dim
target = np.zeros(self.num_params)
@ -340,6 +346,7 @@ class kern(Parameterized):
:type X: np.ndarray (num_samples x input_dim)
:param X2: Observed data inputs (optional, defaults to X)
:type X2: np.ndarray (num_inducing x input_dim)"""
target = np.zeros_like(X)
if X2 is None:
[p.dK_dX(dL_dK, X[:, i_s], None, target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
@ -413,6 +420,7 @@ class kern(Parameterized):
:param Z: np.ndarray of inducing inputs (num_inducing x input_dim)
:param mu, S: np.ndarrays of means and variances (each num_samples x input_dim)
:returns psi2: np.ndarray (num_samples,num_inducing,num_inducing)
"""
target = np.zeros((mu.shape[0], Z.shape[0], Z.shape[0]))
[p.psi2(Z[:, i_s], mu[:, i_s], S[:, i_s], target) for p, i_s in zip(self.parts, self.input_slices)]
@ -657,6 +665,7 @@ def kern_test(kern, X=None, X2=None, verbose=False):
:type X: ndarray
:param X2: X2 input values to test the covariance function.
:type X2: ndarray
"""
pass_checks = True
if X==None:

View file

@ -11,12 +11,14 @@ class Coregionalize(Kernpart):
"""
Covariance function for intrinsic/linear coregionalization models
This covariance has the form
This covariance has the form:
.. math::
\mathbf{B} = \mathbf{W}\mathbf{W}^\top + kappa \mathbf{I}
An intrinsic/linear coregionalization covariance function of the form
An intrinsic/linear coregionalization covariance function of the form:
.. math::
k_2(x, y)=\mathbf{B} k(x, y)
it is obtained as the tensor product between a covariance function
@ -31,7 +33,7 @@ class Coregionalize(Kernpart):
:param kappa: a vector which allows the outputs to behave independently
:type kappa: numpy array of dimensionality (num_outputs,)
.. Note: see coregionalization examples in GPy.examples.regression for some usage.
.. note:: see coregionalization examples in GPy.examples.regression for some usage.
"""
def __init__(self,num_outputs,W_columns=1, W=None, kappa=None):
self.input_dim = 1

View file

@ -7,11 +7,13 @@ four_over_tau = 2./np.pi
class MLP(Kernpart):
"""
multi layer perceptron kernel (also known as arc sine kernel or neural network kernel)
Multi layer perceptron kernel (also known as arc sine kernel or neural network kernel)
.. math::
k(x,y) = \sigma^2 \frac{2}{\pi} \text{asin} \left(\frac{\sigma_w^2 x^\top y+\sigma_b^2}{\sqrt{\sigma_w^2x^\top x + \sigma_b^2 + 1}\sqrt{\sigma_w^2 y^\top y \sigma_b^2 +1}} \right)
k(x,y) = \\sigma^{2}\\frac{2}{\\pi } \\text{asin} \\left ( \\frac{ \\sigma_w^2 x^\\top y+\\sigma_b^2}{\\sqrt{\\sigma_w^2x^\\top x + \\sigma_b^2 + 1}\\sqrt{\\sigma_w^2 y^\\top y \\sigma_b^2 +1}} \\right )
:param input_dim: the number of input dimensions
:type input_dim: int
@ -24,6 +26,7 @@ class MLP(Kernpart):
:type ARD: Boolean
:rtype: Kernpart object
"""
def __init__(self, input_dim, variance=1., weight_variance=None, bias_variance=100., ARD=False):

View file

@ -7,19 +7,20 @@ four_over_tau = 2./np.pi
class POLY(Kernpart):
"""
polynomial kernel parameter initialisation. Included for completeness, but generally not recommended, is the polynomial kernel,
.. math::
k(x, y) = \sigma^2*(\sigma_w^2 x'y+\sigma_b^b)^d
The kernel parameters are \sigma^2 (variance), \sigma^2_w
(weight_variance), \sigma^2_b (bias_variance) and d
Polynomial kernel parameter initialisation. Included for completeness, but generally not recommended, is the polynomial kernel:
.. math::
k(x, y) = \sigma^2 (\sigma_w^2 x'y+\sigma_b^2)^d
The kernel parameters are :math:`\sigma^2` (variance), :math:`\sigma^2_w`
(weight_variance), :math:`\sigma^2_b` (bias_variance) and d
(degree). Only gradients of the first three are provided for
kernel optimisation, it is assumed that polynomial degree would
be set by hand.
The kernel is not recommended as it is badly behaved when the
\sigma^2_w*x'*y + \sigma^2_b has a magnitude greater than one. For completeness
:math:`\sigma^2_w x'y + \sigma^2_b` has a magnitude greater than one. For completeness
there is an automatic relevance determination version of this
kernel provided.
@ -32,7 +33,7 @@ class POLY(Kernpart):
:param bias_variance: the variance of the prior over bias parameters :math:`\sigma^2_b`
:param degree: the degree of the polynomial.
:type degree: int
:param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one weight variance parameter \sigma^2_w), otherwise there is one weight variance parameter per dimension.
:param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one weight variance parameter :math:`\sigma^2_w`), otherwise there is one weight variance parameter per dimension.
:type ARD: Boolean
:rtype: Kernpart object