maint: Wrap very long lines (> 450 chars)

This commit is contained in:
Julien Bect 2020-06-24 06:39:40 +02:00 committed by Neil Lawrence
parent 1d549ca5c6
commit d754bc12de
5 changed files with 58 additions and 15 deletions

View file

@@ -434,7 +434,14 @@ class CombinationKernel(Kern):
[setitem(i_s, (i, k._all_dims_active), k.input_sensitivity(summarize)) for i, k in enumerate(parts)]
return i_s
else:
raise NotImplementedError("Choose the kernel you want to get the sensitivity for. You need to override the default behaviour for getting the input sensitivity to be able to get the input sensitivity. For sum kernel it is the sum of all sensitivities, TODO: product kernel? Other kernels?, also TODO: shall we return all the sensitivities here in the combination kernel? So we can combine them however we want? This could lead to just plot all the sensitivities here...")
raise NotImplementedError("Choose the kernel you want to get the sensitivity for. "
"You need to override the default behaviour for getting "
"the input sensitivity to be able to get the input sensitivity. "
"For sum kernel it is the sum of all sensitivities, "
"TODO: product kernel? Other kernels?, also "
"TODO: shall we return all the sensitivities here in the combination "
"kernel? So we can combine them however we want? "
"This could lead to just plot all the sensitivities here...")
def _check_active_dims(self, X):
return

View file

@@ -1,13 +1,24 @@
"""
Introduction
"""Introduction
^^^^^^^^^^^^
The likelihood is :math:`p(y|f,X)` which is how well we will predict target values given inputs :math:`X` and our latent function :math:`f` (:math:`y` without noise). Marginal likelihood :math:`p(y|X)`, is the same as likelihood except we marginalize out the model :math:`f`. The importance of likelihoods in Gaussian Processes is in determining the 'best' values of kernel and noise hyperparameters to relate known, observed and unobserved data. The purpose of optimizing a model (e.g. :py:class:`GPy.models.GPRegression`) is to determine the 'best' hyperparameters i.e. those that minimize negative log marginal likelihood.
The likelihood is :math:`p(y|f,X)` which is how well we will predict
target values given inputs :math:`X` and our latent function :math:`f`
(:math:`y` without noise). Marginal likelihood :math:`p(y|X)`, is the
same as likelihood except we marginalize out the model :math:`f`. The
importance of likelihoods in Gaussian Processes is in determining the
'best' values of kernel and noise hyperparameters to relate known,
observed and unobserved data. The purpose of optimizing a model
(e.g. :py:class:`GPy.models.GPRegression`) is to determine the 'best'
hyperparameters i.e. those that minimize negative log marginal
likelihood.
.. inheritance-diagram:: GPy.likelihoods.likelihood GPy.likelihoods.mixed_noise.MixedNoise
:top-classes: GPy.core.parameterization.parameterized.Parameterized
Most likelihood classes inherit directly from :py:class:`GPy.likelihoods.likelihood`, although an intermediary class :py:class:`GPy.likelihoods.mixed_noise.MixedNoise` is used by :py:class:`GPy.likelihoods.multioutput_likelihood`.
Most likelihood classes inherit directly from
:py:class:`GPy.likelihoods.likelihood`, although an intermediary class
:py:class:`GPy.likelihoods.mixed_noise.MixedNoise` is used by
:py:class:`GPy.likelihoods.multioutput_likelihood`.
"""
@@ -22,4 +33,4 @@ from .mixed_noise import MixedNoise
from .binomial import Binomial
from .weibull import Weibull
from .loglogistic import LogLogistic
from .multioutput_likelihood import MultioutputLikelihood
from .multioutput_likelihood import MultioutputLikelihood

View file

@@ -13,10 +13,18 @@ from ..util.linalg import tdot
from .sparse_gp_regression_md import SparseGPRegressionMD
class GPMultioutRegressionMD(SparseGP):
"""
Gaussian Process model for multi-output regression with missing data
"""Gaussian Process model for multi-output regression with missing data
This is an implementation of Latent Variable Multiple Output Gaussian Processes (LVMOGP) in [Dai_et_al_2017]_. This model targets the use case, in which each output dimension is observed at a different set of inputs. The model takes a different data format: the inputs and outputs observations of all the output dimensions are stacked together correspondingly into two matrices. An extra array is used to indicate the index of output dimension for each data point. The output dimensions are indexed using integers from 0 to D-1 assuming there are D output dimensions.
This is an implementation of Latent Variable Multiple Output
Gaussian Processes (LVMOGP) in [Dai_et_al_2017]_. This model
targets the use case, in which each output dimension is
observed at a different set of inputs. The model takes a different
data format: the inputs and outputs observations of all the output
dimensions are stacked together correspondingly into two
matrices. An extra array is used to indicate the index of output
dimension for each data point. The output dimensions are indexed
using integers from 0 to D-1 assuming there are D output
dimensions.
.. rubric:: References

View file

@@ -10,10 +10,18 @@ from ..inference.latent_function_inference.vardtc_md import VarDTC_MD
from GPy.core.parameterization.variational import NormalPosterior
class SparseGPRegressionMD(SparseGP_MPI):
"""
Sparse Gaussian Process Regression with Missing Data
"""Sparse Gaussian Process Regression with Missing Data
This model targets the use case, in which there are multiple output dimensions (different dimensions are assumed to be independent following the same GP prior) and each output dimension is observed at a different set of inputs. The model takes a different data format: the inputs and outputs observations of all the output dimensions are stacked together correspondingly into two matrices. An extra array is used to indicate the index of output dimension for each data point. The output dimensions are indexed using integers from 0 to D-1 assuming there are D output dimensions.
This model targets the use case, in which there are multiple
output dimensions (different dimensions are assumed to be
independent following the same GP prior) and each output dimension
is observed at a different set of inputs. The model takes a
different data format: the inputs and outputs observations of all
the output dimensions are stacked together correspondingly into
two matrices. An extra array is used to indicate the index of
output dimension for each data point. The output dimensions are
indexed using integers from 0 to D-1 assuming there are D output
dimensions.
:param X: input observations.
:type X: numpy.ndarray
@@ -29,6 +37,7 @@ class SparseGPRegressionMD(SparseGP_MPI):
:type num_inducing: (int, int)
:param boolean individual_Y_noise: whether individual output dimensions have their own noise variance or not, boolean
:param str name: the name of the model
"""
def __init__(self, X, Y, indexD, kernel=None, Z=None, num_inducing=10, normalizer=None, mpi_comm=None, individual_Y_noise=False, name='sparse_gp'):

View file

@@ -1,8 +1,16 @@
"""
Introduction
"""Introduction
^^^^^^^^^^^^
:py:class:`GPy.plotting` effectively extends models based on :py:class:`GPy.core.gp.GP` (and other classes) by adding methods to plot useful charts. 'matplotlib', 'plotly' (online) and 'plotly' (offline) are supported. The methods in :py:class:`GPy.plotting` (and child classes :py:class:`GPy.plotting.gpy_plot` and :py:class:`GPy.plotting.matplot_dep`) are not intended to be called directly, but rather are 'injected' into other classes (notably :py:class:`GPy.core.gp.GP`). Documentation describing plots is best found associated with the model being plotted e.g. :py:class:`GPy.core.gp.GP.plot_confidence`.
:py:class:`GPy.plotting` effectively extends models based on
:py:class:`GPy.core.gp.GP` (and other classes) by adding methods to
plot useful charts. 'matplotlib', 'plotly' (online) and 'plotly'
(offline) are supported. The methods in :py:class:`GPy.plotting` (and
child classes :py:class:`GPy.plotting.gpy_plot` and
:py:class:`GPy.plotting.matplot_dep`) are not intended to be called
directly, but rather are 'injected' into other classes (notably
:py:class:`GPy.core.gp.GP`). Documentation describing plots is best
found associated with the model being plotted
e.g. :py:class:`GPy.core.gp.GP.plot_confidence`.
"""