Datasets.py updates should have been committed before.

This commit is contained in:
Neil Lawrence 2014-06-05 18:49:15 +01:00
parent 3003718ea9
commit ec1b9190f5
6 changed files with 0 additions and 792 deletions

View file

@@ -1,14 +0,0 @@
from _src.kern import Kern
from _src.rbf import RBF
from _src.linear import Linear, LinearFull
from _src.static import Bias, White
from _src.brownian import Brownian
from _src.sympykern import Sympykern
from _src.stationary import Exponential, Matern32, Matern52, ExpQuad, RatQuad, Cosine
from _src.mlp import MLP
from _src.periodic import PeriodicExponential, PeriodicMatern32, PeriodicMatern52
from _src.independent_outputs import IndependentOutputs, Hierarchical
from _src.coregionalize import Coregionalize
from _src.ssrbf import SSRBF # TODO: ZD: did you remove this?
from _src.ODE_UY import ODE_UY

View file

@@ -1,31 +0,0 @@
# Copyright (c) 2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf
import link_functions
from likelihood import Likelihood
from scipy import stats
# NOTE(review): this file imports `Likelihood`, not `Symbolic` -- the base
# class below is unresolved in this module; confirm the intended import.
class Negative_binomial(Symbolic):
    """
    Bernoulli likelihood.

    NOTE(review): the class name looks like a copy/paste leftover -- the
    docstring, the probit/Heaviside links and the 'Bernoulli' name string all
    describe a Bernoulli likelihood, not a negative binomial.

    .. math::
        p(y_{i}|\pi(f_{i})) = \\lambda(f_{i})^{y_{i}}(1-f_{i})^{1-y_{i}}

    .. Note::
        Y takes values in either {-1, 1} or {0, 1}.
        link function should have the domain [0, 1], e.g. probit (default) or Heaviside

    .. See also::
        likelihood.py, for the parent class
    """
    def __init__(self, gp_link=None):
        # Default to a probit link, which maps the real line onto [0, 1].
        if gp_link is None:
            gp_link = link_functions.Probit()
        # Bug fix: the original called super(Bernoulli, self), but no name
        # `Bernoulli` exists in this module, so construction raised NameError.
        # Use the actual class name.
        # NOTE(review): Symbolic.__init__ (see symbolic.py) takes
        # (likelihood, log_likelihood, gp_link, name) -- these positional
        # arguments look misaligned; confirm against the parent signature.
        super(Negative_binomial, self).__init__(gp_link, 'Bernoulli')
        # Log-concavity holds for the probit and Heaviside links, which keeps
        # Laplace/EP inference well behaved.
        if isinstance(gp_link, (link_functions.Heaviside, link_functions.Probit)):
            self.log_concave = True

View file

@@ -1,46 +0,0 @@
# Copyright (c) 2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
try:
import sympy as sym
sympy_available=True
from sympy.utilities.lambdify import lambdify
from GPy.util.symbolic import gammaln, ln_cum_gaussian, cum_gaussian
except ImportError:
sympy_available=False
import numpy as np
from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf
import link_functions
from symbolic import Symbolic
from scipy import stats
if sympy_available:
class Negative_binomial(Symbolic):
"""
Negative binomial
.. math::
p(y_{i}|\pi(f_{i})) = \left(\frac{r}{r+f_i}\right)^r \frac{\Gamma(r+y_i)}{y!\Gamma(r)}\left(\frac{f_i}{r+f_i}\right)^{y_i}
.. Note::
Y takes non zero integer values..
link function should have a positive domain, e.g. log (default).
.. See also::
symbolic.py, for the parent class
"""
def __init__(self, gp_link=None):
if gp_link is None:
gp_link = link_functions.Log()
dispersion = sym.Symbol('dispersion', positive=True, real=True)
y = sym.Symbol('y', nonnegative=True, integer=True)
f = sym.Symbol('f', positive=True, real=True)
log_pdf=dispersion*sym.log(dispersion) - (dispersion+y)*sym.log(dispersion+f) + gammaln(y+dispersion) - gammaln(y+1) - gammaln(dispersion) + y*sym.log(f)
super(Negative_binomial, self).__init__(log_pdf=log_pdf, gp_link=gp_link, name='Negative_binomial')
# TODO: Check this.
self.log_concave = False

View file

@@ -1,234 +0,0 @@
# Copyright (c) 2014 GPy Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import sympy as sp
from scipy import stats, special
import scipy as sp
import link_functions
from scipy import stats, integrate
from scipy.special import gammaln, gamma
from likelihood import Likelihood
from ..core.parameterization import Param
from ..core.parameterization.transformations import Logexp
class Symbolic(Likelihood):
    """
    Symbolic likelihood.
    Likelihood where the form of the likelihood is provided by a sympy expression.

    NOTE(review): this module imports `sympy as sp` and then `scipy as sp`,
    so `sp` in this class actually refers to scipy; the intended sympy calls
    (sp.exp, sp.log, sp.diff) cannot work as written.  `lambdify` is also not
    imported in this file.  Both need confirming against the import block.
    """
    def __init__(self, likelihood=None, log_likelihood=None, gp_link=None, name='symbolic', log_concave=False):
        # Identity link by default: the latent function is used directly.
        if gp_link is None:
            gp_link = link_functions.Identity()
        # Exactly one of `likelihood` / `log_likelihood` must be supplied.
        # (Python 2 raise syntax -- this file predates Python 3.)
        if likelihood is None and log_likelihood is None:
            raise ValueError, "You must provide an argument for the likelihood or the log likelihood."
        super(Symbolic, self).__init__(gp_link, name=name)
        # Keep both forms, deriving whichever one was not supplied.
        if likelihood is None:
            self._sp_likelihood = sp.exp(log_likelihood).simplify()
            self._sp_log_likelihood = log_likelihood
        if log_likelihood is None:
            self._sp_likelihood = likelihood
            self._sp_log_likelihood = sp.log(likelihood).simplify()
        # extract parameter names from the covariance
        # pull the variable names out of the symbolic covariance function.
        sp_vars = [e for e in self._sp_likelihood.atoms() if e.is_Symbol]
        # f subscript allows the likelihood of y to be dependent on multiple functions of f. The index of these functions would need to be specified in y_meta.
        self._sp_f = sorted([e for e in sp_vars if e.name[0:2]=='f_'],key=lambda x:int(x.name[2:]))
        # FIXME(review): the key int(x.name[2:]) is evaluated on symbols named
        # exactly 'y', so x.name[2:] is '' and int('') raises ValueError
        # whenever a y symbol is present in the expression.
        self._sp_y = sorted([e for e in sp_vars if e.name=='y'],key=lambda x:int(x.name[2:]))
        self._sp_theta = sorted([e for e in sp_vars if not (e.name[0:2]=='f_' or e.name=='y')],key=lambda e:e.name)
        # Canonical argument order shared by every lambdified function.
        self.arg_list = self._sp_y + self._sp_f + self._sp_theta
        # this gives us the arguments for first derivative
        first_derivative_arguments = self._sp_f + self._sp_theta
        second_derivative_arguments = {}
        for arg in first_derivative_arguments:
            if arg.name[0:2] == 'f_':
                # take all second derivatives with respect to everything
                second_derivative_arguments[arg.name] = first_derivative_arguments
        # FIXME(review): the dict built in the loop above is immediately
        # discarded by this rebinding -- one of the two is dead code.
        second_derivative_arguments = self._sp_f + self._sp_theta
        third_derivative_arguments = self._sp_f + self._sp_theta
        # FIXME(review): `derivative_arguments` is not defined anywhere in
        # this method -- both comprehensions raise NameError (presumably
        # `first_derivative_arguments` was intended).
        self._likelihood_derivatives = {theta.name : sp.diff(self._sp_likelihood,theta).simplify() for theta in derivative_arguments}
        self._log_likelihood_derivatives = {theta.name : sp.diff(self._sp_log_likelihood,theta).simplify() for theta in derivative_arguments}
        # Add parameters to the model.
        for theta in self._sp_theta:
            val = 1.0
            # TODO: what if user has passed a parameter vector, how should that be stored and interpreted? This is the old way before params class.
            # FIXME(review): `param` is not an argument of this __init__ --
            # NameError here (a `param=None` keyword was presumably dropped
            # from the signature).
            if param is not None:
                if param.has_key(theta):
                    val = param[theta]
            setattr(self, theta.name, Param(theta.name, val, None))
            self.add_parameters(getattr(self, theta.name))
        # By default it won't be log concave. It would be nice to check for this somehow though!
        self.log_concave = log_concave
        # initialise code arguments
        self._arguments = {}
        # generate the code for the likelihood and derivatives
        self._gen_code()
    def _gen_code(self):
        """Lambdify the symbolic likelihood, log likelihood and their
        derivatives into numpy-callable functions."""
        # Potentially run theano here as an option.
        self._likelihood_function = lambdify(self.arg_list, self._sp_likelihood, 'numpy')
        self._log_likelihood_function = lambdify(self.arg_list, self._sp_log_likelihood, 'numpy')
        # for derivatives we need gradient of the likelihood, gradient of log likelihood
        # FIXME(review): `self.derivatives` is never assigned; the constructor
        # stores `_likelihood_derivatives` / `_log_likelihood_derivatives`
        # instead, so this loop raises AttributeError.
        for key in self.derivatives.keys():
            setattr(self, '_log_likelihood_diff_' + key, lambdify(self.arg_list, self.derivatives[key], 'numpy'))
        pass
    def parameters_changed(self):
        # No cached precomputation yet; hook kept for the params framework.
        pass
    def update_gradients(self, grads):
        """
        Pull out the gradients, be careful as the order must match the order
        in which the parameters are added
        """
        for grad, theta in zip(grads, self._sp_theta):
            parameter = getattr(self, theta.name)
            setattr(parameter, 'gradient', grad)
    def _arguments_update(self, f, y):
        """Set up argument lists for the derivatives."""
        # Could check if this needs doing or not, there could
        # definitely be some computational savings by checking for
        # parameter updates here.
        # FIXME(review): the loop variables shadow the `f` and `y` arguments,
        # so after the first iteration `f`/`y` are sympy symbols and the
        # column slicing below fails for i > 0.
        for i, f in enumerate(self._sp_f):
            self._arguments[f.name] = f[:, i][:, None]
        for i, y in enumerate(self._sp_y):
            self._arguments[y.name] = y[:, i][:, None]
        for theta in self._sp_theta:
            self._arguments[theta.name] = np.asarray(getattr(self, theta.name))
    def pdf_link(self, inv_link_f, y, Y_metadata=None):
        """
        Likelihood function given inverse link of f.

        :param inv_link_f: inverse link of latent variables.
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used here
        :returns: likelihood evaluated for this point
        :rtype: float
        """
        assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
        self._arguments_update(inv_link_f, y)
        l = self._likelihood_function(**self._arguments)
        # The likelihood factorises over data points, so the joint is a product.
        return np.prod(l)
    def logpdf_link(self, inv_link_f, y, Y_metadata=None):
        """
        Log Likelihood Function given inverse link of latent variables.

        :param inv_link_f: latent variables (inverse link of f)
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata
        :returns: log likelihood evaluated for this point
        :rtype: float
        """
        assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
        self._arguments_update(inv_link_f, y)
        ll = self._log_likelihood_function(**self._arguments)
        # Factorising likelihood: log of the product is the sum of logs.
        return np.sum(ll)
    def dlogpdf_dlink(self, inv_link_f, y, Y_metadata=None):
        """
        Gradient of log likelihood with respect to the inverse link function.

        :param inv_link_f: latent variables (inverse link of f)
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata
        :returns: gradient of likelihood with respect to each point.
        :rtype: Nx1 array
        """
        assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
        self._arguments_update(inv_link_f, y)
        # FIXME(review): _gen_code stores the derivative functions as
        # attributes named '_log_likelihood_diff_<key>', but this indexes a
        # dict `self._log_likelihood_diff` that is never created.
        return self._log_likelihood_diff['f_0'](**self._arguments)
    # NOTE(review): this first d2logpdf_dlink2 definition is shadowed by the
    # second definition of the same name further down the class body.
    def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
        raise NotImplementedError
    def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None):
        raise NotImplementedError
    def dlogpdf_link_dtheta(self, inv_link_f, y, Y_metadata=None):
        raise NotImplementedError
    def dlogpdf_dlink_dtheta(self, inv_link_f, y, Y_metadata=None):
        raise NotImplementedError
    def d2logpdf_dlink2_dtheta(self, inv_link_f, y, Y_metadata=None):
        raise NotImplementedError
    def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
        """
        Hessian at y, given link(f), w.r.t link(f)
        i.e. second derivative logpdf at y given link(f_i) and link(f_j) w.r.t link(f_i) and link(f_j)
        The hessian will be 0 unless i == j

        .. math::
            \\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}\\lambda(f)} = \\frac{(v+1)((y_{i}-\lambda(f_{i}))^{2} - \\sigma^{2}v)}{((y_{i}-\lambda(f_{i}))^{2} + \\sigma^{2}v)^{2}}

        :param inv_link_f: latent variables link(f)
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used here
        :returns: Diagonal of hessian matrix (second derivative of likelihood evaluated at points f)
        :rtype: Nx1 array

        .. Note::
            Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
            (the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
        """
        assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
        # FIXME(review): Student-t leftover -- `self.v` and `self.sigma2` are
        # never defined on this class; this raises AttributeError.
        e = y - inv_link_f
        hess = ((self.v + 1)*(e**2 - self.v*self.sigma2)) / ((self.sigma2*self.v + e**2)**2)
        return hess
    def predictive_mean(self, mu, sigma, Y_metadata=None):
        return self.gp_link.transf(mu) # only true if link is monotonic, which it is.
    def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None):
        # FIXME(review): Student-t leftover -- `self.deg_free` is undefined
        # here and `StudentT` is not a name in this module.
        if self.deg_free <2.:
            return np.empty(mu.shape)*np.nan #not defined for small degrees of freedom
        else:
            return super(StudentT, self).predictive_variance(mu, variance, predictive_mean, Y_metadata)
    def conditional_mean(self, gp):
        return self.gp_link.transf(gp)
    def conditional_variance(self, gp):
        # FIXME(review): Student-t leftover -- `self.deg_free` is undefined.
        return self.deg_free/(self.deg_free - 2.)
    def samples(self, gp, Y_metadata=None):
        """
        Returns a set of samples of observations based on a given value of the latent variable.

        :param gp: latent variable
        """
        # NOTE(review): unfinished -- flattens the input and returns None.
        orig_shape = gp.shape
        gp = gp.flatten()
        pass

View file

@@ -1,318 +0,0 @@
# Copyright (c) 2012, 2013 Ricardo Andrade
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import sympy as sp
from scipy import stats, special
import scipy as sp
import link_functions
from scipy import stats, integrate
from scipy.special import gammaln, gamma
from likelihood import Likelihood
from ..core.parameterization import Param
from ..core.parameterization.transformations import Logexp
class Symbolic(Likelihood):
    """
    Symbolic likelihood.
    Likelihood where the form of the likelihood is provided by a sympy expression.

    NOTE(review): this module imports `sympy as sp` and then `scipy as sp`,
    so `sp` below refers to scipy; the intended sympy calls cannot work as
    written, and `lambdify` is never imported.  Most derivative methods below
    are Student-t leftovers relying on `self.v` / `self.sigma2` /
    `self.deg_free`, none of which this class defines.
    """
    def __init__(self, likelihood=None, log_likelihood=None, gp_link=None, name='symbolic', log_concave=False):
        # Identity link by default: the latent function is used directly.
        if gp_link is None:
            gp_link = link_functions.Identity()
        # Exactly one of `likelihood` / `log_likelihood` must be supplied.
        # (Python 2 raise syntax -- this file predates Python 3.)
        if likelihood is None and log_likelihood is None:
            raise ValueError, "You must provide an argument for the likelihood or the log likelihood."
        super(Symbolic, self).__init__(gp_link, name=name)
        # Keep both forms, deriving whichever one was not supplied.
        if likelihood is None:
            self._sp_likelihood = sp.exp(log_likelihood).simplify()
            self._sp_log_likelihood = log_likelihood
        if log_likelihood is None:
            self._sp_likelihood = likelihood
            self._sp_log_likelihood = sp.log(likelihood).simplify()
        # extract parameter names from the covariance
        # pull the variable names out of the symbolic covariance function.
        sp_vars = [e for e in self._sp_likelihood.atoms() if e.is_Symbol]
        # Symbols named 'f_<i>' / 'y_<i>' are latent functions / outputs;
        # everything else is treated as a likelihood parameter.
        self._sp_f = sorted([e for e in sp_vars if e.name[0:2]=='f_'],key=lambda x:int(x.name[2:]))
        self._sp_y = sorted([e for e in sp_vars if e.name[0:2]=='y_'],key=lambda x:int(x.name[2:]))
        self._sp_theta = sorted([e for e in sp_vars if not (e.name[0:2]=='f_' or e.name[0:2]=='y_')],key=lambda e:e.name)
        # Canonical argument order shared by every lambdified function.
        self.arg_list = self._sp_y + self._sp_f + self._sp_theta
        first_derivative_arguments = self._sp_f + self._sp_theta
        second_derivative_arguments = self._sp_f + self._sp_theta
        third_derivative_arguments = self._sp_f + self._sp_theta
        # FIXME(review): `derivative_arguments` is not defined anywhere in
        # this method -- both comprehensions raise NameError (presumably
        # `first_derivative_arguments` was intended).
        self._likelihood_derivatives = {theta.name : sp.diff(self._sp_likelihood,theta).simplify() for theta in derivative_arguments}
        self._log_likelihood_derivatives = {theta.name : sp.diff(self._sp_log_likelihood,theta).simplify() for theta in derivative_arguments}
        # Add parameters to the model.
        for theta in self._sp_theta:
            val = 1.0
            # TODO: what if user has passed a parameter vector, how should that be stored and interpreted? This is the old way before params class.
            # FIXME(review): `param` is not an argument of this __init__ --
            # NameError here (a `param=None` keyword was presumably dropped).
            if param is not None:
                if param.has_key(theta):
                    val = param[theta]
            setattr(self, theta.name, Param(theta.name, val, None))
            self.add_parameters(getattr(self, theta.name))
        # By default it won't be log concave. It would be nice to check for this somehow though!
        self.log_concave = log_concave
        # initialise code arguments
        self._arguments = {}
        # generate the code for the covariance functions
        self._gen_code()
    def _gen_code(self):
        """Lambdify the symbolic likelihood, log likelihood and their
        derivatives into numpy-callable functions."""
        # Potentially run theano here as an option.
        self._likelihood_function = lambdify(self.arg_list, self._sp_likelihood, 'numpy')
        self._log_likelihood_function = lambdify(self.arg_list, self._sp_log_likelihood, 'numpy')
        # FIXME(review): `self.derivatives` is never assigned; the constructor
        # stores `_likelihood_derivatives` / `_log_likelihood_derivatives`.
        for key in self.derivatives.keys():
            setattr(self, '_likelihood_diff_' + key, lambdify(self.arg_list, self.derivatives[key], 'numpy'))
        pass
    def parameters_changed(self):
        # No cached precomputation yet; hook kept for the params framework.
        pass
    def update_gradients(self, grads):
        """
        Pull out the gradients, be careful as the order must match the order
        in which the parameters are added
        """
        # NOTE(review): no-op -- parameter gradients are never propagated.
        #self.sigma2.gradient = grads[0]
        #self.v.gradient = grads[1]
        pass
    def _arguments_update(self, f, y):
        """Set up argument lists for the derivatives."""
        # Could check if this needs doing or not, there could
        # definitely be some computational savings by checking for
        # parameter updates here.
        # FIXME(review): the loop variables shadow the `f` and `y` arguments,
        # and the second loop stores y-data under `f.name` (the symbol left
        # over from the first loop) instead of `y.name`.
        for i, f in enumerate(self._sp_f):
            self._arguments[f.name] = f[:, i][:, None]
        for i, y in enumerate(self._sp_y):
            self._arguments[f.name] = y[:, i][:, None]
        for theta in self._sp_theta:
            self._arguments[theta.name] = np.asarray(getattr(self, theta.name))
    def pdf_link(self, link_f, y, Y_metadata=None):
        """
        Likelihood function given link(f)

        .. math::
            p(y_{i}|\\lambda(f_{i})) = \\frac{\\Gamma\\left(\\frac{v+1}{2}\\right)}{\\Gamma\\left(\\frac{v}{2}\\right)\\sqrt{v\\pi\\sigma^{2}}}\\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - \\lambda(f_{i}))^{2}}{\\sigma^{2}}\\right)\\right)^{\\frac{-v+1}{2}}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in student t distribution
        :returns: likelihood evaluated for this point
        :rtype: float
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        self._arguments_update(link_f, y)
        l = self._likelihood_function(**self._arguments)
        # The likelihood factorises over data points, so the joint is a product.
        return np.prod(l)
    def logpdf_link(self, link_f, y, Y_metadata=None):
        """
        Log Likelihood Function given link(f)

        .. math::
            \\ln p(y_{i}|\lambda(f_{i})) = \\ln \\Gamma\\left(\\frac{v+1}{2}\\right) - \\ln \\Gamma\\left(\\frac{v}{2}\\right) - \\ln \\sqrt{v \\pi\\sigma^{2}} - \\frac{v+1}{2}\\ln \\left(1 + \\frac{1}{v}\\left(\\frac{(y_{i} - \lambda(f_{i}))^{2}}{\\sigma^{2}}\\right)\\right)

        :param link_f: latent variables (link(f))
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in student t distribution
        :returns: likelihood evaluated for this point
        :rtype: float
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        self._arguments_update(link_f, y)
        ll = self._log_likelihood_function(**self._arguments)
        # Factorising likelihood: log of the product is the sum of logs.
        return np.sum(ll)
    def dlogpdf_dlink(self, link_f, y, Y_metadata=None):
        """
        Gradient of the log likelihood function at y, given link(f) w.r.t link(f)

        .. math::
            \\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{d\\lambda(f)} = \\frac{(v+1)(y_{i}-\lambda(f_{i}))}{(y_{i}-\lambda(f_{i}))^{2} + \\sigma^{2}v}

        :param link_f: latent variables (f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in student t distribution
        :returns: gradient of likelihood evaluated at points
        :rtype: Nx1 array
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        # FIXME(review): Student-t leftover -- `self.v` / `self.sigma2` are
        # never defined on this class (AttributeError at runtime).
        e = y - link_f
        grad = ((self.v + 1) * e) / (self.v * self.sigma2 + (e**2))
        return grad
    def d2logpdf_dlink2(self, link_f, y, Y_metadata=None):
        """
        Hessian at y, given link(f), w.r.t link(f)
        i.e. second derivative logpdf at y given link(f_i) and link(f_j) w.r.t link(f_i) and link(f_j)
        The hessian will be 0 unless i == j

        .. math::
            \\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}\\lambda(f)} = \\frac{(v+1)((y_{i}-\lambda(f_{i}))^{2} - \\sigma^{2}v)}{((y_{i}-\lambda(f_{i}))^{2} + \\sigma^{2}v)^{2}}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in student t distribution
        :returns: Diagonal of hessian matrix (second derivative of likelihood evaluated at points f)
        :rtype: Nx1 array

        .. Note::
            Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
            (the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        # FIXME(review): Student-t leftover -- undefined self.v / self.sigma2.
        e = y - link_f
        hess = ((self.v + 1)*(e**2 - self.v*self.sigma2)) / ((self.sigma2*self.v + e**2)**2)
        return hess
    def d3logpdf_dlink3(self, link_f, y, Y_metadata=None):
        """
        Third order derivative log-likelihood function at y given link(f) w.r.t link(f)

        .. math::
            \\frac{d^{3} \\ln p(y_{i}|\lambda(f_{i}))}{d^{3}\\lambda(f)} = \\frac{-2(v+1)((y_{i} - \lambda(f_{i}))^3 - 3(y_{i} - \lambda(f_{i})) \\sigma^{2} v))}{((y_{i} - \lambda(f_{i})) + \\sigma^{2} v)^3}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in student t distribution
        :returns: third derivative of likelihood evaluated at points f
        :rtype: Nx1 array
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        # FIXME(review): Student-t leftover -- undefined self.v / self.sigma2.
        e = y - link_f
        d3lik_dlink3 = ( -(2*(self.v + 1)*(-e)*(e**2 - 3*self.v*self.sigma2)) /
                          ((e**2 + self.sigma2*self.v)**3)
                       )
        return d3lik_dlink3
    def dlogpdf_link_dvar(self, link_f, y, Y_metadata=None):
        """
        Gradient of the log-likelihood function at y given f, w.r.t variance parameter (t_noise)

        .. math::
            \\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{d\\sigma^{2}} = \\frac{v((y_{i} - \lambda(f_{i}))^{2} - \\sigma^{2})}{2\\sigma^{2}(\\sigma^{2}v + (y_{i} - \lambda(f_{i}))^{2})}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in student t distribution
        :returns: derivative of likelihood evaluated at points f w.r.t variance parameter
        :rtype: float
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        # FIXME(review): Student-t leftover -- undefined self.v / self.sigma2.
        e = y - link_f
        dlogpdf_dvar = self.v*(e**2 - self.sigma2)/(2*self.sigma2*(self.sigma2*self.v + e**2))
        return np.sum(dlogpdf_dvar)
    def dlogpdf_dlink_dvar(self, link_f, y, Y_metadata=None):
        """
        Derivative of the dlogpdf_dlink w.r.t variance parameter (t_noise)

        .. math::
            \\frac{d}{d\\sigma^{2}}(\\frac{d \\ln p(y_{i}|\lambda(f_{i}))}{df}) = \\frac{-2\\sigma v(v + 1)(y_{i}-\lambda(f_{i}))}{(y_{i}-\lambda(f_{i}))^2 + \\sigma^2 v)^2}

        :param link_f: latent variables link_f
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in student t distribution
        :returns: derivative of likelihood evaluated at points f w.r.t variance parameter
        :rtype: Nx1 array
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        # FIXME(review): Student-t leftover -- undefined self.v / self.sigma2.
        e = y - link_f
        dlogpdf_dlink_dvar = (self.v*(self.v+1)*(-e))/((self.sigma2*self.v + e**2)**2)
        return dlogpdf_dlink_dvar
    def d2logpdf_dlink2_dvar(self, link_f, y, Y_metadata=None):
        """
        Gradient of the hessian (d2logpdf_dlink2) w.r.t variance parameter (t_noise)

        .. math::
            \\frac{d}{d\\sigma^{2}}(\\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}f}) = \\frac{v(v+1)(\\sigma^{2}v - 3(y_{i} - \lambda(f_{i}))^{2})}{(\\sigma^{2}v + (y_{i} - \lambda(f_{i}))^{2})^{3}}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in student t distribution
        :returns: derivative of hessian evaluated at points f and f_j w.r.t variance parameter
        :rtype: Nx1 array
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        # FIXME(review): Student-t leftover -- undefined self.v / self.sigma2.
        e = y - link_f
        d2logpdf_dlink2_dvar = ( (self.v*(self.v+1)*(self.sigma2*self.v - 3*(e**2)))
                                  / ((self.sigma2*self.v + (e**2))**3)
                               )
        return d2logpdf_dlink2_dvar
    def dlogpdf_link_dtheta(self, f, y, Y_metadata=None):
        # Stack per-parameter derivatives; the degrees-of-freedom gradient is
        # a zero placeholder (see FIXME below).
        dlogpdf_dvar = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata)
        dlogpdf_dv = np.zeros_like(dlogpdf_dvar) #FIXME: Not done yet
        return np.hstack((dlogpdf_dvar, dlogpdf_dv))
    def dlogpdf_dlink_dtheta(self, f, y, Y_metadata=None):
        dlogpdf_dlink_dvar = self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata)
        dlogpdf_dlink_dv = np.zeros_like(dlogpdf_dlink_dvar) #FIXME: Not done yet
        return np.hstack((dlogpdf_dlink_dvar, dlogpdf_dlink_dv))
    def d2logpdf_dlink2_dtheta(self, f, y, Y_metadata=None):
        d2logpdf_dlink2_dvar = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata)
        d2logpdf_dlink2_dv = np.zeros_like(d2logpdf_dlink2_dvar) #FIXME: Not done yet
        return np.hstack((d2logpdf_dlink2_dvar, d2logpdf_dlink2_dv))
    def predictive_mean(self, mu, sigma, Y_metadata=None):
        return self.gp_link.transf(mu) # only true if link is monotonic, which it is.
    def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None):
        # FIXME(review): Student-t leftover -- `self.deg_free` is undefined
        # here and `StudentT` is not a name in this module.
        if self.deg_free <2.:
            return np.empty(mu.shape)*np.nan #not defined for small degrees of freedom
        else:
            return super(StudentT, self).predictive_variance(mu, variance, predictive_mean, Y_metadata)
    def conditional_mean(self, gp):
        return self.gp_link.transf(gp)
    def conditional_variance(self, gp):
        # FIXME(review): Student-t leftover -- `self.deg_free` is undefined.
        return self.deg_free/(self.deg_free - 2.)
    def samples(self, gp, Y_metadata=None):
        """
        Returns a set of samples of observations based on a given value of the latent variable.

        :param gp: latent variable
        """
        # NOTE(review): unfinished -- flattens the input and returns None.
        orig_shape = gp.shape
        gp = gp.flatten()
        pass

View file

@@ -1,149 +0,0 @@
# Copyright (c) 2014 GPy Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
try:
import sympy as sym
sympy_available=True
from sympy.utilities.lambdify import lambdify
from GPy.util.symbolic import stabilise
except ImportError:
sympy_available=False
from ..core.mapping import Mapping, Bijective_mapping
import numpy as np
from scipy.special import gammaln, gamma, erf, erfc, erfcx, polygamma
from GPy.util.functions import normcdf, normcdfln, logistic, logisticln
from ..core.parameterization import Param
if sympy_available:
    class Symbolic(Mapping):
        """
        Symbolic likelihood.
        Likelihood where the form of the likelihood is provided by a sympy expression.

        NOTE(review): despite the docstring, this is a *Mapping* built from a
        sympy expression, not a likelihood.  Several attributes used below
        (`self.symbolic`, `self._sym_x`, `self._sym_theta`, `self.namespace`,
        `self._f_function`, `self._derivative_code`) are never initialised in
        this class -- confirm whether Mapping provides them.
        """
        def __init__(self, f=None, logZ=None, name='symbolic', param=None, func_modules=[]):
            # (Python 2 raise syntax -- this file predates Python 3.)
            if f is None:
                raise ValueError, "You must provide an argument for the function."
            # FIXME(review): `func_modules=[]` is a mutable default that is
            # mutated in place by `+=` below, so the extra modules accumulate
            # across every instance constructed with the default.
            self.func_modules = func_modules
            # Extra name->function mappings handed to sympy's lambdify-style
            # code generation, falling back to numpy.
            self.func_modules += [{'gamma':gamma,
                                   'gammaln':gammaln,
                                   'erf':erf, 'erfc':erfc,
                                   'erfcx':erfcx,
                                   'polygamma':polygamma,
                                   'normcdf':normcdf,
                                   'normcdfln':normcdfln,
                                   'logistic':logistic,
                                   'logisticln':logisticln},
                                  'numpy']
            # FIXME(review): `gp_link` is not defined in this scope (this is a
            # Mapping, not a likelihood) -- NameError at construction.
            super(Symbolic, self).__init__(gp_link, name=name)
            # NOTE(review): `self.symbolic` is indexed without being created
            # first -- assumes the parent class initialises it; confirm.
            self.symbolic['function'] = f
            # pull the variable names out of the symbolic pdf
            sym_vars = [e for e in f.atoms() if e.is_Symbol]
            self.symbolic['x'] = [e for e in sym_vars if e.name[:2]=='x_']
            # FIXME(review): the inputs were stored under key 'x' above, so
            # this lookup of 'f' raises KeyError (presumably 'x' was meant).
            if not self.symbolic['f']:
                raise ValueError('No variable x in f().')
            self.symbolic['theta'] = sorted([e for e in sym_vars if not e.name[:2]=='x_'],key=lambda e:e.name)
            # FIXME(review): unclosed list bracket on the next line -- this is
            # a SyntaxError as committed (missing ']' at end of comprehension).
            theta_names = [theta.name for theta in self.symbolic['theta']
            # These are all the arguments need to compute the mapping.
            self.arg_list = self.symbolic['x'] + self.symbolic['theta']
            # these are arguments for computing derivatives.
            derivative_arguments = self.arg_list
            # Do symbolic work to compute derivatives.
            self.symbolic['derivatives'] = {theta.name : stabilise(sym.diff(f,theta)) for theta in derivative_arguments}
            # Add parameters to the model.
            # FIXME(review): parameters were stored in self.symbolic['theta'];
            # `self._sym_theta` is never assigned -- AttributeError here.
            for theta in self._sym_theta:
                val = 1.0
                # TODO: need to decide how to handle user passing values for the se parameter vectors.
                if param is not None:
                    if param.has_key(theta.name):
                        val = param[theta.name]
                setattr(self, theta.name, Param(theta.name, val, None))
                self.add_parameters(getattr(self, theta.name))
            # initialise code arguments
            self._arguments = {}
            # generate the code for the pdf and derivatives
            self._gen_code()
        def _gen_code(self):
            """Generate the code from the symbolic parts that will be used for likleihod computation."""
            # FIXME(review): `GPy` is not imported in this module (only
            # specific names are), and the helper module imported above is
            # GPy.util.functions, not GPy.util.function -- confirm the path.
            self.code = GPy.util.function.gen_code(self.symbolic)
        def parameters_changed(self):
            # do all the precomputation codes.
            # FIXME(review): objects have no `setattr` method -- this should
            # be the builtin setattr(self, variable, ...); `self.namespace`
            # is also never defined.
            for variable, code in self.code['precompute'].items():
                self.setattr(variable, eval(code, self.namespace))
        def update_gradients(self, grads):
            """
            Push evaluated derivative code into each parameter's gradient.
            """
            # FIXME(review): objects have no `getattr` method, and Param has
            # no `setattr` -- this should use the builtins.
            for param, code in self.code['derivatives'].items():
                self.getattr(param).setattr('gradient',
                                            eval(code, self.namespace))
            pass
        def _arguments_update(self, x):
            """Set up argument lists for the derivatives."""
            # If we do make use of Theano, then at this point we would
            # need to do a lot of precomputation to ensure that the
            # likelihoods and gradients are computed together, then check
            # for parameter changes before updating.
            # FIXME(review): `self._sym_x` / `self._sym_theta` are never
            # assigned (the symbols live in self.symbolic['x'] / ['theta']).
            for i, fvar in enumerate(self._sym_x):
                self._arguments[fvar.name] = x[:, i]
            for theta in self._sym_theta:
                self._arguments[theta.name] = np.asarray(getattr(self, theta.name))
        def f(self, x):
            """
            Evaluate the mapping at x.

            NOTE(review): the docstring below and the body are likelihood
            leftovers -- `inv_link_f` and `y` are not parameters of this
            method (NameError), and `self._f_function` is never defined.

            :param inv_link_f: inverse link of latent variables.
            :type inv_link_f: Nx1 array
            :param y: data
            :type y: Nx1 array
            :param Y_metadata: Y_metadata which is not used in student t distribution
            :returns: likelihood evaluated for this point
            :rtype: float
            """
            self._arguments_update(inv_link_f, y)
            return self._f_function(x)
        def df_dX(self, X):
            """
            Gradient of the mapping with respect to the inputs X.

            NOTE(review): docstring is a likelihood leftover; also
            `self._derivative_code` is never defined on this class.

            :param inv_inv_link_f: latent variables (inverse link of f)
            :type inv_inv_link_f: Nx1 array
            :param y: data
            :type y: Nx1 array
            :param Y_metadata: Y_metadata
            :returns: gradient of likelihood with respect to each point.
            :rtype: Nx1 array
            """
            self._arguments_update(X)
            return self._derivative_code['X'](**self._arguments)
        def df_dtheta(self, X):
            # Gradient of the mapping w.r.t. each parameter, summed over the
            # data points.
            # FIXME(review): `self._sym_theta` / `self._derivative_code` are
            # never defined on this class.
            self._arguments_update(X)
            g = np.zeros((np.atleast_1d(X).shape[0], len(self._sym_theta)))
            for i, theta in enumerate(self._sym_theta):
                g[:, i:i+1] = self._derivative_code[theta.name](**self._arguments)
            return g.sum(0)