mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-09 20:12:38 +02:00
Starting to sort out likelihoods. WARNING: not working.
This commit is contained in:
parent bc765134c0, commit 055eb3b70b
10 changed files with 247 additions and 142 deletions
@@ -25,22 +25,18 @@ class GP(GPBase):
     """
     def __init__(self, X, likelihood, kernel, normalize_X=False):
         GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        #self._set_params(self._get_params())

     def getstate(self):
         return GPBase.getstate(self)

     def setstate(self, state):
         GPBase.setstate(self, state)
-        self._set_params(self._get_params())
+        #self._set_params(self._get_params())

-    def _set_params(self, p):
-        self.kern._set_params_transformed(p[:self.kern.num_params_transformed()])
-        self.likelihood._set_params(p[self.kern.num_params_transformed():])
-
+    def parameters_changed(self):
         self.K = self.kern.K(self.X)
         self.K += self.likelihood.covariance_matrix

         self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)

         # the gradient of the likelihood wrt the covariance matrix
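Note: the hunk above removes GP._set_params (which pushed a flat parameter vector into the kernel and likelihood) in favour of a parameters_changed hook that rebuilds the cached covariance whenever a parameter moves. A minimal sketch of that pattern, with illustrative names (CachedModel and set_variance are not GPy API):

    import numpy as np

    class CachedModel(object):
        """Sketch: rebuild derived quantities in one hook, not in _set_params."""
        def __init__(self, variance=1.0):
            self.variance = variance
            self.parameters_changed()   # build the initial cache

        def parameters_changed(self):
            # everything that depends on the parameters is recomputed here
            self.K = np.eye(3) * self.variance

        def set_variance(self, value):
            self.variance = value
            self.parameters_changed()   # single notification point

    m = CachedModel()
    m.set_variance(2.0)                 # m.K is now 2 * I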
@@ -102,6 +98,7 @@ class GP(GPBase):
         Note, we use the chain rule: dL_dtheta = dL_dK * d_K_dtheta
         """
         #return np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))

         if not isinstance(self.likelihood,EP):
             tmp = np.hstack((self.kern.dK_dtheta(dL_dK=self.dL_dK, X=self.X), self.likelihood._gradients(partial=np.diag(self.dL_dK))))
         else:
+
@@ -29,11 +29,18 @@ class GPBase(Model):
         self._Xoffset = np.zeros((1, self.input_dim))
         self._Xscale = np.ones((1, self.input_dim))

+        self.set_as_parameter(self.kern._parameters_)
+        self.set_as_parameter(self.likelihood._parameters_)
+
         super(GPBase, self).__init__()
         # Model.__init__(self)
         # All leaf nodes should call self._set_params(self._get_params()) at
         # the end

+    def parameters_changed(self):
+        self.kern.parameters_changed()
+        self.likelihood.parameters_changed()
+
     def getstate(self):
         """
         Get the current state of the class, here we return everything that is needed to recompute the model.
@@ -5,12 +5,13 @@
 from .. import likelihoods
 from ..inference import optimization
 from ..util.linalg import jitchol
-from GPy.util.misc import opt_wrapper
+from ..util.misc import opt_wrapper
 from parameterized import Parameterized
 import multiprocessing as mp
 import numpy as np
-from GPy.core.domains import _POSITIVE, _REAL
+from domains import _POSITIVE, _REAL
 from numpy.linalg.linalg import LinAlgError
+from index_operations import ParameterIndexOperations
 # import numdifftools as ndt

 class Model(Parameterized):
@@ -18,7 +19,7 @@ class Model(Parameterized):
     _allowed_failures = 10 # number of allowed failures
     def __init__(self):
         Parameterized.__init__(self)
-        self.priors = None
+        self._priors = ParameterIndexOperations()
        self.optimization_runs = []
        self.sampling_runs = []
        self.preferred_optimizer = 'scg'
@@ -399,36 +400,36 @@ class Model(Parameterized):
             return np.nan
         return 0.5 * self._get_params().size * np.log(2 * np.pi) + self.log_likelihood() - hld

-    def __str__(self):
-        s = Parameterized.__str__(self).split('\n')
-        #def __str__(self, names=None):
-        # if names is None:
-        #     names = self._get_print_names()
-        #s = Parameterized.__str__(self, names=names).split('\n')
-        # add priors to the string
-        if self.priors is not None:
-            strs = [str(p) if p is not None else '' for p in self.priors]
-        else:
-            strs = [''] * len(self._get_params())
-        # strs = [''] * len(self._get_param_names())
-        # name_indices = self.grep_param_names("|".join(names))
-        # strs = np.array(strs)[name_indices]
-        width = np.array(max([len(p) for p in strs] + [5])) + 4
-
-        log_like = self.log_likelihood()
-        log_prior = self.log_prior()
-        obj_funct = '\nLog-likelihood: {0:.3e}'.format(log_like)
-        if len(''.join(strs)) != 0:
-            obj_funct += ', Log prior: {0:.3e}, LL+prior = {0:.3e}'.format(log_prior, log_like + log_prior)
-        obj_funct += '\n\n'
-        s[0] = obj_funct + s[0]
-        s[0] += "|{h:^{col}}".format(h='prior', col=width)
-        s[1] += '-' * (width + 1)
-
-        for p in range(2, len(strs) + 2):
-            s[p] += '|{prior:^{width}}'.format(prior=strs[p - 2], width=width)
-
-        return '\n'.join(s)
+    # def __str__(self):
+    #     s = Parameterized.__str__(self).split('\n')
+    #     #def __str__(self, names=None):
+    #     # if names is None:
+    #     #     names = self._get_print_names()
+    #     #s = Parameterized.__str__(self, names=names).split('\n')
+    #     # add priors to the string
+    #     if self.priors is not None:
+    #         strs = [str(p) if p is not None else '' for p in self.priors]
+    #     else:
+    #         strs = [''] * len(self._get_params())
+    #     # strs = [''] * len(self._get_param_names())
+    #     # name_indices = self.grep_param_names("|".join(names))
+    #     # strs = np.array(strs)[name_indices]
+    #     width = np.array(max([len(p) for p in strs] + [5])) + 4
+    #
+    #     log_like = self.log_likelihood()
+    #     log_prior = self.log_prior()
+    #     obj_funct = '\nLog-likelihood: {0:.3e}'.format(log_like)
+    #     if len(''.join(strs)) != 0:
+    #         obj_funct += ', Log prior: {0:.3e}, LL+prior = {0:.3e}'.format(log_prior, log_like + log_prior)
+    #     obj_funct += '\n\n'
+    #     s[0] = obj_funct + s[0]
+    #     s[0] += "|{h:^{col}}".format(h='prior', col=width)
+    #     s[1] += '-' * (width + 1)
+    #
+    #     for p in range(2, len(strs) + 2):
+    #         s[p] += '|{prior:^{width}}'.format(prior=strs[p - 2], width=width)
+    #
+    #     return '\n'.join(s)

     def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3):
@@ -8,9 +8,11 @@ from scipy.special import gammaln, digamma
 from ..util.linalg import pdinv
 from GPy.core.domains import _REAL, _POSITIVE
 import warnings
+import weakref
+

 class Prior:
     domain = None

     def pdf(self, x):
         return np.exp(self.lnpdf(x))
@@ -33,6 +35,16 @@ class Gaussian(Prior):

     """
     domain = _REAL
+    _instances = []
+    def __new__(cls, mu, sigma): # Singleton:
+        if cls._instances:
+            cls._instances[:] = [instance for instance in cls._instances if instance()]
+            for instance in cls._instances:
+                if instance().mu == mu and instance().sigma == sigma:
+                    return instance()
+        o = super(Prior, cls).__new__(cls, mu, sigma)
+        cls._instances.append(weakref.ref(o))
+        return cls._instances[-1]()
     def __init__(self, mu, sigma):
         self.mu = float(mu)
         self.sigma = float(sigma)
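Note: the added __new__ keeps a per-class list of weak references and returns an existing live prior when one with equal parameters is found, so equal priors share identity. One caveat: super(Prior, cls).__new__(cls, mu, sigma) forwards the constructor arguments to object.__new__, which Python 3 rejects outright. A standalone sketch of the idiom that passes only cls (the class name here is illustrative, not GPy's):

    import weakref

    class GaussianPrior(object):
        """Sketch of the weakref singleton idiom used for the priors."""
        _instances = []

        def __new__(cls, mu, sigma):
            # drop dead references, then look for a live instance with equal parameters
            cls._instances[:] = [r for r in cls._instances if r() is not None]
            for r in cls._instances:
                if r().mu == mu and r().sigma == sigma:
                    return r()
            o = super(GaussianPrior, cls).__new__(cls)   # no extra args
            cls._instances.append(weakref.ref(o))
            return o

        def __init__(self, mu, sigma):
            self.mu = float(mu)
            self.sigma = float(sigma)

    assert GaussianPrior(0, 1) is GaussianPrior(0, 1)
    assert GaussianPrior(0, 1) is not GaussianPrior(0, 2)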
@@ -63,6 +75,16 @@ class LogGaussian(Prior):

     """
     domain = _POSITIVE
+    _instances = []
+    def __new__(cls, mu, sigma): # Singleton:
+        if cls._instances:
+            cls._instances[:] = [instance for instance in cls._instances if instance()]
+            for instance in cls._instances:
+                if instance().mu == mu and instance().sigma == sigma:
+                    return instance()
+        o = super(Prior, cls).__new__(cls, mu, sigma)
+        cls._instances.append(weakref.ref(o))
+        return cls._instances[-1]()
     def __init__(self, mu, sigma):
         self.mu = float(mu)
         self.sigma = float(sigma)
@@ -93,6 +115,16 @@ class MultivariateGaussian:

     """
     domain = _REAL
+    _instances = []
+    def __new__(cls, mu, var): # Singleton:
+        if cls._instances:
+            cls._instances[:] = [instance for instance in cls._instances if instance()]
+            for instance in cls._instances:
+                if np.all(instance().mu == mu) and np.all(instance().var == var):
+                    return instance()
+        o = super(Prior, cls).__new__(cls, mu, var)
+        cls._instances.append(weakref.ref(o))
+        return cls._instances[-1]()
     def __init__(self, mu, var):
         self.mu = np.array(mu).flatten()
         self.var = np.array(var)
@@ -148,6 +180,16 @@ class Gamma(Prior):

     """
     domain = _POSITIVE
+    _instances = []
+    def __new__(cls, a, b): # Singleton:
+        if cls._instances:
+            cls._instances[:] = [instance for instance in cls._instances if instance()]
+            for instance in cls._instances:
+                if instance().a == a and instance().b == b:
+                    return instance()
+        o = super(Prior, cls).__new__(cls, a, b)
+        cls._instances.append(weakref.ref(o))
+        return cls._instances[-1]()
     def __init__(self, a, b):
         self.a = float(a)
         self.b = float(b)
@@ -199,6 +241,15 @@ class inverse_gamma(Prior):

     """
     domain = _POSITIVE
+    def __new__(cls, a, b): # Singleton:
+        if cls._instances:
+            cls._instances[:] = [instance for instance in cls._instances if instance()]
+            for instance in cls._instances:
+                if instance().a == a and instance().b == b:
+                    return instance()
+        o = super(Prior, cls).__new__(cls, a, b)
+        cls._instances.append(weakref.ref(o))
+        return cls._instances[-1]()
     def __init__(self, a, b):
         self.a = float(a)
         self.b = float(b)
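Note: unlike the Gaussian, LogGaussian, MultivariateGaussian and Gamma hunks above, this hunk adds __new__ without a matching _instances = [] class attribute, and no hunk shown in this diff adds one to Prior, so cls._instances would presumably raise AttributeError on the first inverse_gamma(...) call; that fits the commit's "WARNING: not working". A plausible fix, mirroring the other priors:

    class inverse_gamma(Prior):
        domain = _POSITIVE
        _instances = []   # hypothetical fix: give the class its own cache list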
@@ -53,9 +53,14 @@ class kern(Parameterized):
         assert isinstance(p, Kernpart), "bad kernel part"

         self.compute_param_slices()
+        self._parameters_ = []
+        for p in self.parts:
+            self._parameters_.extend(p._parameters_)
         Parameterized.__init__(self)

+    def parameters_changed(self):
+        [p.parameters_changed() for p in self.parts]
+
     def getstate(self):
         """
         Get the current state of the class,
@@ -303,20 +308,20 @@ class kern(Parameterized):
         for i, t in zip(prev_constr_ind, prev_constr):
             self.constrain(np.where(index_param == i)[0], t)

-    def _get_params(self):
-        return np.hstack([p._get_params() for p in self.parts])
-
-    def _set_params(self, x):
-        [p._set_params(x[s]) for p, s in zip(self.parts, self.param_slices)]
-
-    def _get_param_names(self):
-        # this is a bit nasty: we want to distinguish between parts with the same name by appending a count
-        part_names = np.array([k.name for k in self.parts], dtype=np.str)
-        counts = [np.sum(part_names == ni) for i, ni in enumerate(part_names)]
-        cum_counts = [np.sum(part_names[i:] == ni) for i, ni in enumerate(part_names)]
-        names = [name + '_' + str(cum_count) if count > 1 else name for name, count, cum_count in zip(part_names, counts, cum_counts)]
-
-        return sum([[name + '_' + n for n in k._get_param_names()] for name, k in zip(names, self.parts)], [])
+    # def _get_params(self):
+    #     return np.hstack([p._get_params() for p in self.parts])
+    #
+    # def _set_params(self, x):
+    #     [p._set_params(x[s]) for p, s in zip(self.parts, self.param_slices)]
+    #
+    # def _get_param_names(self):
+    #     # this is a bit nasty: we want to distinguish between parts with the same name by appending a count
+    #     part_names = np.array([k.name for k in self.parts], dtype=np.str)
+    #     counts = [np.sum(part_names == ni) for i, ni in enumerate(part_names)]
+    #     cum_counts = [np.sum(part_names[i:] == ni) for i, ni in enumerate(part_names)]
+    #     names = [name + '_' + str(cum_count) if count > 1 else name for name, count, cum_count in zip(part_names, counts, cum_counts)]
+    #
+    #     return sum([[name + '_' + n for n in k._get_param_names()] for name, k in zip(names, self.parts)], [])

     def K(self, X, X2=None, which_parts='all'):
         """
@@ -606,14 +611,14 @@ class Kern_check_model(Model):
         else:
             return True

-    def _get_params(self):
-        return self.kernel._get_params()
-
-    def _get_param_names(self):
-        return self.kernel._get_param_names()
-
-    def _set_params(self, x):
-        self.kernel._set_params(x)
+    # def _get_params(self):
+    #     return self.kernel._get_params()
+    #
+    # def _get_param_names(self):
+    #     return self.kernel._get_param_names()
+    #
+    # def _set_params(self, x):
+    #     self.kernel._set_params(x)

     def log_likelihood(self):
         return (self.dL_dK*self.kernel.K(self.X, self.X2)).sum()
@@ -22,15 +22,17 @@ class Exponential(Kernpart):
     :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
     :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single lengthscale parameter \ell), otherwise there is one lengthscale parameter per dimension.
     :type ARD: Boolean
+    :param name: the name of the kernel
     :rtype: kernel object

     """
-    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False):
+    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='exp'):
         self.input_dim = input_dim
         self.ARD = ARD
+        self.variance = variance
+        self.name = name
         if ARD == False:
             self.num_params = 2
-            self.name = 'exp'
             if lengthscale is not None:
                 lengthscale = np.asarray(lengthscale)
                 assert lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel"
@@ -38,30 +40,30 @@ class Exponential(Kernpart):
                 lengthscale = np.ones(1)
         else:
             self.num_params = self.input_dim + 1
-            self.name = 'exp'
             if lengthscale is not None:
                 lengthscale = np.asarray(lengthscale)
                 assert lengthscale.size == self.input_dim, "bad number of lengthscales"
             else:
                 lengthscale = np.ones(self.input_dim)
-        self._set_params(np.hstack((variance, lengthscale.flatten())))
+        #self._set_params(np.hstack((variance, lengthscale.flatten())))
+        self.set_as_parameter('variance', 'lengthscale')

-    def _get_params(self):
-        """return the value of the parameters."""
-        return np.hstack((self.variance, self.lengthscale))
-
-    def _set_params(self, x):
-        """set the value of the parameters."""
-        assert x.size == self.num_params
-        self.variance = x[0]
-        self.lengthscale = x[1:]
-
-    def _get_param_names(self):
-        """return parameter names."""
-        if self.num_params == 2:
-            return ['variance', 'lengthscale']
-        else:
-            return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)]
+    # def _get_params(self):
+    #     """return the value of the parameters."""
+    #     return np.hstack((self.variance, self.lengthscale))
+    #
+    # def _set_params(self, x):
+    #     """set the value of the parameters."""
+    #     assert x.size == self.num_params
+    #     self.variance = x[0]
+    #     self.lengthscale = x[1:]
+    #
+    # def _get_param_names(self):
+    #     """return parameter names."""
+    #     if self.num_params == 2:
+    #         return ['variance', 'lengthscale']
+    #     else:
+    #         return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)]

     def K(self, X, X2, target):
         """Compute the covariance matrix between X and X2."""
@@ -1,11 +1,13 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
+from ...core.parameter import Param
+#from ...core.parameterized.Parameterized import set_as_parameter

 class Kernpart(object):
     def __init__(self,input_dim):
         """
-        The base class for a kernpart: a positive definite function which forms part of a covariance function (kernel).
+        The base class for a kernpart: a positive definite function
+        which forms part of a covariance function (kernel).

         :param input_dim: the number of input dimensions to the function
         :type input_dim: int
@@ -18,13 +20,42 @@ class Kernpart(object):
         self.num_params = 1
         # the name of the covariance function.
         self.name = 'unnamed'
+        # link to parameterized objects
+        self._parameters_ = []
+    def set_as_parameter_named(self, name, gradient, index=None, *args, **kwargs):
+        """
+        :param names: name of parameter to set as parameter
+        :param gradient: gradient method to get the gradient of this parameter
+        :param index: index of where to place parameter in printing
+        :param args, kwargs: additional arguments to gradient

-    def _get_params(self):
-        raise NotImplementedError
-    def _set_params(self,x):
-        raise NotImplementedError
-    def _get_param_names(self):
-        raise NotImplementedError
+        Convenience method to connect Kernpart parameters:
+        parameter with name (attribute of this Kernpart) will be set as parameter with following name:
+
+        kernel_name + _ + parameter_name
+
+        To add the kernels name to the parameter name use this method to
+        add parameters.
+        """
+        self.set_as_parameter(name, getattr(self, name), gradient, index, *args, **kwargs)
+    def set_as_parameter(self, name, array, gradient, index=None, *args, **kwargs):
+        """
+        See :py:func:`GPy.core.parameterized.Parameterized.set_as_parameter`
+
+        Note: this method adds the kernels name in front of the parameter.
+        """
+        p = Param(self.name+"_"+name, array, gradient, *args, **kwargs)
+        if index is None:
+            self._parameters_.append(p)
+        else:
+            self._parameters_.insert(index, p)
+    #set_as_parameter.__doc__ += set_as_parameter.__doc__ # @UndefinedVariable
+    # def _get_params(self):
+    #     raise NotImplementedError
+    # def _set_params(self,x):
+    #     raise NotImplementedError
+    # def _get_param_names(self):
+    #     raise NotImplementedError
     def K(self,X,X2,target):
         raise NotImplementedError
     def Kdiag(self,X,target):
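Note: set_as_parameter wraps an attribute in a Param object and prefixes the owning kernel's name, giving names like rbf_variance. A toy sketch of that naming scheme (Param and Part here are stand-ins, not GPy.core.parameter.Param):

    class Param(object):
        """Stand-in: a named parameter container."""
        def __init__(self, name, value):
            self.name, self.value = name, value

    class Part(object):
        def __init__(self, name):
            self.name = name
            self._parameters_ = []

        def set_as_parameter(self, name, value, index=None):
            p = Param(self.name + "_" + name, value)   # kernel_name + _ + parameter_name
            if index is None:
                self._parameters_.append(p)
            else:
                self._parameters_.insert(index, p)

    rbf = Part('rbf')
    rbf.set_as_parameter('variance', 1.0)
    print(rbf._parameters_[0].name)   # prints: rbf_variance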
@@ -87,13 +118,13 @@ class Kernpart_stationary(Kernpart):

         # initialize cache
         self._Z, self._mu, self._S = np.empty(shape=(3, 1))
-        self._X, self._X2, self._params = np.empty(shape=(3, 1))
+        self._X, self._X2, self._parameters_ = np.empty(shape=(3, 1))

     def _set_params(self, x):
         self.lengthscale = x
         self.lengthscale2 = np.square(self.lengthscale)
         # reset cached results
-        self._X, self._X2, self._params = np.empty(shape=(3, 1))
+        self._X, self._X2, self._parameters_ = np.empty(shape=(3, 1))
         self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S
@@ -120,4 +151,4 @@ class Kernpart_inner(Kernpart):

         # initialize cache
         self._Z, self._mu, self._S = np.empty(shape=(3, 1))
-        self._X, self._X2, self._params = np.empty(shape=(3, 1))
+        self._X, self._X2, self._parameters_ = np.empty(shape=(3, 1))
@@ -7,6 +7,7 @@ import numpy as np
 from scipy import weave
 from ...util.linalg import tdot
 from ...util.misc import fast_array_equal
+from GPy.core.parameter import Param

 class RBF(Kernpart):
     """
@@ -31,10 +32,11 @@ class RBF(Kernpart):
     .. Note: this object implements both the ARD and 'spherical' version of the function
     """

-    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False):
+    def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='rbf'):
         self.input_dim = input_dim
-        self.name = 'rbf'
+        self.name = name
         self.ARD = ARD
+        self.variance = variance
         if not ARD:
             self.num_params = 2
             if lengthscale is not None:
@@ -50,36 +52,43 @@ class RBF(Kernpart):
         else:
             lengthscale = np.ones(self.input_dim)

-        self._set_params(np.hstack((variance, lengthscale.flatten())))
+        #self._set_params(np.hstack((variance, lengthscale.flatten())))
+        self.set_as_parameter_named('variance', )
         # initialize cache
         self._Z, self._mu, self._S = np.empty(shape=(3, 1))
-        self._X, self._X2, self._params = np.empty(shape=(3, 1))
+        self._X, self._X2, self._params_save = np.empty(shape=(3, 1))

         # a set of optional args to pass to weave
         self.weave_options = {'headers' : ['<omp.h>'],
                               'extra_compile_args': ['-fopenmp -O3'], # -march=native'],
                               'extra_link_args' : ['-lgomp']}

-    def _get_params(self):
-        return np.hstack((self.variance, self.lengthscale))
-
-    def _set_params(self, x):
-        assert x.size == (self.num_params)
-        self.variance = x[0]
-        self.lengthscale = x[1:]
+    def parameters_changed(self):
         self.lengthscale2 = np.square(self.lengthscale)
         # reset cached results
-        self._X, self._X2, self._params = np.empty(shape=(3, 1))
+        self._X, self._X2, self._params_save = np.empty(shape=(3, 1))
         self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S

-    def _get_param_names(self):
-        if self.num_params == 2:
-            return ['variance', 'lengthscale']
-        else:
-            return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)]
+    # def _get_params(self):
+    #     return np.hstack((self.variance, self.lengthscale))
+    #
+    # def _set_params(self, x):
+    #     assert x.size == (self.num_params)
+    #     self.variance = x[0]
+    #     self.lengthscale = x[1:]
+    #     self.lengthscale2 = np.square(self.lengthscale)
+    #     # reset cached results
+    #     self._X, self._X2, self._params_save = np.empty(shape=(3, 1))
+    #     self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S
+    #
+    # def _get_param_names(self):
+    #     if self.num_params == 2:
+    #         return ['variance', 'lengthscale']
+    #     else:
+    #         return ['variance'] + ['lengthscale_%i' % i for i in range(self.lengthscale.size)]
+
+    def _dK_dvariance(self, model):
+        pass

     def K(self, X, X2, target):
         self._K_computations(X, X2)
@@ -225,9 +234,9 @@ class RBF(Kernpart):

     def _K_computations(self, X, X2):
         params = self._get_params()
-        if not (fast_array_equal(X, self._X) and fast_array_equal(X2, self._X2) and fast_array_equal(self._params , params)):
+        if not (fast_array_equal(X, self._X) and fast_array_equal(X2, self._X2) and fast_array_equal(self._params_save , params)):
             self._X = X.copy()
-            self._params = params.copy()
+            self._params_save = params.copy()
             if X2 is None:
                 self._X2 = None
                 X = X / self.lengthscale
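Note: _K_computations only redoes the expensive distance computations when X, X2 or the parameter vector actually changed; the rename to _params_save keeps that cache key distinct from the new _parameters_ list. A minimal sketch of the same memoisation with plain numpy (np.array_equal standing in for GPy's fast_array_equal):

    import numpy as np

    class KCache(object):
        """Sketch: skip recomputation when inputs and parameters are unchanged."""
        def __init__(self, lengthscale=1.0):
            self.lengthscale = lengthscale
            self._X = np.empty((1,))            # sentinels that never match real input
            self._params_save = np.empty((1,))

        def _K_computations(self, X):
            params = np.array([self.lengthscale])
            if not (np.array_equal(X, self._X)
                    and np.array_equal(params, self._params_save)):
                self._X = X.copy()
                self._params_save = params.copy()
                Xs = X / self.lengthscale
                r2 = np.square(Xs[:, None, :] - Xs[None, :, :]).sum(-1)
                self._K = np.exp(-0.5 * r2)     # RBF shape, unit variance
            return self._K

    k = KCache()
    X = np.random.randn(5, 2)
    assert k._K_computations(X) is k._K_computations(X)   # second call hits the cache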
@@ -31,8 +31,10 @@ class Gaussian(likelihood):

         self.set_data(data)

-        self._variance = np.asarray(variance) + 1.
-        self._set_params(np.asarray(variance))
+        self._variance = np.asarray(variance) + 1
+        self.variance = np.asarray(variance)
+        self.set_as_parameter(noise_variance=self.variance)
+        # self._set_params(np.asarray(variance))

         super(Gaussian, self).__init__()
@@ -50,24 +52,24 @@ class Gaussian(likelihood):
         self.trYYT = np.sum(np.square(self.Y))
         self.YYT_factor = self.Y

-    def _get_params(self):
-        return np.asarray(self._variance)
-
-    def _get_param_names(self):
-        return ["noise_variance"]
-
-    def _set_params(self, x):
-        x = np.float64(x)
-        if np.all(self._variance != x):
-            if x == 0.:#special case of zero noise
+    # def _get_params(self):
+    #     return np.asarray(self._variance)
+    #
+    # def _get_param_names(self):
+    #     return ["noise_variance"]
+    #
+    # def _set_params(self, x):
+    def parameters_changed(self):
+        if np.any(self._variance != self.variance):
+            if np.all(self.variance == 0.):#special case of zero noise
                 self.precision = np.inf
                 self.V = None
             else:
-                self.precision = 1. / x
+                self.precision = 1. / self.variance
                 self.V = (self.precision) * self.Y
                 self.VVT_factor = self.precision * self.YYT_factor
-            self.covariance_matrix = np.eye(self.N) * x
-            self._variance = x
+            self.covariance_matrix = np.eye(self.N) * self.variance
+            self._variance = self.variance

     def predictive_values(self, mu, var, full_cov):
         """
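Note: the dirty check switches from np.all(self._variance != x) to np.any(...), so the derived quantities refresh when any element changes rather than only when all do, and the zero-noise special case now uses np.all(self.variance == 0.). A condensed, self-contained sketch of that guarded update (GaussianNoise, Y and N are placeholders, not GPy API):

    import numpy as np

    class GaussianNoise(object):
        """Sketch of the dirty-checked update in parameters_changed."""
        def __init__(self, Y, variance):
            self.Y, self.N = Y, Y.shape[0]
            self.variance = np.asarray(variance)
            self._variance = self.variance + 1   # guarantee the first update fires
            self.parameters_changed()

        def parameters_changed(self):
            if np.any(self._variance != self.variance):  # anything moved?
                if np.all(self.variance == 0.):          # special case of zero noise
                    self.precision, self.V = np.inf, None
                else:
                    self.precision = 1. / self.variance
                    self.V = self.precision * self.Y
                self.covariance_matrix = np.eye(self.N) * self.variance
                self._variance = self.variance

    lik = GaussianNoise(np.ones((3, 1)), 0.1)
    print(np.diag(lik.covariance_matrix))        # [0.1 0.1 0.1]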
@@ -24,14 +24,14 @@ class likelihood(Parameterized):
     def __init__(self):
         Parameterized.__init__(self)

-    def _get_params(self):
-        raise NotImplementedError
-
-    def _get_param_names(self):
-        raise NotImplementedError
-
-    def _set_params(self, x):
-        raise NotImplementedError
+    # def _get_params(self):
+    #     raise NotImplementedError
+    #
+    # def _get_param_names(self):
+    #     raise NotImplementedError
+    #
+    # def _set_params(self, x):
+    #     raise NotImplementedError

     def fit(self):
         raise NotImplementedError