Resolve merge conflicts

Mike Croucher 2015-04-01 11:44:20 +01:00
commit e82b5fe773
31 changed files with 1729 additions and 731 deletions

View file

@@ -1,13 +1,14 @@
 # Copyright (c) 2013,2014, GPy authors (see AUTHORS.txt).
+# Copyright (c) 2015, James Hensman
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 import sys
-from .parameterization import Parameterized
+from parameterization import Parameterized
 import numpy as np

 class Mapping(Parameterized):
     """
-    Base model for shared behavior between models that can act like a mapping.
+    Base model for shared mapping behaviours
     """
     def __init__(self, input_dim, output_dim, name='mapping'):
@@ -18,49 +19,12 @@ class Mapping(Parameterized):
     def f(self, X):
         raise NotImplementedError

-    def df_dX(self, dL_df, X):
-        """Evaluate derivatives of mapping outputs with respect to inputs.
-
-        :param dL_df: gradient of the objective with respect to the function.
-        :type dL_df: ndarray (num_data x output_dim)
-        :param X: the input locations where derivatives are to be evaluated.
-        :type X: ndarray (num_data x input_dim)
-        :returns: matrix containing gradients of the function with respect to the inputs.
-        """
+    def gradients_X(self, dL_dF, X):
         raise NotImplementedError

-    def df_dtheta(self, dL_df, X):
-        """The gradient of the outputs of the mapping with respect to each of the parameters.
-
-        :param dL_df: gradient of the objective with respect to the function.
-        :type dL_df: ndarray (num_data x output_dim)
-        :param X: input locations where the function is evaluated.
-        :type X: ndarray (num_data x input_dim)
-        :returns: matrix containing gradients with respect to parameters of each output for each input data point.
-        :rtype: ndarray (num_params length)
-        """
+    def update_gradients(self, dL_dF, X):
         raise NotImplementedError

-    def plot(self, *args):
-        """
-        Plots the mapping associated with the model.
-          - In one dimension, the function is plotted.
-          - In two dimensions, a contour plot shows the function.
-          - In higher dimensions, this is not implemented yet. !TODO!
-
-        Can plot only part of the data and part of the posterior functions
-        using which_data and which_functions.
-
-        This is a convenience function: arguments are passed to
-        GPy.plotting.matplot_dep.models_plots.plot_mapping.
-        """
-        if "matplotlib" in sys.modules:
-            from ..plotting.matplot_dep import models_plots
-            models_plots.plot_mapping(self, *args)
-        else:
-            raise NameError("matplotlib package has not been imported.")

 class Bijective_mapping(Mapping):
     """
@@ -74,72 +38,4 @@ class Bijective_mapping(Mapping):
         """Inverse mapping from output domain of the function to the inputs."""
         raise NotImplementedError
-
-from .model import Model
-
-class Mapping_check_model(Model):
-    """
-    This is a dummy model class used as a base class for checking that the
-    gradients of a given mapping are implemented correctly. It enables
-    checkgradient() to be called independently on each mapping.
-    """
-    def __init__(self, mapping=None, dL_df=None, X=None):
-        num_samples = 20
-        if mapping==None:
-            mapping = GPy.mapping.linear(1, 1)
-        if X==None:
-            X = np.random.randn(num_samples, mapping.input_dim)
-        if dL_df==None:
-            dL_df = np.ones((num_samples, mapping.output_dim))
-
-        self.mapping = mapping
-        self.X = X
-        self.dL_df = dL_df
-        self.num_params = self.mapping.num_params
-        Model.__init__(self)
-
-    def _get_params(self):
-        return self.mapping._get_params()
-
-    def _get_param_names(self):
-        return self.mapping._get_param_names()
-
-    def _set_params(self, x):
-        self.mapping._set_params(x)
-
-    def log_likelihood(self):
-        return (self.dL_df*self.mapping.f(self.X)).sum()
-
-    def _log_likelihood_gradients(self):
-        raise NotImplementedError("This needs to be implemented to use the Mapping_check_model class.")
-
-class Mapping_check_df_dtheta(Mapping_check_model):
-    """This class allows gradient checks for the gradient of a mapping with respect to parameters."""
-    def __init__(self, mapping=None, dL_df=None, X=None):
-        Mapping_check_model.__init__(self, mapping=mapping, dL_df=dL_df, X=X)
-
-    def _log_likelihood_gradients(self):
-        return self.mapping.df_dtheta(self.dL_df, self.X)
-
-class Mapping_check_df_dX(Mapping_check_model):
-    """This class allows gradient checks for the gradient of a mapping with respect to X."""
-    def __init__(self, mapping=None, dL_df=None, X=None):
-        Mapping_check_model.__init__(self, mapping=mapping, dL_df=dL_df, X=X)
-
-        if dL_df==None:
-            dL_df = np.ones((self.X.shape[0], self.mapping.output_dim))
-        self.num_params = self.X.shape[0]*self.mapping.input_dim
-
-    def _log_likelihood_gradients(self):
-        return self.mapping.df_dX(self.dL_df, self.X).flatten()
-
-    def _get_param_names(self):
-        return ['X_' + str(i) + ',' + str(j) for j in range(self.X.shape[1]) for i in range(self.X.shape[0])]
-
-    def _get_params(self):
-        return self.X.flatten()
-
-    def _set_params(self, x):
-        self.X = x.reshape(self.X.shape)
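
Note: the renamed interface above (f / df_dtheta / df_dX becoming f / update_gradients / gradients_X) is what Mapping subclasses now implement. A minimal illustrative sketch of a hypothetical subclass, assuming only numpy and the Mapping base class from this file (the class and its gradient attribute are made up for the example):

    import numpy as np

    class ConstantMapping(Mapping):
        """Illustrative only: maps every input to a learned constant vector."""
        def __init__(self, input_dim, output_dim, name='constant'):
            Mapping.__init__(self, input_dim, output_dim, name)
            self.c = np.zeros(output_dim)  # stand-in for a properly linked Param

        def f(self, X):
            # broadcast the constant across all num_data rows
            return np.tile(self.c, (X.shape[0], 1))

        def update_gradients(self, dL_dF, X):
            # dL/dc: sum the objective gradient over data points
            self.c_gradient = dL_dF.sum(0)

        def gradients_X(self, dL_dF, X):
            # the output does not depend on X, so the gradient is zero
            return np.zeros_like(X)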

View file

@@ -403,6 +403,7 @@ class Model(Parameterized):
         model_details = [['<b>Model</b>', self.name + '<br>'],
                          ['<b>Log-likelihood</b>', '{}<br>'.format(float(self.log_likelihood()))],
                          ["<b>Number of Parameters</b>", '{}<br>'.format(self.size)],
+                         ["<b>Number of Optimization Parameters</b>", '{}<br>'.format(self._size_transformed())],
                          ["<b>Updates</b>", '{}<br>'.format(self._update_on)],
                          ]
         from operator import itemgetter
@@ -420,6 +421,7 @@ class Model(Parameterized):
         model_details = [['Name', self.name],
                         ['Log-likelihood', '{}'.format(float(self.log_likelihood()))],
                         ["Number of Parameters", '{}'.format(self.size)],
+                        ["Number of Optimization Parameters", '{}'.format(self._size_transformed())],
                         ["Updates", '{}'.format(self._update_on)],
                         ]
         from operator import itemgetter

View file

@@ -969,7 +969,7 @@ class Parameterizable(OptimizationHandlable):
             self._add_parameter_name(param, ignore_added_names)
         # and makes sure to not delete programmatically added parameters
         for other in self.parameters[::-1]:
-            if other is not param and other.name.startswith(param.name):
+            if other is not param and other.name == param.name:
                 warn_and_retry(param, _name_digit.match(other.name))
                 return
         if pname not in dir(self):
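
Note: the change from startswith to == tightens the duplicate-name check. A small standalone sketch of why startswith over-matches (parameter names are made up for illustration):

    existing = ['rbf', 'rbf_lengthscale']
    new_name = 'rbf'
    # old check: a merely prefixed name is treated as a clash too
    print([n for n in existing if n.startswith(new_name)])  # ['rbf', 'rbf_lengthscale']
    # new check: only the exact duplicate clashes
    print([n for n in existing if n == new_name])           # ['rbf']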

View file

@@ -10,11 +10,6 @@ from .parameterization.variational import VariationalPosterior, NormalPosterior
 from ..util.linalg import mdot

 import logging
-from GPy.inference.latent_function_inference.posterior import Posterior
-from GPy.inference.optimization.stochastics import SparseGPStochastics,\
-    SparseGPMissing
-#no stochastics.py file added! from GPy.inference.optimization.stochastics import SparseGPStochastics,\
-#SparseGPMissing

 logger = logging.getLogger("sparse gp")

 class SparseGP(GP):
@@ -24,6 +19,10 @@ class SparseGP(GP):
     This model allows (approximate) inference using variational DTC or FITC
     (Gaussian likelihoods) as well as non-conjugate sparse methods based on
     these.
+
+    This is not for missing data, as the implementation for missing data involves
+    some inefficient optimization routine decisions.
+    See the missing-data SparseGP implementation in :py:class:`~GPy.models.sparse_gp_minibatch.SparseGPMiniBatch`.

     :param X: inputs
     :type X: np.ndarray (num_data x input_dim)
@@ -66,7 +65,6 @@ class SparseGP(GP):
     def set_Z(self, Z, trigger_update=True):
         if trigger_update: self.update_model(False)
         self.unlink_parameter(self.Z)
-        from ..core import Param
         self.Z = Param('inducing inputs', Z)
         self.link_parameter(self.Z, index=0)
         if trigger_update: self.update_model(True)
@@ -120,7 +118,7 @@ class SparseGP(GP):
         For uncertain inputs, the SparseGP bound produces a full covariance structure across D, so for full_cov we
         return a NxDxD matrix and in the not full_cov case, we return the diagonal elements across D (NxD).
-        This is for both with and without missing data.
+        This is for both with and without missing data. For missing data, see the SparseGP implementation in :py:class:`~GPy.models.sparse_gp_minibatch.SparseGPMiniBatch`.
         """
         if kern is None: kern = self.kern

View file

@@ -28,19 +28,20 @@ class VerboseOptimization(object):
         try:
             from IPython.display import display
-            from IPython.html.widgets import FloatProgressWidget, HTMLWidget, ContainerWidget
-            self.text = HTMLWidget()
-            self.progress = FloatProgressWidget()
-            self.model_show = HTMLWidget()
+            from IPython.html.widgets import IntProgress, HTML, Box, VBox, HBox, FlexBox
+            self.text = HTML(width='100%')
+            self.progress = IntProgress(min=0, max=maxiters)
+            #self.progresstext = Text(width='100%', disabled=True, value='0/{}'.format(maxiters))
+            self.model_show = HTML()
             self.ipython_notebook = ipython_notebook
         except:
             # Not in Ipython notebook
             self.ipython_notebook = False

         if self.ipython_notebook:
-            left_col = ContainerWidget(children = [self.progress, self.text])
-            right_col = ContainerWidget(children = [self.model_show])
-            self.hor_align = ContainerWidget(children = [left_col, right_col])
+            left_col = VBox(children=[self.progress, self.text], padding=2, width='40%')
+            right_col = Box(children=[self.model_show], padding=2, width='60%')
+            self.hor_align = FlexBox(children = [left_col, right_col], width='100%', orientation='horizontal')

             display(self.hor_align)
@@ -58,15 +59,16 @@ class VerboseOptimization(object):
                 self.hor_align.set_css({
                     'width': "100%",
                 })
+                self.hor_align.remove_class('vbox')
+                self.hor_align.add_class('hbox')
+                left_col.add_class("box-flex1")
+                right_col.add_class('box-flex0')
             except:
                 pass
-            self.hor_align.remove_class('vbox')
-            self.hor_align.add_class('hbox')
-            left_col.add_class("box-flex1")
-            right_col.add_class('box-flex0')

             #self.text.add_class('box-flex2')
             #self.progress.add_class('box-flex1')
         else:
@@ -104,7 +106,8 @@ class VerboseOptimization(object):
                 html_body += "<td class='tg-right'>{}</td>".format(val)
                 html_body += "</tr>"
             self.text.value = html_begin + html_body + html_end
-            self.progress.value = 100*(self.iteration+1)/self.maxiters
+            self.progress.value = (self.iteration+1)
+            #self.progresstext.value = '0/{}'.format((self.iteration+1))
             self.model_show.value = self.model._repr_html_()
         else:
             n_exps = exponents(self.fnow, self.current_gradient)
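
Note: switching from FloatProgressWidget (a 0-100 percentage) to IntProgress lets the widget count iterations directly, which is why the 100*(i+1)/maxiters rescaling disappears above. A minimal sketch using the same 2015-era IPython.html.widgets API the diff itself uses (in current ipywidgets the import path differs):

    from IPython.display import display
    from IPython.html.widgets import IntProgress

    maxiters = 50
    progress = IntProgress(min=0, max=maxiters)  # value is an iteration count, not a percentage
    display(progress)
    for i in range(maxiters):
        progress.value = i + 1  # no rescaling needed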

View file

@@ -50,19 +50,19 @@ class InferenceMethodList(LatentFunctionInference, list):
     def on_optimization_end(self):
         for inf in self:
             inf.on_optimization_end()

     def __getstate__(self):
         state = []
         for inf in self:
             state.append(inf)
         return state

     def __setstate__(self, state):
         for inf in state:
             self.append(inf)

 from .exact_gaussian_inference import ExactGaussianInference
-from .laplace import Laplace
+from .laplace import Laplace,LaplaceBlock
 from GPy.inference.latent_function_inference.var_dtc import VarDTC
 from .expectation_propagation import EP
 from .expectation_propagation_dtc import EPDTC
@@ -78,9 +78,9 @@ from .svgp import SVGP
 # class EMLikeLatentFunctionInference(LatentFunctionInference):
 #    def update_approximation(self):
 #        """
 #        This function gets called when the
 #        """
 #
 #    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
 #        """
 #        Do inference on the latent functions given a covariance function `kern`,
@@ -88,7 +88,7 @@ from .svgp import SVGP
 #        Additional metadata for the outputs `Y` can be given in `Y_metadata`.
 #        """
 #        raise NotImplementedError, "Abstract base class for full inference"
 #
 # class VariationalLatentFunctionInference(LatentFunctionInference):
 #    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
 #        """

View file

@@ -40,8 +40,11 @@ class EP(LatentFunctionInference):
         K = kern.K(X)

         if self._ep_approximation is None:
+            #if we don't yet have the results of running EP, run EP and store the computed factors in self._ep_approximation
             mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
         else:
+            #if we've already run EP, just use the existing approximation stored in self._ep_approximation
             mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation

         Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde))

View file

@@ -43,28 +43,31 @@ class Laplace(LatentFunctionInference):
         """
         Returns a Posterior class containing essential quantities of the posterior
         """
         # Compute K
         K = kern.K(X)

         #Find mode
         if self.bad_fhat or self.first_run:
             Ki_f_init = np.zeros_like(Y)
-            first_run = False
+            self.first_run = False
         else:
             Ki_f_init = self._previous_Ki_fhat
+        Ki_f_init = np.zeros_like(Y)  # FIXME: take this out

         f_hat, Ki_fhat = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata)
         self.f_hat = f_hat
-        self.Ki_fhat = Ki_fhat
-        self.K = K.copy()
+        #self.Ki_fhat = Ki_fhat
+        #self.K = K.copy()

         #Compute hessian and other variables at mode
         log_marginal, woodbury_inv, dL_dK, dL_dthetaL = self.mode_computations(f_hat, Ki_fhat, K, Y, likelihood, kern, Y_metadata)
         self._previous_Ki_fhat = Ki_fhat.copy()
         return Posterior(woodbury_vector=Ki_fhat, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}

-    def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None):
+    def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None, *args, **kwargs):
         """
         Rasmussen's numerically stable mode finding.
         For nomenclature see Rasmussen & Williams 2006.
@@ -89,7 +92,12 @@ class Laplace(LatentFunctionInference):
         #define the objective function (to be maximised)
         def obj(Ki_f, f):
-            return -0.5*np.dot(Ki_f.flatten(), f.flatten()) + np.sum(likelihood.logpdf(f, Y, Y_metadata=Y_metadata))
+            ll = -0.5*np.sum(np.dot(Ki_f.T, f)) + np.sum(likelihood.logpdf(f, Y, Y_metadata=Y_metadata))
+            if np.isnan(ll):
+                return -np.inf
+            else:
+                return ll

         difference = np.inf
         iteration = 0
@@ -104,7 +112,7 @@ class Laplace(LatentFunctionInference):
             W_f = W*f
             b = W_f + grad # R+W p46 line 6.
-            W12BiW12, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave)
+            W12BiW12, _, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave, *args, **kwargs)
             W12BiW12Kb = np.dot(W12BiW12, np.dot(K, b))

             #Work out the DIRECTION that we want to move in, but don't choose the stepsize yet
@@ -121,7 +129,9 @@ class Laplace(LatentFunctionInference):
             step = optimize.brent(inner_obj, tol=1e-4, maxiter=12)
             Ki_f_new = Ki_f + step*dKi_f
             f_new = np.dot(K, Ki_f_new)
+            #print "new {} vs old {}".format(obj(Ki_f_new, f_new), obj(Ki_f, f))
+            if obj(Ki_f_new, f_new) < obj(Ki_f, f):
+                raise ValueError("Shouldn't happen, brent optimization failing")

             difference = np.abs(np.sum(f_new - f)) + np.abs(np.sum(Ki_f_new - Ki_f))
             Ki_f = Ki_f_new
             f = f_new
@@ -152,14 +162,10 @@ class Laplace(LatentFunctionInference):
         if np.any(np.isnan(W)):
             raise ValueError('One or more element(s) of W is NaN')
-        K_Wi_i, L, LiW12 = self._compute_B_statistics(K, W, likelihood.log_concave)
-
-        #compute vital matrices
-        C = np.dot(LiW12, K)
-        Ki_W_i = K - C.T.dot(C)
+        K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave)

         #compute the log marginal
-        log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - np.sum(np.log(np.diag(L)))
+        log_marginal = -0.5*np.sum(np.dot(Ki_f.T, f_hat)) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - 0.5*logdet_I_KW

         # Compute matrices for derivatives
         dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat
@@ -196,23 +202,23 @@ class Laplace(LatentFunctionInference):
             dL_dthetaL = np.zeros(num_params)
             for thetaL_i in range(num_params):
                 #Explicit
-                dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i])
+                dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i, :, :])
                                    # The + comes from the fact that dlik_hess_dthetaL == -dW_dthetaL
-                                   + 0.5*np.sum(np.diag(Ki_W_i).flatten()*dlik_hess_dthetaL[:, thetaL_i].flatten())
+                                   + 0.5*np.sum(np.diag(Ki_W_i)*np.squeeze(dlik_hess_dthetaL[thetaL_i, :, :]))
                                  )
                 #Implicit
-                dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[:, thetaL_i])
-                #dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[:, thetaL_i])
+                dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[thetaL_i, :, :])
+                #dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[thetaL_i, :, :])
                 dL_dthetaL_imp = np.dot(dL_dfhat.T, dfhat_dthetaL)
-                dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp
+                dL_dthetaL[thetaL_i] = np.sum(dL_dthetaL_exp + dL_dthetaL_imp)
         else:
             dL_dthetaL = np.zeros(likelihood.size)

         return log_marginal, K_Wi_i, dL_dK, dL_dthetaL

-    def _compute_B_statistics(self, K, W, log_concave):
+    def _compute_B_statistics(self, K, W, log_concave, *args, **kwargs):
         """
         Rasmussen suggests the use of a numerically stable positive definite matrix B,
         which has positive diagonal elements and can be easily inverted.
@@ -225,7 +231,7 @@ class Laplace(LatentFunctionInference):
         """
         if not log_concave:
             #print "Under 1e-10: {}".format(np.sum(W < 1e-6))
-            W[W<1e-6] = 1e-6
+            W = np.clip(W, 1e-6, 1e+30)
             # NOTE: setting a parameter inside parameters_changed will always lead to closed update loops!
             #W.__setitem__(W < 1e-6, 1e-6, update=False) # FIXME-HACK: This is a hack since GPy can't handle negative variances which can occur
             # if the likelihood is non-log-concave. We want to say that there is a negative variance
@@ -247,5 +253,160 @@ class Laplace(LatentFunctionInference):
         #K_Wi_i_2 , _= dpotri(L2)
         #symmetrify(K_Wi_i_2)

-        return K_Wi_i, L, LiW12
+        #compute vital matrices
+        C = np.dot(LiW12, K)
+        Ki_W_i = K - C.T.dot(C)
+        I_KW_i = np.eye(K.shape[0]) - np.dot(K, K_Wi_i)
+        logdet_I_KW = 2*np.sum(np.log(np.diag(L)))
+
+        return K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i
+class LaplaceBlock(Laplace):
+
+    def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None, *args, **kwargs):
+        Ki_f = Ki_f_init.copy()
+        f = np.dot(K, Ki_f)
+
+        #define the objective function (to be maximised)
+        def obj(Ki_f, f):
+            ll = -0.5*np.dot(Ki_f.T, f) + np.sum(likelihood.logpdf_sum(f, Y, Y_metadata=Y_metadata))
+            if np.isnan(ll):
+                return -np.inf
+            else:
+                return ll
+
+        difference = np.inf
+        iteration = 0
+        I = np.eye(K.shape[0])
+        while difference > self._mode_finding_tolerance and iteration < self._mode_finding_max_iter:
+            W = -likelihood.d2logpdf_df2(f, Y, Y_metadata=Y_metadata)
+            W[np.diag_indices_from(W)] = np.clip(np.diag(W), 1e-6, 1e+30)
+            W_f = np.dot(W, f)
+            grad = likelihood.dlogpdf_df(f, Y, Y_metadata=Y_metadata)
+            b = W_f + grad # R+W p46 line 6.
+            K_Wi_i, _, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave, *args, **kwargs)
+
+            #Work out the DIRECTION that we want to move in, but don't choose the stepsize yet
+            #a = (I - (K+Wi)i*K)*b
+            full_step_Ki_f = np.dot(I - np.dot(K_Wi_i, K), b)
+            dKi_f = full_step_Ki_f - Ki_f
+
+            #define an objective for the line search (minimize this one)
+            def inner_obj(step_size):
+                Ki_f_trial = Ki_f + step_size*dKi_f
+                f_trial = np.dot(K, Ki_f_trial)
+                return -obj(Ki_f_trial, f_trial)
+
+            #use scipy for the line search, then compute new values of f, Ki_f
+            step = optimize.brent(inner_obj, tol=1e-4, maxiter=12)
+            Ki_f_new = Ki_f + step*dKi_f
+            f_new = np.dot(K, Ki_f_new)
+
+            difference = np.abs(np.sum(f_new - f)) + np.abs(np.sum(Ki_f_new - Ki_f))
+            Ki_f = Ki_f_new
+            f = f_new
+            iteration += 1
+
+        #Warn of bad fits
+        if difference > self._mode_finding_tolerance:
+            if not self.bad_fhat:
+                warnings.warn("Not perfect f_hat fit difference: {}".format(difference))
+                self._previous_Ki_fhat = np.zeros_like(Y)
+            self.bad_fhat = True
+        elif self.bad_fhat:
+            self.bad_fhat = False
+            warnings.warn("f_hat now fine again")
+
+        if iteration > self._mode_finding_max_iter:
+            warnings.warn("didn't find the best")
+
+        return f, Ki_f
+
+    def mode_computations(self, f_hat, Ki_f, K, Y, likelihood, kern, Y_metadata):
+        #At this point get the hessian matrix (or vector as W is diagonal)
+        W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata)
+        W[np.diag_indices_from(W)] = np.clip(np.diag(W), 1e-6, 1e+30)
+
+        K_Wi_i, log_B_det, I_KW_i, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave)
+
+        #compute the log marginal
+        #FIXME: The determinant should be output_dim*0.5 I think, gradients may now no longer check
+        log_marginal = -0.5*np.dot(f_hat.T, Ki_f) + np.sum(likelihood.logpdf_sum(f_hat, Y, Y_metadata=Y_metadata)) - 0.5*log_B_det
+
+        #Compute vital matrices for derivatives
+        dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat
+        #dL_dfhat = np.zeros((f_hat.shape[0]))
+        #for i in range(f_hat.shape[0]):
+            #dL_dfhat[i] = -0.5*np.trace(np.dot(Ki_W_i, dW_df[:,:,i]))
+        dL_dfhat = -0.5*np.einsum('ij,ijk->k', Ki_W_i, dW_df)
+
+        woodbury_vector = likelihood.dlogpdf_df(f_hat, Y, Y_metadata=Y_metadata)
+
+        ####################
+        #  compute dL_dK   #
+        ####################
+        if kern.size > 0 and not kern.is_fixed:
+            #Explicit
+            explicit_part = 0.5*(np.dot(Ki_f, Ki_f.T) - K_Wi_i)
+            #Implicit
+            implicit_part = woodbury_vector.dot(dL_dfhat[None,:]).dot(I_KW_i)
+            #implicit_part = Ki_f.dot(dL_dfhat[None,:]).dot(I_KW_i)
+            dL_dK = explicit_part + implicit_part
+        else:
+            dL_dK = np.zeros_like(K)
+
+        ####################
+        #compute dL_dthetaL#
+        ####################
+        if likelihood.size > 0 and not likelihood.is_fixed:
+            raise NotImplementedError
+        else:
+            dL_dthetaL = np.zeros(likelihood.size)
+
+        #self.K_Wi_i = K_Wi_i
+        #self.Ki_W_i = Ki_W_i
+        #self.W = W
+        #self.K = K
+        #self.dL_dfhat = dL_dfhat
+        #self.explicit_part = explicit_part
+        #self.implicit_part = implicit_part
+
+        return log_marginal, K_Wi_i, dL_dK, dL_dthetaL
+
+    def _compute_B_statistics(self, K, W, log_concave, *args, **kwargs):
+        """
+        Rasmussen suggests the use of a numerically stable positive definite matrix B,
+        which has positive diagonal elements and can be easily inverted.
+
+        :param K: Prior covariance matrix evaluated at locations X
+        :type K: NxN matrix
+        :param W: Negative hessian at a point (diagonal matrix)
+        :type W: Vector of diagonal values of hessian (1xN)
+        :returns: (K_Wi_i, logdet_B, Bi, Ki_W_i)
+        """
+        #w = GPy.util.diag.view(W)
+        #W[:] = np.where(w<1e-6, 1e-6, w)
+        #B = I + KW
+        B = np.eye(K.shape[0]) + np.dot(K, W)
+        #Bi, L, Li, logdetB = pdinv(B)
+        Bi = np.linalg.inv(B)
+        #K_Wi_i = np.eye(K.shape[0]) - mdot(W, Bi, K)
+        K_Wi_i = np.dot(W, Bi)
+        #self.K_Wi_i_brute = np.linalg.inv(K + np.linalg.inv(W))
+        #self.B = B
+        #self.Bi = Bi
+        Ki_W_i = np.dot(Bi, K)
+        sign, logdetB = np.linalg.slogdet(B)
+        return K_Wi_i, sign*logdetB, Bi, Ki_W_i
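
Note: the _compute_B_statistics above relies on the identity (K + W^{-1})^{-1} = W(I + KW)^{-1}, which avoids inverting W directly; the commented-out K_Wi_i_brute line is exactly that brute-force check. A small numpy verification of the identity (sizes and the random matrices are illustrative):

    import numpy as np

    rng = np.random.RandomState(0)
    A = rng.randn(4, 4)
    K = A.dot(A.T) + 4*np.eye(4)      # prior covariance, positive definite
    W = np.diag(rng.rand(4) + 0.5)    # negative Hessian, here diagonal and positive

    B = np.eye(4) + K.dot(W)          # B = I + KW, as in the code
    K_Wi_i = W.dot(np.linalg.inv(B))  # the code's K_Wi_i
    brute = np.linalg.inv(K + np.linalg.inv(W))  # the commented-out brute-force form

    print(np.allclose(K_Wi_i, brute))  # True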

View file

@@ -47,6 +47,8 @@ class SVGP(LatentFunctionInference):
         #rescale the F term if working on a batch
         F, dF_dmu, dF_dv = F*batch_scale, dF_dmu*batch_scale, dF_dv*batch_scale
+        if dF_dthetaL is not None:
+            dF_dthetaL = dF_dthetaL.sum(1).sum(1)*batch_scale

         #derivatives of expected likelihood
         Adv = A.T[:,:,None]*dF_dv[None,:,:] # As if dF_dv is diagonal
@@ -69,4 +71,4 @@ class SVGP(LatentFunctionInference):
         dL_dchol = np.dstack([2.*np.dot(dL_dS[:,:,i], L[:,:,i]) for i in range(num_outputs)])
         dL_dchol = choleskies.triang_to_flat(dL_dchol)

-        return Posterior(mean=q_u_mean, cov=S, K=Kmm), log_marginal, {'dL_dKmm':dL_dKmm, 'dL_dKmn':dL_dKmn, 'dL_dKdiag': dF_dv, 'dL_dm':dL_dm, 'dL_dchol':dL_dchol, 'dL_dthetaL':dF_dthetaL}
+        return Posterior(mean=q_u_mean, cov=S, K=Kmm), log_marginal, {'dL_dKmm':dL_dKmm, 'dL_dKmn':dL_dKmn, 'dL_dKdiag': dF_dv.sum(1), 'dL_dm':dL_dm, 'dL_dchol':dL_dchol, 'dL_dthetaL':dF_dthetaL}
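
Note: with the likelihood derivatives now returned as a (num_params, N, D) array (see the Gaussian.variational_expectations change further below), the dF_dthetaL term has to be collapsed over data points and outputs before batch rescaling. A small shape sketch with made-up sizes:

    import numpy as np

    num_params, N, D = 2, 5, 3
    dF_dthetaL = np.ones((num_params, N, D))
    batch_scale = 10.0  # e.g. full data size / minibatch size

    # sum over N (axis 1), then over D (now axis 1 again): one value per parameter
    collapsed = dF_dthetaL.sum(1).sum(1) * batch_scale
    print(collapsed.shape)  # (2,)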

View file

@@ -14,7 +14,8 @@ from ._src.ODE_st import ODE_st
 from ._src.ODE_t import ODE_t
 from ._src.poly import Poly
 from ._src.eq_ode2 import EQ_ODE2
 from ._src.trunclinear import TruncLinear,TruncLinear_inf
-from ._src.splitKern import SplitKern,DiffGenomeKern
+from ._src.splitKern import SplitKern,DEtime
+from ._src.splitKern import DEtime as DiffGenomeKern

View file

@@ -7,7 +7,7 @@ from .kern import Kern,CombinationKernel
 from .independent_outputs import index_to_slices
 import itertools

-class DiffGenomeKern(Kern):
+class DEtime(Kern):
     def __init__(self, kernel, idx_p, Xp, index_dim=-1, name='DiffGenomeKern'):
         self.idx_p = idx_p

View file

@@ -77,7 +77,7 @@ class Bernoulli(Likelihood):
         return Z_hat, mu_hat, sigma2_hat

-    def variational_expectations(self, Y, m, v, gh_points=None):
+    def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
         if isinstance(self.gp_link, link_functions.Probit):
             if gh_points is None:

View file

@@ -34,7 +34,9 @@ class Gaussian(Likelihood):
         if gp_link is None:
             gp_link = link_functions.Identity()

-        assert isinstance(gp_link, link_functions.Identity), "the likelihood only implemented for the identity link"
+        if not isinstance(gp_link, link_functions.Identity):
+            print "Warning, Exact inference is not implemented for non-identity link functions,\
+            if you are not already, ensure Laplace inference_method is used"

         super(Gaussian, self).__init__(gp_link, name=name)
@@ -263,16 +265,19 @@ class Gaussian(Likelihood):
         return d2logpdf_dlink2_dvar

     def dlogpdf_link_dtheta(self, f, y, Y_metadata=None):
-        dlogpdf_dvar = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata)
-        return dlogpdf_dvar
+        dlogpdf_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
+        dlogpdf_dtheta[0,:,:] = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata)
+        return dlogpdf_dtheta

     def dlogpdf_dlink_dtheta(self, f, y, Y_metadata=None):
-        dlogpdf_dlink_dvar = self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata)
-        return dlogpdf_dlink_dvar
+        dlogpdf_dlink_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
+        dlogpdf_dlink_dtheta[0, :, :] = self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata)
+        return dlogpdf_dlink_dtheta

     def d2logpdf_dlink2_dtheta(self, f, y, Y_metadata=None):
-        d2logpdf_dlink2_dvar = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata)
-        return d2logpdf_dlink2_dvar
+        d2logpdf_dlink2_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
+        d2logpdf_dlink2_dtheta[0, :, :] = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata)
+        return d2logpdf_dlink2_dtheta

     def _mean(self, gp):
         """
@@ -305,18 +310,17 @@ class Gaussian(Likelihood):
         Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp])
         return Ysim.reshape(orig_shape)

-    def log_predictive_density(self, y_test, mu_star, var_star):
+    def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None):
         """
         assumes independence
         """
         v = var_star + self.variance
         return -0.5*np.log(2*np.pi) - 0.5*np.log(v) - 0.5*np.square(y_test - mu_star)/v

-    def variational_expectations(self, Y, m, v, gh_points=None):
+    def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
         lik_var = float(self.variance)
         F = -0.5*np.log(2*np.pi) - 0.5*np.log(lik_var) - 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/lik_var
         dF_dmu = (Y - m)/lik_var
         dF_dv = np.ones_like(v)*(-0.5/lik_var)
-        dF_dlik_var = np.sum(-0.5/lik_var + 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/(lik_var**2))
-        dF_dtheta = [dF_dlik_var]
-        return F, dF_dmu, dF_dv, dF_dtheta
+        dF_dtheta = -0.5/lik_var + 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/(lik_var**2)
+        return F, dF_dmu, dF_dv, dF_dtheta.reshape(1, Y.shape[0], Y.shape[1])

View file

@@ -5,7 +5,7 @@ import numpy as np
 from scipy import stats,special
 import scipy as sp
 from . import link_functions
-from ..util.misc import chain_1, chain_2, chain_3
+from ..util.misc import chain_1, chain_2, chain_3, blockify_dhess_dtheta, blockify_third, blockify_hessian, safe_exp
 from scipy.integrate import quad
 import warnings
 from ..core.parameterization import Parameterized
@@ -39,6 +39,7 @@ class Likelihood(Parameterized):
         assert isinstance(gp_link,link_functions.GPTransformation), "gp_link is not a valid GPTransformation."
         self.gp_link = gp_link
         self.log_concave = False
+        self.not_block_really = False

     def _gradients(self,partial):
         return np.zeros(0)
@@ -177,7 +178,12 @@ class Likelihood(Parameterized):
         if np.any(np.isnan(dF_dm)) or np.any(np.isinf(dF_dm)):
             stop
-        dF_dtheta = None # Not yet implemented
+        if self.size:
+            dF_dtheta = self.dlogpdf_dtheta(X, Y[:,None]) # Ntheta x (orig size) x N_{quad_points}
+            dF_dtheta = np.dot(dF_dtheta, gh_w)
+            dF_dtheta = dF_dtheta.reshape(self.size, shape[0], shape[1])
+        else:
+            dF_dtheta = None # Not yet implemented

         return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), dF_dtheta

     def predictive_mean(self, mu, variance, Y_metadata=None):
@@ -189,14 +195,21 @@ class Likelihood(Parameterized):
         """
         #conditional_mean: the expected value of y given some f, under this likelihood
+        fmin = -np.inf
+        fmax = np.inf

         def int_mean(f,m,v):
-            p = np.exp(-(0.5/v)*np.square(f - m))
-            #If p is zero then conditional_mean will overflow
+            exponent = -(0.5/v)*np.square(f - m)
+            #If exponent is under -30 then exp(exponent) will be very small, so don't exp it!
+            assert v.all() > 0
+            p = safe_exp(exponent)
+            #If p is zero then conditional_mean will overflow
             if p < 1e-10:
                 return 0.
             else:
                 return self.conditional_mean(f)*p
-        scaled_mean = [quad(int_mean, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
+        scaled_mean = [quad(int_mean, fmin, fmax,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
         mean = np.array(scaled_mean)[:,None] / np.sqrt(2*np.pi*(variance))

         return mean
@@ -210,7 +223,7 @@ class Likelihood(Parameterized):
         Approximation to the predictive variance: V(Y_star)

         The following variance decomposition is used:
-        V(Y_star) = E( V(Y_star|f_star) ) + V( E(Y_star|f_star) )
+        V(Y_star) = E( V(Y_star|f_star)**2 ) + V( E(Y_star|f_star) )**2

         :param mu: mean of posterior
         :param sigma: standard deviation of posterior
@@ -220,15 +233,22 @@ class Likelihood(Parameterized):
         #sigma2 = sigma**2
         normalizer = np.sqrt(2*np.pi*variance)
+        fmin_v = -np.inf
+        fmin_m = np.inf
+        fmin = -np.inf
+        fmax = np.inf
+
+        from ..util.misc import safe_exp

         # E( V(Y_star|f_star) )
         def int_var(f,m,v):
-            p = np.exp(-(0.5/v)*np.square(f - m))
+            exponent = -(0.5/v)*np.square(f - m)
+            p = safe_exp(exponent)
             #If p is zero then conditional_variance will overflow
             if p < 1e-10:
                 return 0.
             else:
                 return self.conditional_variance(f)*p
-        scaled_exp_variance = [quad(int_var, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
+        scaled_exp_variance = [quad(int_var, fmin_v, fmax,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)]
         exp_var = np.array(scaled_exp_variance)[:,None] / normalizer

         #V( E(Y_star|f_star) ) = E( E(Y_star|f_star)**2 ) - E( E(Y_star|f_star) )**2
@@ -240,14 +260,15 @@ class Likelihood(Parameterized):
         #E( E(Y_star|f_star)**2 )
         def int_pred_mean_sq(f,m,v,predictive_mean_sq):
-            p = np.exp(-(0.5/v)*np.square(f - m))
+            exponent = -(0.5/v)*np.square(f - m)
+            p = np.exp(exponent)
             #If p is zero then conditional_mean**2 will overflow
             if p < 1e-10:
                 return 0.
             else:
                 return self.conditional_mean(f)**2*p
-        scaled_exp_exp2 = [quad(int_pred_mean_sq, -np.inf, np.inf,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)]
+        scaled_exp_exp2 = [quad(int_pred_mean_sq, fmin_m, fmax,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)]
         exp_exp2 = np.array(scaled_exp_exp2)[:,None] / normalizer

         var_exp = exp_exp2 - predictive_mean_sq
@@ -295,8 +316,18 @@ class Likelihood(Parameterized):
         :returns: likelihood evaluated for this point
         :rtype: float
         """
-        inv_link_f = self.gp_link.transf(f)
-        return self.pdf_link(inv_link_f, y, Y_metadata=Y_metadata)
+        if isinstance(self.gp_link, link_functions.Identity):
+            return self.pdf_link(f, y, Y_metadata=Y_metadata)
+        else:
+            inv_link_f = self.gp_link.transf(f)
+            return self.pdf_link(inv_link_f, y, Y_metadata=Y_metadata)
+
+    def logpdf_sum(self, f, y, Y_metadata=None):
+        """
+        Convenience function that can be overridden for likelihoods where this
+        can be computed more efficiently
+        """
+        return np.sum(self.logpdf(f, y, Y_metadata=Y_metadata))
     def logpdf(self, f, y, Y_metadata=None):
         """
@@ -313,8 +344,11 @@ class Likelihood(Parameterized):
         :returns: log likelihood evaluated for this point
         :rtype: float
         """
-        inv_link_f = self.gp_link.transf(f)
-        return self.logpdf_link(inv_link_f, y, Y_metadata=Y_metadata)
+        if isinstance(self.gp_link, link_functions.Identity):
+            return self.logpdf_link(f, y, Y_metadata=Y_metadata)
+        else:
+            inv_link_f = self.gp_link.transf(f)
+            return self.logpdf_link(inv_link_f, y, Y_metadata=Y_metadata)
     def dlogpdf_df(self, f, y, Y_metadata=None):
         """
@@ -332,11 +366,15 @@ class Likelihood(Parameterized):
         :returns: derivative of log likelihood evaluated for this point
         :rtype: 1xN array
         """
-        inv_link_f = self.gp_link.transf(f)
-        dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
-        dlink_df = self.gp_link.dtransf_df(f)
-        return chain_1(dlogpdf_dlink, dlink_df)
+        if isinstance(self.gp_link, link_functions.Identity):
+            return self.dlogpdf_dlink(f, y, Y_metadata=Y_metadata)
+        else:
+            inv_link_f = self.gp_link.transf(f)
+            dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
+            dlink_df = self.gp_link.dtransf_df(f)
+            return chain_1(dlogpdf_dlink, dlink_df)

+    @blockify_hessian
     def d2logpdf_df2(self, f, y, Y_metadata=None):
         """
         Evaluates the link function link(f) then computes the second derivative of log likelihood using it
@@ -353,13 +391,18 @@ class Likelihood(Parameterized):
         :returns: second derivative of log likelihood evaluated for this point (diagonal only)
         :rtype: 1xN array
         """
-        inv_link_f = self.gp_link.transf(f)
-        d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
-        dlink_df = self.gp_link.dtransf_df(f)
-        dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
-        d2link_df2 = self.gp_link.d2transf_df2(f)
-        return chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2)
+        if isinstance(self.gp_link, link_functions.Identity):
+            d2logpdf_df2 = self.d2logpdf_dlink2(f, y, Y_metadata=Y_metadata)
+        else:
+            inv_link_f = self.gp_link.transf(f)
+            d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
+            dlink_df = self.gp_link.dtransf_df(f)
+            dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
+            d2link_df2 = self.gp_link.d2transf_df2(f)
+            d2logpdf_df2 = chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2)
+        return d2logpdf_df2

+    @blockify_third
     def d3logpdf_df3(self, f, y, Y_metadata=None):
         """
         Evaluates the link function link(f) then computes the third derivative of log likelihood using it
@@ -376,64 +419,96 @@ class Likelihood(Parameterized):
         :returns: third derivative of log likelihood evaluated for this point
         :rtype: float
         """
-        inv_link_f = self.gp_link.transf(f)
-        d3logpdf_dlink3 = self.d3logpdf_dlink3(inv_link_f, y, Y_metadata=Y_metadata)
-        dlink_df = self.gp_link.dtransf_df(f)
-        d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
-        d2link_df2 = self.gp_link.d2transf_df2(f)
-        dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
-        d3link_df3 = self.gp_link.d3transf_df3(f)
-        return chain_3(d3logpdf_dlink3, dlink_df, d2logpdf_dlink2, d2link_df2, dlogpdf_dlink, d3link_df3)
+        if isinstance(self.gp_link, link_functions.Identity):
+            d3logpdf_df3 = self.d3logpdf_dlink3(f, y, Y_metadata=Y_metadata)
+        else:
+            inv_link_f = self.gp_link.transf(f)
+            d3logpdf_dlink3 = self.d3logpdf_dlink3(inv_link_f, y, Y_metadata=Y_metadata)
+            dlink_df = self.gp_link.dtransf_df(f)
+            d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
+            d2link_df2 = self.gp_link.d2transf_df2(f)
+            dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
+            d3link_df3 = self.gp_link.d3transf_df3(f)
+            d3logpdf_df3 = chain_3(d3logpdf_dlink3, dlink_df, d2logpdf_dlink2, d2link_df2, dlogpdf_dlink, d3link_df3)
+        return d3logpdf_df3
     def dlogpdf_dtheta(self, f, y, Y_metadata=None):
         """
         TODO: Doc strings
         """
         if self.size > 0:
-            inv_link_f = self.gp_link.transf(f)
-            return self.dlogpdf_link_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
+            if self.not_block_really:
+                raise NotImplementedError("Need to make a decorator for this!")
+            if isinstance(self.gp_link, link_functions.Identity):
+                return self.dlogpdf_link_dtheta(f, y, Y_metadata=Y_metadata)
+            else:
+                inv_link_f = self.gp_link.transf(f)
+                return self.dlogpdf_link_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
         else:
             # There are no parameters so return an empty array for derivatives
-            return np.zeros([1, 0])
+            return np.zeros((0, f.shape[0], f.shape[1]))
     def dlogpdf_df_dtheta(self, f, y, Y_metadata=None):
         """
         TODO: Doc strings
         """
         if self.size > 0:
-            inv_link_f = self.gp_link.transf(f)
-            dlink_df = self.gp_link.dtransf_df(f)
-            dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
-            return chain_1(dlogpdf_dlink_dtheta, dlink_df)
+            if self.not_block_really:
+                raise NotImplementedError("Need to make a decorator for this!")
+            if isinstance(self.gp_link, link_functions.Identity):
+                return self.dlogpdf_dlink_dtheta(f, y, Y_metadata=Y_metadata)
+            else:
+                inv_link_f = self.gp_link.transf(f)
+                dlink_df = self.gp_link.dtransf_df(f)
+                dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
+                dlogpdf_df_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
+                #Chain each parameter of the likelihood separately
+                for p in range(self.size):
+                    dlogpdf_df_dtheta[p, :, :] = chain_1(dlogpdf_dlink_dtheta[p,:,:], dlink_df)
+                return dlogpdf_df_dtheta
+                #return chain_1(dlogpdf_dlink_dtheta, dlink_df)
         else:
             # There are no parameters so return an empty array for derivatives
-            return np.zeros([f.shape[0], 0])
+            return np.zeros((0, f.shape[0], f.shape[1]))
     def d2logpdf_df2_dtheta(self, f, y, Y_metadata=None):
         """
         TODO: Doc strings
         """
         if self.size > 0:
-            inv_link_f = self.gp_link.transf(f)
-            dlink_df = self.gp_link.dtransf_df(f)
-            d2link_df2 = self.gp_link.d2transf_df2(f)
-            d2logpdf_dlink2_dtheta = self.d2logpdf_dlink2_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
-            dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
-            return chain_2(d2logpdf_dlink2_dtheta, dlink_df, dlogpdf_dlink_dtheta, d2link_df2)
+            if self.not_block_really:
+                raise NotImplementedError("Need to make a decorator for this!")
+            if isinstance(self.gp_link, link_functions.Identity):
+                return self.d2logpdf_dlink2_dtheta(f, y, Y_metadata=Y_metadata)
+            else:
+                inv_link_f = self.gp_link.transf(f)
+                dlink_df = self.gp_link.dtransf_df(f)
+                d2link_df2 = self.gp_link.d2transf_df2(f)
+                d2logpdf_dlink2_dtheta = self.d2logpdf_dlink2_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
+                dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
+                d2logpdf_df2_dtheta = np.zeros((self.size, f.shape[0], f.shape[1]))
+                #Chain each parameter of the likelihood separately
+                for p in range(self.size):
+                    d2logpdf_df2_dtheta[p, :, :] = chain_2(d2logpdf_dlink2_dtheta[p,:,:], dlink_df, dlogpdf_dlink_dtheta[p,:,:], d2link_df2)
+                return d2logpdf_df2_dtheta
+                #return chain_2(d2logpdf_dlink2_dtheta, dlink_df, dlogpdf_dlink_dtheta, d2link_df2)
         else:
             # There are no parameters so return an empty array for derivatives
-            return np.zeros([f.shape[0], 0])
+            return np.zeros((0, f.shape[0], f.shape[1]))
     def _laplace_gradients(self, f, y, Y_metadata=None):
-        dlogpdf_dtheta = self.dlogpdf_dtheta(f, y, Y_metadata=Y_metadata).sum(axis=0)
+        dlogpdf_dtheta = self.dlogpdf_dtheta(f, y, Y_metadata=Y_metadata)
         dlogpdf_df_dtheta = self.dlogpdf_df_dtheta(f, y, Y_metadata=Y_metadata)
         d2logpdf_df2_dtheta = self.d2logpdf_df2_dtheta(f, y, Y_metadata=Y_metadata)

         #Parameters are stacked vertically. Must be listed in same order as 'get_param_names'
         # ensure we have gradients for every parameter we want to optimize
-        assert len(dlogpdf_dtheta) == self.size #1 x num_param array
-        assert dlogpdf_df_dtheta.shape[1] == self.size #f x num_param matrix
-        assert d2logpdf_df2_dtheta.shape[1] == self.size #f x num_param matrix
+        assert dlogpdf_dtheta.shape[0] == self.size #f, d x num_param array
+        assert dlogpdf_df_dtheta.shape[0] == self.size #f x d x num_param matrix or just f x num_param
+        assert d2logpdf_df2_dtheta.shape[0] == self.size #f x num_param matrix or f x d x num_param matrix, f x f x num_param or f x f x d x num_param

         return dlogpdf_dtheta, dlogpdf_df_dtheta, d2logpdf_df2_dtheta
@@ -454,19 +529,98 @@ class Likelihood(Parameterized):
     def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
         #compute the quantiles by sampling!!!
-        N_samp = 1000
+        N_samp = 500
         s = np.random.randn(mu.shape[0], N_samp)*np.sqrt(var) + mu
         #ss_f = s.flatten()
         #ss_y = self.samples(ss_f, Y_metadata)
+        #ss_y = self.samples(s, Y_metadata, samples=100)
         ss_y = self.samples(s, Y_metadata)
         #ss_y = ss_y.reshape(mu.shape[0], N_samp)
         return [np.percentile(ss_y, q, axis=1)[:,None] for q in quantiles]

-    def samples(self, gp, Y_metadata=None):
+    def samples(self, gp, Y_metadata=None, samples=1):
         """
         Returns a set of samples of observations based on a given value of the latent variable.

         :param gp: latent variable
+        :param samples: number of samples to take for each f location
         """
-        raise NotImplementedError
+        raise NotImplementedError("""May be possible to use MCMC with user-tuning, see
+                                  MCMC_pdf_samples in likelihood.py and write samples function
+                                  using this, beware this is a simple implementation
+                                  of Metropolis and will not work well for all likelihoods""")
+
+    def MCMC_pdf_samples(self, fNew, num_samples=1000, starting_loc=None, stepsize=0.1, burn_in=1000, Y_metadata=None):
+        """
+        Simple implementation of the Metropolis sampling algorithm.
+
+        Will run a parallel chain for each input dimension (treats each f independently),
+        and thus assumes f*_1 is independent of f*_2 etc.
+
+        :param num_samples: number of samples to take
+        :param fNew: f at which to sample around
+        :param starting_loc: starting locations of the independent chains (usually will be conditional_mean of likelihood), often link_f
+        :param stepsize: stepsize for the normal proposal distribution (will need modifying)
+        :param burn_in: number of samples to use for burn-in (will need modifying)
+        :param Y_metadata: Y_metadata for pdf
+        """
+        print "Warning, using MCMC for sampling y*, needs to be tuned!"
+        if starting_loc is None:
+            starting_loc = fNew
+        from functools import partial
+        logpdf = partial(self.logpdf, f=fNew, Y_metadata=Y_metadata)
+        pdf = lambda y_star: np.exp(logpdf(y=y_star[:, None]))
+        #The link function of f should be a good starting point
+        #(i.e. the point before you corrupt it with the likelihood)
+        par_chains = starting_loc.shape[0]
+        chain_values = np.zeros((par_chains, num_samples))
+        chain_values[:, 0][:,None] = starting_loc
+        #Use same stepsize for all par_chains
+        stepsize = np.ones(par_chains)*stepsize
+        accepted = np.zeros((par_chains, num_samples+burn_in))
+        accept_ratio = np.zeros(num_samples+burn_in)
+        #Whilst burning in, only need to keep the previous lot
+        burnin_cache = np.zeros(par_chains)
+        burnin_cache[:] = starting_loc.flatten()
+        burning_in = True
+        for i in xrange(burn_in+num_samples):
+            next_ind = i-burn_in
+            if burning_in:
+                old_y = burnin_cache
+            else:
+                old_y = chain_values[:,next_ind-1]
+            old_lik = pdf(old_y)
+            #Propose new y from Gaussian proposal
+            new_y = np.random.normal(loc=old_y, scale=stepsize)
+            new_lik = pdf(new_y)
+            #Accept using Metropolis (not Hastings) acceptance:
+            #always accepts if new_lik > old_lik
+            accept_probability = np.minimum(1, new_lik/old_lik)
+            u = np.random.uniform(0,1,par_chains)
+            #print "Accept prob: ", accept_probability
+            accepts = u < accept_probability
+            if burning_in:
+                burnin_cache[accepts] = new_y[accepts]
+                burnin_cache[~accepts] = old_y[~accepts]
+                if i == burn_in:
+                    burning_in = False
+                    chain_values[:,0] = burnin_cache
+            else:
+                #If it was accepted then new_y becomes the latest sample
+                chain_values[accepts, next_ind] = new_y[accepts]
+                #Otherwise use old y as the sample
+                chain_values[~accepts, next_ind] = old_y[~accepts]
+            accepted[~accepts, i] = 0
+            accepted[accepts, i] = 1
+            accept_ratio[i] = np.sum(accepted[:,i])/float(par_chains)
+            #Show progress
+            if i % int((burn_in+num_samples)*0.1) == 0:
+                print "{}% of samples taken ({})".format((i/int((burn_in+num_samples)*0.1)*10), i)
+                print "Last run accept ratio: ", accept_ratio[i]
+        print "Average accept ratio: ", np.mean(accept_ratio)
+        return chain_values
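
Note: safe_exp, imported from ..util.misc above, is used so that the quadrature integrands cannot overflow for extreme exponents. Its actual definition is not shown in this diff; a plausible stand-in with the same intent would be:

    import numpy as np

    def safe_exp(x):
        # assumption: clip the exponent so np.exp never overflows float64
        # (np.exp overflows a little above 709 for 64-bit floats)
        return np.exp(np.clip(x, -np.inf, 700.0))

    print(safe_exp(1000.0))   # finite instead of inf plus a RuntimeWarning
    print(safe_exp(-1000.0))  # 0.0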

View file

@@ -35,8 +35,8 @@ class StudentT(Likelihood):
         self.log_concave = False

-    def parameters_changed(self):
-        self.variance = (self.v / float(self.v - 2)) * self.sigma2
+    #def parameters_changed(self):
+        #self.variance = (self.v / float(self.v - 2)) * self.sigma2

     def update_gradients(self, grads):
         """
@@ -180,7 +180,8 @@ class StudentT(Likelihood):
         :rtype: float
         """
         e = y - inv_link_f
-        dlogpdf_dvar = self.v*(e**2 - self.sigma2)/(2*self.sigma2*(self.sigma2*self.v + e**2))
+        e2 = np.square(e)
+        dlogpdf_dvar = self.v*(e2 - self.sigma2)/(2*self.sigma2*(self.sigma2*self.v + e2))
         return dlogpdf_dvar

     def dlogpdf_dlink_dvar(self, inv_link_f, y, Y_metadata=None):
@@ -226,17 +227,18 @@ class StudentT(Likelihood):
     def dlogpdf_link_dtheta(self, f, y, Y_metadata=None):
         dlogpdf_dvar = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata)
         dlogpdf_dv = np.zeros_like(dlogpdf_dvar) #FIXME: Not done yet
-        return np.hstack((dlogpdf_dvar, dlogpdf_dv))
+        return np.array((dlogpdf_dvar, dlogpdf_dv))

     def dlogpdf_dlink_dtheta(self, f, y, Y_metadata=None):
         dlogpdf_dlink_dvar = self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata)
         dlogpdf_dlink_dv = np.zeros_like(dlogpdf_dlink_dvar) #FIXME: Not done yet
-        return np.hstack((dlogpdf_dlink_dvar, dlogpdf_dlink_dv))
+        return np.array((dlogpdf_dlink_dvar, dlogpdf_dlink_dv))

     def d2logpdf_dlink2_dtheta(self, f, y, Y_metadata=None):
         d2logpdf_dlink2_dvar = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata)
         d2logpdf_dlink2_dv = np.zeros_like(d2logpdf_dlink2_dvar) #FIXME: Not done yet
-        return np.hstack((d2logpdf_dlink2_dvar, d2logpdf_dlink2_dv))
+        return np.array((d2logpdf_dlink2_dvar, d2logpdf_dlink2_dv))

     def predictive_mean(self, mu, sigma, Y_metadata=None):
         # The comment here confuses mean and median.

View file

@ -17,45 +17,25 @@ class Additive(Mapping):
:type mapping1: GPy.mappings.Mapping :type mapping1: GPy.mappings.Mapping
:param mapping2: second mapping to add together. :param mapping2: second mapping to add together.
:type mapping2: GPy.mappings.Mapping :type mapping2: GPy.mappings.Mapping
:param tensor: whether or not to use the tensor product of input spaces
:type tensor: bool
""" """
def __init__(self, mapping1, mapping2, tensor=False): def __init__(self, mapping1, mapping2):
if tensor: assert(mapping1.input_dim==mapping2.input_dim)
input_dim = mapping1.input_dim + mapping2.input_dim
else:
input_dim = mapping1.input_dim
assert(mapping1.input_dim==mapping2.input_dim)
assert(mapping1.output_dim==mapping2.output_dim) assert(mapping1.output_dim==mapping2.output_dim)
output_dim = mapping1.output_dim input_dim, output_dim = mapping1.input_dim, mapping1.output_dim
Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim) Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim)
self.mapping1 = mapping1 self.mapping1 = mapping1
self.mapping2 = mapping2 self.mapping2 = mapping2
self.num_params = self.mapping1.num_params + self.mapping2.num_params self.num_params = self.mapping1.num_params + self.mapping2.num_params
self.name = self.mapping1.name + '+' + self.mapping2.name self.name = self.mapping1.name + '+' + self.mapping2.name
def _get_param_names(self):
return self.mapping1._get_param_names + self.mapping2._get_param_names
def _get_params(self):
return np.hstack((self.mapping1._get_params(), self.mapping2._get_params()))
def _set_params(self, x):
self.mapping1._set_params(x[:self.mapping1.num_params])
self.mapping2._set_params(x[self.mapping1.num_params:])
def randomize(self):
self.mapping1._randomize()
self.mapping2._randomize()
def f(self, X): def f(self, X):
return self.mapping1.f(X) + self.mapping2.f(X) return self.mapping1.f(X) + self.mapping2.f(X)
def df_dtheta(self, dL_df, X): def update_gradients(self, dL_dF, X):
self._df_dA = (dL_df[:, :, None]*self.kern.K(X, self.X)[:, None, :]).sum(0).T self.mapping1.update_gradients(dL_dF, X)
self._df_dbias = (dL_df.sum(0)) self.mapping2.update_gradients(dL_dF, X)
return np.hstack((self._df_dA.flatten(), self._df_dbias))
def df_dX(self, dL_df, X): def gradients_X(self, dL_dF, X):
return self.kern.dK_dX((dL_df[:, None, :]*self.A[None, :, :]).sum(2), X, self.X) return self.mapping1.gradients_X(dL_dF, X) + self.mapping2.gradients_X(dL_dF, X)
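The additive rule above is just the chain rule for a sum of mappings: the two gradients_X contributions add. A toy numerical check with two hypothetical linear maps (all names and shapes below are illustrative):

import numpy as np

N, Din, Dout = 10, 3, 2
X = np.random.randn(N, Din)
A1, A2 = np.random.randn(Din, Dout), np.random.randn(Din, Dout)
dL_dF = np.random.randn(N, Dout)

#gradients_X of each linear map f_k(X) = X A_k is dL_dF A_k^T
grad_sum = np.dot(dL_dF, A1.T) + np.dot(dL_dF, A2.T)

#Finite differences through the summed map f = f1 + f2
f = lambda X: np.dot(X, A1) + np.dot(X, A2)
eps = 1e-6
numeric = np.zeros_like(X)
for i in range(N):
    for j in range(Din):
        Xp, Xm = X.copy(), X.copy()
        Xp[i, j] += eps
        Xm[i, j] -= eps
        numeric[i, j] = np.sum(dL_dF * (f(Xp) - f(Xm))) / (2 * eps)
assert np.allclose(grad_sum, numeric, atol=1e-4)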

GPy/mappings/identity.py (new file, 26 lines)

@@ -0,0 +1,26 @@
# Copyright (c) 2015, James Hensman
from ..core.mapping import Mapping
from ..core import Param
class Identity(Mapping):
"""
A mapping that does nothing!
"""
def __init__(self, input_dim, output_dim, name='identity'):
Mapping.__init__(self, input_dim, output_dim, name)
def f(self, X):
return X
def update_gradients(self, dL_dF, X):
pass
def gradients_X(self, dL_dF, X):
return dL_dF


@@ -1,9 +1,10 @@
# Copyright (c) 2013, GPy authors (see AUTHORS.txt). # Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Copyright (c) 2015, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np import numpy as np
from ..core.mapping import Mapping from ..core.mapping import Mapping
import GPy from ..core import Param
class Kernel(Mapping): class Kernel(Mapping):
""" """
@@ -11,50 +12,41 @@ class Kernel(Mapping): 
.. math:: .. math::
f(\mathbf{x}^*) = \mathbf{A}\mathbf{k}(\mathbf{X}, \mathbf{x}^*) + \mathbf{b} f(\mathbf{x}) = \sum_i \alpha_i k(\mathbf{z}_i, \mathbf{x})
:param X: input observations containing :math:`\mathbf{X}` or for multiple outputs
:type X: ndarray
.. math::
f_i(\mathbf{x}) = \sum_j \alpha_{j,i} k(\mathbf{z}_j, \mathbf{x})
:param input_dim: dimension of input.
:type input_dim: int
:param output_dim: dimension of output. :param output_dim: dimension of output.
:type output_dim: int :type output_dim: int
:param Z: input observations containing :math:`\mathbf{Z}`
:type Z: ndarray
:param kernel: a GPy kernel, defaults to GPy.kern.RBF :param kernel: a GPy kernel, defaults to GPy.kern.RBF
:type kernel: GPy.kern.kern :type kernel: GPy.kern.kern
""" """
def __init__(self, X, output_dim=1, kernel=None): def __init__(self, input_dim, output_dim, Z, kernel, name='kernmap'):
Mapping.__init__(self, input_dim=X.shape[1], output_dim=output_dim) Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name)
if kernel is None:
kernel = GPy.kern.RBF(self.input_dim)
self.kern = kernel self.kern = kernel
self.X = X self.Z = Z
self.num_data = X.shape[0] self.num_bases, Zdim = Z.shape
self.num_params = self.output_dim*(self.num_data + 1) assert Zdim == self.input_dim
self.A = np.array((self.num_data, self.output_dim)) self.A = Param('A', np.random.randn(self.num_bases, self.output_dim))
self.bias = np.array(self.output_dim) self.add_parameter(self.A)
self.randomize()
self.name = 'kernel'
def _get_param_names(self):
return sum([['A_%i_%i' % (n, d) for d in range(self.output_dim)] for n in range(self.num_data)], []) + ['bias_%i' % d for d in range(self.output_dim)]
def _get_params(self):
return np.hstack((self.A.flatten(), self.bias))
def _set_params(self, x):
self.A = x[:self.num_data * self.output_dim].reshape(self.num_data, self.output_dim).copy()
self.bias = x[self.num_data*self.output_dim:].copy()
def randomize(self):
self.A = np.random.randn(self.num_data, self.output_dim)/np.sqrt(self.num_data+1)
self.bias = np.random.randn(self.output_dim)/np.sqrt(self.num_data+1)
def f(self, X): def f(self, X):
return np.dot(self.kern.K(X, self.X),self.A) + self.bias return np.dot(self.kern.K(X, self.Z), self.A)
def df_dtheta(self, dL_df, X): def update_gradients(self, dL_dF, X):
self._df_dA = (dL_df[:, :, None]*self.kern.K(X, self.X)[:, None, :]).sum(0).T self.kern.update_gradients_full(np.dot(dL_dF, self.A.T), X, self.Z)
self._df_dbias = (dL_df.sum(0)) self.A.gradient = np.dot( self.kern.K(self.Z, X), dL_dF)
return np.hstack((self._df_dA.flatten(), self._df_dbias))
def df_dX(self, dL_df, X): def gradients_X(self, dL_dF, X):
return self.kern.gradients_X((dL_df[:, None, :]*self.A[None, :, :]).sum(2), X, self.X) return self.kern.gradients_X(np.dot(dL_dF, self.A.T), X, self.Z)
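For reference, the new form of this mapping is easy to reproduce in plain numpy. A sketch with a hand-rolled RBF kernel (all names and shapes below are illustrative, not GPy API):

import numpy as np

def rbf(X, Z, lengthscale=1.0):
    #Squared-exponential kernel matrix between two sets of points
    d2 = ((X[:, None, :] - Z[None, :, :]) ** 2).sum(-1)
    return np.exp(-0.5 * d2 / lengthscale ** 2)

N, M, Din, Dout = 20, 5, 2, 3
X = np.random.randn(N, Din)
Z = np.random.randn(M, Din)     #basis locations
A = np.random.randn(M, Dout)    #one weight column per output

F = np.dot(rbf(X, Z), A)        #f(X) = K(X, Z) A, as in Kernel.f above

#Gradient of a loss wrt A, matching update_gradients: dL/dA = K(Z, X) dL/dF
dL_dF = np.random.randn(N, Dout)
dL_dA = np.dot(rbf(Z, X), dL_dF)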


@@ -1,43 +1,39 @@
# Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt). # Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt).
# Copyright (c) 2015, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np import numpy as np
from ..core.mapping import Bijective_mapping from ..core.mapping import Mapping
from ..core.parameterization import Param from ..core.parameterization import Param
class Linear(Bijective_mapping): class Linear(Mapping):
""" """
Mapping based on a linear model. A Linear mapping.
.. math:: .. math::
f(\mathbf{x}^*) = \mathbf{W}\mathbf{x}^* + \mathbf{b} F(\mathbf{x}) = \mathbf{A}\mathbf{x}
:param X: input observations
:type X: ndarray :param input_dim: dimension of input.
:type input_dim: int
:param output_dim: dimension of output. :param output_dim: dimension of output.
:type output_dim: int :type output_dim: int
:param kernel: a GPy kernel, defaults to GPy.kern.RBF
:type kernel: GPy.kern.kern
""" """
def __init__(self, input_dim=1, output_dim=1, name='linear'): def __init__(self, input_dim, output_dim, name='linmap'):
Bijective_mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name) Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name)
self.W = Param('W',np.array((self.input_dim, self.output_dim))) self.A = Param('A', np.random.randn(self.input_dim, self.output_dim))
self.bias = Param('bias',np.array(self.output_dim)) self.add_parameter(self.A)
self.link_parameters(self.W, self.bias)
def f(self, X): def f(self, X):
return np.dot(X,self.W) + self.bias return np.dot(X, self.A)
def g(self, f): def update_gradients(self, dL_dF, X):
V = np.linalg.solve(np.dot(self.W.T, self.W), self.W.T) self.A.gradient = np.dot(X.T, dL_dF)
return np.dot(f-self.bias, V)
def df_dtheta(self, dL_df, X): def gradients_X(self, dL_dF, X):
df_dW = (dL_df[:, :, None]*X[:, None, :]).sum(0).T return np.dot(dL_dF, self.A.T)
df_dbias = (dL_df.sum(0))
return np.hstack((df_dW.flatten(), df_dbias))
def dL_dX(self, partial, X):
"""The gradient of L with respect to the inputs to the mapping, where L is a function that is dependent on the output of the mapping, f."""
return (partial[:, None, :]*self.W[None, :, :]).sum(2)
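Both gradients of the new Linear mapping follow from F = XA. A quick finite-difference check of the parameter gradient (a standalone sketch; shapes are made up):

import numpy as np

N, Din, Dout = 8, 4, 2
X, A = np.random.randn(N, Din), np.random.randn(Din, Dout)
dL_dF = np.random.randn(N, Dout)
L = lambda A: np.sum(dL_dF * np.dot(X, A))  #linear surrogate objective

#Analytic gradient, as in update_gradients above: dL/dA = X^T dL/dF
analytic = np.dot(X.T, dL_dF)

eps = 1e-6
numeric = np.zeros_like(A)
for i in range(Din):
    for j in range(Dout):
        Ap, Am = A.copy(), A.copy()
        Ap[i, j] += eps
        Am[i, j] -= eps
        numeric[i, j] = (L(Ap) - L(Am)) / (2 * eps)
assert np.allclose(analytic, numeric, atol=1e-4)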


@@ -3,128 +3,40 @@
import numpy as np import numpy as np
from ..core.mapping import Mapping from ..core.mapping import Mapping
from ..core import Param
class MLP(Mapping): class MLP(Mapping):
""" """
Mapping based on a multi-layer perceptron neural network model. Mapping based on a multi-layer perceptron neural network model, with a single hidden layer
.. math::
f(\\mathbf{x}*) = \\mathbf{W}^0\\boldsymbol{\\phi}(\\mathbf{W}^1\\mathbf{x}+\\mathbf{b}^1)^* + \\mathbf{b}^0
where
.. math::
\\phi(\\cdot) = \\text{tanh}(\\cdot)
:param X: input observations
:type X: ndarray
:param output_dim: dimension of output.
:type output_dim: int
:param hidden_dim: dimension of hidden layer. If it is an int, there is one hidden layer of the given dimension. If it is a list of ints there are as many hidden layers as the length of the list, each with the given number of hidden nodes in it.
:type hidden_dim: int or list of ints.
""" """
def __init__(self, input_dim=1, output_dim=1, hidden_dim=3): def __init__(self, input_dim=1, output_dim=1, hidden_dim=3, name='mlpmap'):
Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim) super(MLP, self).__init__(input_dim=input_dim, output_dim=output_dim, name=name)
self.name = 'mlp'
if isinstance(hidden_dim, int):
hidden_dim = [hidden_dim]
self.hidden_dim = hidden_dim self.hidden_dim = hidden_dim
self.activation = [None]*len(self.hidden_dim) self.W1 = Param('W1', np.random.randn(self.input_dim, self.hidden_dim))
self.W = [] self.b1 = Param('b1', np.random.randn(self.hidden_dim))
self._dL_dW = [] self.W2 = Param('W2', np.random.randn(self.hidden_dim, self.output_dim))
self.bias = [] self.b2 = Param('b2', np.random.randn(self.output_dim))
self._dL_dbias = []
self.W.append(np.zeros((self.input_dim, self.hidden_dim[0])))
self._dL_dW.append(np.zeros((self.input_dim, self.hidden_dim[0])))
self.bias.append(np.zeros(self.hidden_dim[0]))
self._dL_dbias.append(np.zeros(self.hidden_dim[0]))
self.num_params = self.hidden_dim[0]*(self.input_dim+1)
for h1, h0 in zip(hidden_dim[1:], hidden_dim[0:-1]):
self.W.append(np.zeros((h0, h1)))
self._dL_dW.append(np.zeros((h0, h1)))
self.bias.append(np.zeros(h1))
self._dL_dbias.append(np.zeros(h1))
self.num_params += h1*(h0+1)
self.W.append(np.zeros((self.hidden_dim[-1], self.output_dim)))
self._dL_dW.append(np.zeros((self.hidden_dim[-1], self.output_dim)))
self.bias.append(np.zeros(self.output_dim))
self._dL_dbias.append(np.zeros(self.output_dim))
self.num_params += self.output_dim*(self.hidden_dim[-1]+1)
self.randomize()
def _get_param_names(self):
return sum([['W%i_%i_%i' % (i, n, d) for n in range(self.W[i].shape[0]) for d in range(self.W[i].shape[1])] + ['bias%i_%i' % (i, d) for d in range(self.W[i].shape[1])] for i in range(len(self.W))], [])
def _get_params(self):
param = np.array([])
for W, bias in zip(self.W, self.bias):
param = np.hstack((param, W.flatten(), bias))
return param
def _set_params(self, x):
start = 0
for W, bias in zip(self.W, self.bias):
end = W.shape[0]*W.shape[1]+start
W[:] = x[start:end].reshape(W.shape[0], W.shape[1]).copy()
start = end
end = W.shape[1]+end
bias[:] = x[start:end].copy()
start = end
def randomize(self):
for W, bias in zip(self.W, self.bias):
W[:] = np.random.randn(W.shape[0], W.shape[1])/np.sqrt(W.shape[0]+1)
bias[:] = np.random.randn(W.shape[1])/np.sqrt(W.shape[0]+1)
def f(self, X): def f(self, X):
self._f_computations(X) N, D = X.shape
return np.dot(np.tanh(self.activation[-1]), self.W[-1]) + self.bias[-1] activations = np.tanh(np.dot(X,self.W1) + self.b1)
self.out = np.dot(activations, self.W2) + self.b2
return self.out
def _f_computations(self, X): def update_gradients(self, dL_dF, X):
W = self.W[0] activations = np.tanh(np.dot(X,self.W1) + self.b1)
bias = self.bias[0]
self.activation[0] = np.dot(X,W) + bias
for W, bias, index in zip(self.W[1:-1], self.bias[1:-1], range(1, len(self.activation))):
self.activation[index] = np.dot(np.tanh(self.activation[index-1]), W)+bias
def df_dtheta(self, dL_df, X):
self._df_computations(dL_df, X)
g = np.array([])
for gW, gbias in zip(self._dL_dW, self._dL_dbias):
g = np.hstack((g, gW.flatten(), gbias))
return g
def _df_computations(self, dL_df, X): #Evaluate second-layer gradients.
self._f_computations(X) self.W2.gradient = np.dot(activations.T, dL_dF)
a0 = self.activation[-1] self.b2.gradient = np.sum(dL_dF, 0)
W = self.W[-1]
self._dL_dW[-1] = (dL_df[:, :, None]*np.tanh(a0[:, None, :])).sum(0).T # Backpropagation to hidden layer.
dL_dta=(dL_df[:, None, :]*W[None, :, :]).sum(2) delta_hid = np.dot(dL_dF, self.W2.T) * (1.0 - activations**2)
self._dL_dbias[-1] = (dL_df.sum(0))
for dL_dW, dL_dbias, W, bias, a0, a1 in zip(self._dL_dW[-2:0:-1], # Finally, evaluate the first-layer gradients.
self._dL_dbias[-2:0:-1], self.W1.gradient = np.dot(X.T, delta_hid)
self.W[-2:0:-1], self.b1.gradient = np.sum(delta_hid, 0)
self.bias[-2:0:-1],
self.activation[-2::-1],
self.activation[-1:0:-1]):
ta = np.tanh(a1)
dL_da = dL_dta*(1-ta*ta)
dL_dW[:] = (dL_da[:, :, None]*np.tanh(a0[:, None, :])).sum(0).T
dL_dbias[:] = (dL_da.sum(0))
dL_dta = (dL_da[:, None, :]*W[None, :, :]).sum(2)
ta = np.tanh(self.activation[0])
dL_da = dL_dta*(1-ta*ta)
W = self.W[0]
self._dL_dW[0] = (dL_da[:, :, None]*X[:, None, :]).sum(0).T
self._dL_dbias[0] = (dL_da.sum(0))
self._dL_dX = (dL_da[:, None, :]*W[None, :, :]).sum(2)
def df_dX(self, dL_df, X):
self._df_computations(dL_df, X)
return self._dL_dX
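The rewritten MLP mapping is standard single-hidden-layer backpropagation. A self-contained numpy sketch of the same forward/backward pass (shapes and names are illustrative):

import numpy as np

N, Din, H, Dout = 16, 3, 5, 2
X = np.random.randn(N, Din)
W1, b1 = np.random.randn(Din, H), np.random.randn(H)
W2, b2 = np.random.randn(H, Dout), np.random.randn(Dout)
dL_dF = np.random.randn(N, Dout)

activations = np.tanh(np.dot(X, W1) + b1)   #hidden layer
F = np.dot(activations, W2) + b2            #network output

#Second-layer gradients
dW2 = np.dot(activations.T, dL_dF)
db2 = dL_dF.sum(0)

#Backpropagate through tanh: d tanh(a)/da = 1 - tanh(a)^2
delta_hid = np.dot(dL_dF, W2.T) * (1.0 - activations**2)

#First-layer gradients
dW1 = np.dot(X.T, delta_hid)
db1 = delta_hid.sum(0)

#Gradient wrt the inputs, as gradients_X would return
dX = np.dot(delta_hid, W1.T)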


@@ -0,0 +1,94 @@
from GPy.core.mapping import Mapping
from GPy.core import Param
import numpy as np
class PiecewiseLinear(Mapping):
"""
A piecewise-linear mapping.
The parameters of this mapping are the positions and values of the function where it is broken (self.breaks, self.values).
Outside the range of the breaks, the function is assumed to have gradient 1
"""
def __init__(self, input_dim, output_dim, values, breaks, name='piecewise_linear'):
assert input_dim==1
assert output_dim==1
Mapping.__init__(self, input_dim, output_dim, name)
values, breaks = np.array(values).flatten(), np.array(breaks).flatten()
assert values.size == breaks.size
self.values = Param('values', values)
self.breaks = Param('breaks', breaks)
self.link_parameter(self.values)
self.link_parameter(self.breaks)
def parameters_changed(self):
self.order = np.argsort(self.breaks)*1
self.reverse_order = np.zeros_like(self.order)
self.reverse_order[self.order] = np.arange(self.order.size)
self.sorted_breaks = self.breaks[self.order]
self.sorted_values = self.values[self.order]
self.grads = np.diff(self.sorted_values)/np.diff(self.sorted_breaks)
def f(self, X):
x = X.flatten()
y = x.copy()
#first adjust the points below the first break
y[x<self.sorted_breaks[0]] = x[x<self.sorted_breaks[0]] + self.sorted_values[0] - self.sorted_breaks[0]
#now all the points past the last break
y[x>self.sorted_breaks[-1]] = x[x>self.sorted_breaks[-1]] + self.sorted_values[-1] - self.sorted_breaks[-1]
#loop through the pairs of breaks
for low, up, g, v in zip(self.sorted_breaks[:-1], self.sorted_breaks[1:], self.grads, self.sorted_values[:-1]):
i = np.logical_and(x>low, x<up)
y[i] = v + (x[i]-low)*g
return y.reshape(-1,1)
def update_gradients(self, dL_dF, X):
x = X.flatten()
dL_dF = dL_dF.flatten()
dL_db = np.zeros(self.sorted_breaks.size)
dL_dv = np.zeros(self.sorted_values.size)
#loop across each interval, computing the gradient for each of the 4 parameters that define it
for i, (low, up, g, v) in enumerate(zip(self.sorted_breaks[:-1], self.sorted_breaks[1:], self.grads, self.sorted_values[:-1])):
index = np.logical_and(x>low, x<up)
xx = x[index]
grad = dL_dF[index]
span = up-low
dL_dv[i] += np.sum(grad*( (low - xx)/span + 1))
dL_dv[i+1] += np.sum(grad*(xx-low)/span)
dL_db[i] += np.sum(grad*g*(xx-up)/span)
dL_db[i+1] += np.sum(grad*g*(low-xx)/span)
#now the end parts
dL_db[0] -= np.sum(dL_dF[x<self.sorted_breaks[0]])
dL_db[-1] -= np.sum(dL_dF[x>self.sorted_breaks[-1]])
dL_dv[0] += np.sum(dL_dF[x<self.sorted_breaks[0]])
dL_dv[-1] += np.sum(dL_dF[x>self.sorted_breaks[-1]])
#now put the gradients back in the correct order!
self.breaks.gradient = dL_db[self.reverse_order]
self.values.gradient = dL_dv[self.reverse_order]
def gradients_X(self, dL_dF, X):
x = X.flatten()
#outside the range of the breakpoints, the function is just offset by a constant, so the partial derivative is 1.
dL_dX = dL_dF.copy().flatten()
#inside the breakpoints, the partial derivative is the segment gradient, self.grads
for low, up, g, v in zip(self.sorted_breaks[:-1], self.sorted_breaks[1:], self.grads, self.sorted_values[:-1]):
i = np.logical_and(x>low, x<up)
dL_dX[i] = dL_dF[i]*g
return dL_dX.reshape(-1,1)
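A hypothetical usage example of the new mapping, assuming it is exposed as GPy.mappings.PiecewiseLinear (the values and breaks below are made up):

import numpy as np
import GPy

#Slope (2-0)/(1-0) = 2 between the breaks at 0 and 1, slope 1 outside
mapping = GPy.mappings.PiecewiseLinear(1, 1, values=[0., 2.], breaks=[0., 1.])
X = np.array([[-0.5], [0.25], [0.75], [1.5]])
F = mapping.f(X)  #[[-0.5], [0.5], [1.5], [2.5]]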


@@ -5,6 +5,30 @@ import unittest 
import numpy as np import numpy as np
import GPy import GPy
class MappingGradChecker(GPy.core.Model):
"""
This class has everything we need to check the gradient of a mapping. It
implements a simple likelihood which is the sum of the outputs of the
mapping. The gradients are checked against the parameters of the mapping
and the input.
"""
def __init__(self, mapping, X, name):
super(MappingGradChecker, self).__init__(name=name)
self.mapping = mapping
self.add_parameter(self.mapping)
self.X = GPy.core.Param('X',X)
self.add_parameter(self.X)
self.dL_dY = np.ones((self.X.shape[0], self.mapping.output_dim))
def log_likelihood(self):
return np.sum(self.mapping.f(self.X))
def parameters_changed(self):
self.X.gradient = self.mapping.gradients_X(self.dL_dY, self.X)
self.mapping.update_gradients(self.dL_dY, self.X)
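With the fixes above applied, the checker might be used like this inside the test module (hypothetical; GPy.mappings.Linear as defined earlier in this commit):

import numpy as np
import GPy

X = np.random.randn(20, 3)
mapping = GPy.mappings.Linear(input_dim=3, output_dim=2)
checker = MappingGradChecker(mapping, X, name='mapping_grad_check')
assert checker.checkgrad()  #verifies both parameter and input gradients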
class MappingTests(unittest.TestCase): class MappingTests(unittest.TestCase):


@@ -15,4 +15,4 @@ import netpbmfile 
import inference_plots import inference_plots
import maps import maps
import img_plots import img_plots
from ssgplvm import SSGPLVM_plot from ssgplvm import SSGPLVM_plot


@@ -11,39 +11,38 @@ import GPy 
class InferenceXTestCase(unittest.TestCase): class InferenceXTestCase(unittest.TestCase):
def genData(self): def genData(self):
D1,D2,N = 12,12,50 D1,D2,N = 12,12,50
np.random.seed(1234)
x = np.linspace(0, 4 * np.pi, N)[:, None] x = np.linspace(0, 4 * np.pi, N)[:, None]
s1 = np.vectorize(lambda x: np.sin(x)) s1 = np.vectorize(lambda x: np.sin(x))
s2 = np.vectorize(lambda x: np.cos(x)**2) s2 = np.vectorize(lambda x: np.cos(x)**2)
s3 = np.vectorize(lambda x:-np.exp(-np.cos(2 * x))) s3 = np.vectorize(lambda x:-np.exp(-np.cos(2 * x)))
sS = np.vectorize(lambda x: np.cos(x)) sS = np.vectorize(lambda x: np.cos(x))
s1 = s1(x) s1 = s1(x)
s2 = s2(x) s2 = s2(x)
s3 = s3(x) s3 = s3(x)
sS = sS(x) sS = sS(x)
s1 -= s1.mean(); s1 /= s1.std(0) s1 -= s1.mean(); s1 /= s1.std(0)
s2 -= s2.mean(); s2 /= s2.std(0) s2 -= s2.mean(); s2 /= s2.std(0)
s3 -= s3.mean(); s3 /= s3.std(0) s3 -= s3.mean(); s3 /= s3.std(0)
sS -= sS.mean(); sS /= sS.std(0) sS -= sS.mean(); sS /= sS.std(0)
S1 = np.hstack([s1, sS]) S1 = np.hstack([s1, sS])
S2 = np.hstack([s3, sS]) S2 = np.hstack([s3, sS])
P1 = np.random.randn(S1.shape[1], D1) P1 = np.random.randn(S1.shape[1], D1)
P2 = np.random.randn(S2.shape[1], D2) P2 = np.random.randn(S2.shape[1], D2)
Y1 = S1.dot(P1) Y1 = S1.dot(P1)
Y2 = S2.dot(P2) Y2 = S2.dot(P2)
Y1 += .01 * np.random.randn(*Y1.shape) Y1 += .01 * np.random.randn(*Y1.shape)
Y2 += .01 * np.random.randn(*Y2.shape) Y2 += .01 * np.random.randn(*Y2.shape)
Y1 -= Y1.mean(0) Y1 -= Y1.mean(0)
Y2 -= Y2.mean(0) Y2 -= Y2.mean(0)
Y1 /= Y1.std(0) Y1 /= Y1.std(0)
@@ -52,33 +51,34 @@ class InferenceXTestCase(unittest.TestCase): 
slist = [s1, s2, s3, sS] slist = [s1, s2, s3, sS]
slist_names = ["s1", "s2", "s3", "sS"] slist_names = ["s1", "s2", "s3", "sS"]
Ylist = [Y1, Y2] Ylist = [Y1, Y2]
return Ylist return Ylist
def test_inferenceX_BGPLVM(self): def test_inferenceX_BGPLVM(self):
Ys = self.genData() Ys = self.genData()
m = GPy.models.BayesianGPLVM(Ys[0],5,kernel=GPy.kern.Linear(5,ARD=True)) m = GPy.models.BayesianGPLVM(Ys[0],5,kernel=GPy.kern.Linear(5,ARD=True))
x,mi = m.infer_newX(m.Y, optimize=False) x,mi = m.infer_newX(m.Y, optimize=False)
self.assertTrue(mi.checkgrad()) self.assertTrue(mi.checkgrad())
m.optimize(max_iters=10000)
x,mi = m.infer_newX(m.Y)
self.assertTrue(np.allclose(m.X.mean, mi.X.mean)) m.optimize(max_iters=10000)
self.assertTrue(np.allclose(m.X.variance, mi.X.variance)) x, mi = m.infer_newX(m.Y)
print m.X.mean - mi.X.mean
self.assertTrue(np.allclose(m.X.mean, mi.X.mean, rtol=1e-4, atol=1e-4))
self.assertTrue(np.allclose(m.X.variance, mi.X.variance, rtol=1e-4, atol=1e-4))
def test_inferenceX_GPLVM(self): def test_inferenceX_GPLVM(self):
Ys = self.genData() Ys = self.genData()
m = GPy.models.GPLVM(Ys[0],3,kernel=GPy.kern.RBF(3,ARD=True)) m = GPy.models.GPLVM(Ys[0],3,kernel=GPy.kern.RBF(3,ARD=True))
x,mi = m.infer_newX(m.Y, optimize=False) x,mi = m.infer_newX(m.Y, optimize=False)
self.assertTrue(mi.checkgrad()) self.assertTrue(mi.checkgrad())
# m.optimize(max_iters=10000) # m.optimize(max_iters=10000)
# x,mi = m.infer_newX(m.Y) # x,mi = m.infer_newX(m.Y)
# self.assertTrue(np.allclose(m.X, x)) # self.assertTrue(np.allclose(m.X, x))
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()


@@ -10,7 +10,7 @@ from GPy.likelihoods import link_functions 
from GPy.core.parameterization import Param from GPy.core.parameterization import Param
from functools import partial from functools import partial
#np.random.seed(300) #np.random.seed(300)
#np.random.seed(7) #np.random.seed(4)
#np.seterr(divide='raise') #np.seterr(divide='raise')
def dparam_partial(inst_func, *args): def dparam_partial(inst_func, *args):
@@ -29,7 +29,7 @@ def dparam_partial(inst_func, *args): 
def param_func(param_val, param_name, inst_func, args): def param_func(param_val, param_name, inst_func, args):
#inst_func.im_self._set_params(param) #inst_func.im_self._set_params(param)
#inst_func.im_self.add_parameter(Param(param_name, param_val)) #inst_func.im_self.add_parameter(Param(param_name, param_val))
inst_func.__self__[param_name] = param_val inst_func.im_self[param_name] = param_val
return inst_func(*args) return inst_func(*args)
return functools.partial(param_func, inst_func=inst_func, args=args) return functools.partial(param_func, inst_func=inst_func, args=args)
@@ -44,38 +44,51 @@ def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, 
The number of parameters and N is the number of data The number of parameters and N is the number of data
Need to take a slice out from f and a slice out of df Need to take a slice out from f and a slice out of df
""" """
print("\n{} likelihood: {} vs {}".format(func.__self__.__class__.__name__, print "\n{} likelihood: {} vs {}".format(func.im_self.__class__.__name__,
func.__name__, dfunc.__name__)) func.__name__, dfunc.__name__)
partial_f = dparam_partial(func, *args) partial_f = dparam_partial(func, *args)
partial_df = dparam_partial(dfunc, *args) partial_df = dparam_partial(dfunc, *args)
gradchecking = True gradchecking = True
zipped_params = zip(params, params_names) zipped_params = zip(params, params_names)
for param_ind, (param_val, param_name) in enumerate(zipped_params): for param_ind, (param_val, param_name) in enumerate(zipped_params):
#Check one parameter at a time, make sure it is 2d (as some gradients only return arrays) then strip out the parameter #Check one parameter at a time, make sure it is 2d (as some gradients only return arrays) then strip out the parameter
fnum = np.atleast_2d(partial_f(param_val, param_name))[:, param_ind].shape[0] f_ = partial_f(param_val, param_name)
dfnum = np.atleast_2d(partial_df(param_val, param_name))[:, param_ind].shape[0] df_ = partial_df(param_val, param_name)
#Reshape it such that we have a 3d matrix: we want it as (?, N, D) regardless of whether ? is num_params or not
f_ = f_.reshape(-1, f_.shape[0], f_.shape[1])
df_ = df_.reshape(-1, f_.shape[0], f_.shape[1])
#Get the number of f and number of dimensions
fnum = f_.shape[-2]
fdim = f_.shape[-1]
dfnum = df_.shape[-2]
for fixed_val in range(dfnum): for fixed_val in range(dfnum):
#dlik and dlik_dvar gives back 1 value for each #dlik and dlik_dvar gives back 1 value for each
f_ind = min(fnum, fixed_val+1) - 1 f_ind = min(fnum, fixed_val+1) - 1
print("fnum: {} dfnum: {} f_ind: {} fixed_val: {}".format(fnum, dfnum, f_ind, fixed_val)) print "fnum: {} dfnum: {} f_ind: {} fixed_val: {}".format(fnum, dfnum, f_ind, fixed_val)
#Make grad checker with this param moving, note that set_params is NOT being called #Make grad checker with this param moving, note that set_params is NOT being called
#The parameter is being set directly with __setattr__ #The parameter is being set directly with __setattr__
#Check only the parameter and function value we wish to check at a time #Check only the parameter and function value we wish to check at a time
grad = GradientChecker(lambda p_val: np.atleast_2d(partial_f(p_val, param_name))[f_ind, param_ind], #func = lambda p_val, fnum, fdim, param_ind, f_ind, param_ind: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :]
lambda p_val: np.atleast_2d(partial_df(p_val, param_name))[fixed_val, param_ind], #dfunc_dparam = lambda d_val, fnum, fdim, param_ind, fixed_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :]
param_val, [param_name])
#First we reshape the output such that it is (num_params, N, D), then we pull out the relevant parameter/f-index pair and checkgrad just this index at a time
func = lambda p_val: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :]
dfunc_dparam = lambda d_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :]
grad = GradientChecker(func, dfunc_dparam, param_val, [param_name])
if constraints is not None: if constraints is not None:
for constrain_param, constraint in constraints: for constrain_param, constraint in constraints:
if grad.grep_param_names(constrain_param): if grad.grep_param_names(constrain_param):
constraint(constrain_param, grad) constraint(constrain_param, grad)
else: else:
print("parameter didn't exist") print "parameter didn't exist"
print(constrain_param, " ", constraint) print constrain_param, " ", constraint
if randomize: if randomize:
grad.randomize() grad.randomize()
if verbose: if verbose:
print(grad) print grad
grad.checkgrad(verbose=1) grad.checkgrad(verbose=1)
if not grad.checkgrad(verbose=True): if not grad.checkgrad(verbose=True):
gradchecking = False gradchecking = False
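The reshape-and-slice pattern above in isolation (shapes made up): outputs that may come back as (N, D) or (num_params, N, D) are normalised to 3-D so one parameter/f-index pair can be checked at a time.

import numpy as np

f_ = np.random.randn(4, 3)                    #an (N, D) return value
f3 = f_.reshape(-1, f_.shape[0], f_.shape[1]) #normalised to (1, N, D)
assert f3.shape == (1, 4, 3)
param_ind, f_ind = 0, 2
slice_ = f3[param_ind, f_ind, :]              #the (D,) slice checkgrad sees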
@@ -104,37 +117,9 @@ class TestNoiseModels(object): 
self.var = 0.2 self.var = 0.2
self.var = np.random.rand(1)
#Make a bigger step as lower bound can be quite curved #Make a bigger step as lower bound can be quite curved
self.step = 1e-4 self.step = 1e-4
def tearDown(self):
self.Y = None
self.f = None
self.X = None
def test_scale2_models(self):
self.setUp()
####################################################
# Constraint wrappers so we can just list them off #
####################################################
def constrain_fixed(regex, model):
model[regex].constrain_fixed()
def constrain_negative(regex, model):
model[regex].constrain_negative()
def constrain_positive(regex, model):
model[regex].constrain_positive()
def constrain_bounded(regex, model, lower, upper):
"""
Used like: partial(constrain_bounded, lower=0, upper=1)
"""
model[regex].constrain_bounded(lower, upper)
""" """
Dictionary where we nest models we would like to check Dictionary where we nest models we would like to check
Name: { Name: {
@@ -149,136 +134,170 @@ class TestNoiseModels(object): 
"link_f_constraints": [constraint_wrappers, listed_here] "link_f_constraints": [constraint_wrappers, listed_here]
} }
""" """
noise_models = {"Student_t_default": { self.noise_models = {"Student_t_default": {
"model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var),
"grad_params": { "grad_params": {
"names": [".*t_scale2"], "names": [".*t_scale2"],
"vals": [self.var], "vals": [self.var],
"constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
#"constraints": [("t_scale2", constrain_positive), ("deg_free", partial(constrain_fixed, value=5))] },
}, "laplace": True
"laplace": True },
}, "Student_t_1_var": {
"Student_t_1_var": { "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var),
"model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), "grad_params": {
"grad_params": { "names": [".*t_scale2"],
"names": [".*t_scale2"], "vals": [1.0],
"vals": [1.0], "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
"constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] },
}, "laplace": True
"laplace": True },
}, "Student_t_small_deg_free": {
"Student_t_small_deg_free": { "model": GPy.likelihoods.StudentT(deg_free=1.5, sigma2=self.var),
"model": GPy.likelihoods.StudentT(deg_free=1.5, sigma2=self.var), "grad_params": {
"grad_params": { "names": [".*t_scale2"],
"names": [".*t_scale2"], "vals": [self.var],
"vals": [self.var], "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
"constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] },
}, "laplace": True
"laplace": True },
}, "Student_t_small_var": {
"Student_t_small_var": { "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var),
"model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), "grad_params": {
"grad_params": { "names": [".*t_scale2"],
"names": [".*t_scale2"], "vals": [0.001],
"vals": [0.001], "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
"constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] },
}, "laplace": True
"laplace": True },
}, "Student_t_large_var": {
"Student_t_large_var": { "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var),
"model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), "grad_params": {
"grad_params": { "names": [".*t_scale2"],
"names": [".*t_scale2"], "vals": [10.0],
"vals": [10.0], "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
"constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] },
}, "laplace": True
"laplace": True },
}, "Student_t_approx_gauss": {
"Student_t_approx_gauss": { "model": GPy.likelihoods.StudentT(deg_free=1000, sigma2=self.var),
"model": GPy.likelihoods.StudentT(deg_free=1000, sigma2=self.var), "grad_params": {
"grad_params": { "names": [".*t_scale2"],
"names": [".*t_scale2"], "vals": [self.var],
"vals": [self.var], "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
"constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] },
}, "laplace": True
"laplace": True },
}, #"Student_t_log": {
"Student_t_log": { #"model": GPy.likelihoods.StudentT(gp_link=link_functions.Log(), deg_free=5, sigma2=self.var),
"model": GPy.likelihoods.StudentT(gp_link=link_functions.Log(), deg_free=5, sigma2=self.var), #"grad_params": {
"grad_params": { #"names": [".*t_noise"],
"names": [".*t_scale2"], #"vals": [self.var],
"vals": [self.var], #"constraints": [(".*t_noise", self.constrain_positive), (".*deg_free", self.constrain_fixed)]
"constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] #},
}, #"laplace": True
"laplace": True #},
}, "Gaussian_default": {
"Gaussian_default": { "model": GPy.likelihoods.Gaussian(variance=self.var),
"model": GPy.likelihoods.Gaussian(variance=self.var), "grad_params": {
"grad_params": { "names": [".*variance"],
"names": [".*variance"], "vals": [self.var],
"vals": [self.var], "constraints": [(".*variance", self.constrain_positive)]
"constraints": [(".*variance", constrain_positive)] },
}, "laplace": True,
"laplace": True, "ep": False # FIXME: Should be True when we have it working again
"ep": False # FIXME: Should be True when we have it working again },
}, "Gaussian_log": {
#"Gaussian_log": { "model": GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var),
#"model": GPy.likelihoods.gaussian(gp_link=link_functions.Log(), variance=self.var, D=self.D, N=self.N), "grad_params": {
#"grad_params": { "names": [".*variance"],
#"names": ["noise_model_variance"], "vals": [self.var],
#"vals": [self.var], "constraints": [(".*variance", self.constrain_positive)]
#"constraints": [constrain_positive] },
#}, "laplace": True
#"laplace": True },
#}, #"Gaussian_probit": {
#"Gaussian_probit": { #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N),
#"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N), #"grad_params": {
#"grad_params": { #"names": ["noise_model_variance"],
#"names": ["noise_model_variance"], #"vals": [self.var],
#"vals": [self.var], #"constraints": [constrain_positive]
#"constraints": [constrain_positive] #},
#}, #"laplace": True
#"laplace": True #},
#}, #"Gaussian_log_ex": {
#"Gaussian_log_ex": { #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Log_ex_1(), variance=self.var, D=self.D, N=self.N),
#"model": GPy.likelihoods.gaussian(gp_link=link_functions.Log_ex_1(), variance=self.var, D=self.D, N=self.N), #"grad_params": {
#"grad_params": { #"names": ["noise_model_variance"],
#"names": ["noise_model_variance"], #"vals": [self.var],
#"vals": [self.var], #"constraints": [constrain_positive]
#"constraints": [constrain_positive] #},
#}, #"laplace": True
#"laplace": True #},
#}, "Bernoulli_default": {
"Bernoulli_default": { "model": GPy.likelihoods.Bernoulli(),
"model": GPy.likelihoods.Bernoulli(), "link_f_constraints": [partial(self.constrain_bounded, lower=0, upper=1)],
"link_f_constraints": [partial(constrain_bounded, lower=0, upper=1)], "laplace": True,
"laplace": True, "Y": self.binary_Y,
"Y": self.binary_Y, "ep": False # FIXME: Should be True when we have it working again
"ep": False # FIXME: Should be True when we have it working again },
}, "Exponential_default": {
"Exponential_default": { "model": GPy.likelihoods.Exponential(),
"model": GPy.likelihoods.Exponential(), "link_f_constraints": [self.constrain_positive],
"link_f_constraints": [constrain_positive], "Y": self.positive_Y,
"Y": self.positive_Y, "laplace": True,
"laplace": True, },
}, "Poisson_default": {
"Poisson_default": { "model": GPy.likelihoods.Poisson(),
"model": GPy.likelihoods.Poisson(), "link_f_constraints": [self.constrain_positive],
"link_f_constraints": [constrain_positive], "Y": self.integer_Y,
"Y": self.integer_Y, "laplace": True,
"laplace": True, "ep": False #Should work though...
"ep": False #Should work though... },
}#, #,
#GAMMA needs some work!"Gamma_default": { #GAMMA needs some work!"Gamma_default": {
#"model": GPy.likelihoods.Gamma(), #"model": GPy.likelihoods.Gamma(),
#"link_f_constraints": [constrain_positive], #"link_f_constraints": [constrain_positive],
#"Y": self.positive_Y, #"Y": self.positive_Y,
#"laplace": True #"laplace": True
#} #}
} }
for name, attributes in noise_models.items():
####################################################
# Constraint wrappers so we can just list them off #
####################################################
def constrain_fixed(self, regex, model):
model[regex].constrain_fixed()
def constrain_negative(self, regex, model):
model[regex].constrain_negative()
def constrain_positive(self, regex, model):
model[regex].constrain_positive()
def constrain_fixed_below(self, regex, model, up_to):
model[regex][0:up_to].constrain_fixed()
def constrain_fixed_above(self, regex, model, above):
model[regex][above:].constrain_fixed()
def constrain_bounded(self, regex, model, lower, upper):
"""
Used like: partial(constrain_bounded, lower=0, upper=1)
"""
model[regex].constrain_bounded(lower, upper)
def tearDown(self):
self.Y = None
self.f = None
self.X = None
def test_scale2_models(self):
self.setUp()
for name, attributes in self.noise_models.iteritems():
model = attributes["model"] model = attributes["model"]
if "grad_params" in attributes: if "grad_params" in attributes:
params = attributes["grad_params"] params = attributes["grad_params"]
@@ -290,7 +309,7 @@ class TestNoiseModels(object): 
param_vals = [] param_vals = []
param_names = [] param_names = []
constrain_positive = [] constrain_positive = []
param_constraints = [] # ??? TODO: Saul to Fix. param_constraints = []
if "link_f_constraints" in attributes: if "link_f_constraints" in attributes:
link_f_constraints = attributes["link_f_constraints"] link_f_constraints = attributes["link_f_constraints"]
else: else:
@@ -303,6 +322,10 @@ class TestNoiseModels(object): 
f = attributes["f"].copy() f = attributes["f"].copy()
else: else:
f = self.f.copy() f = self.f.copy()
if "Y_metadata" in attributes:
Y_metadata = attributes["Y_metadata"].copy()
else:
Y_metadata = None
if "laplace" in attributes: if "laplace" in attributes:
laplace = attributes["laplace"] laplace = attributes["laplace"]
else: else:
@@ -317,30 +340,30 @@ class TestNoiseModels(object): 
#Required by all #Required by all
#Normal derivatives #Normal derivatives
yield self.t_logpdf, model, Y, f yield self.t_logpdf, model, Y, f, Y_metadata
yield self.t_dlogpdf_df, model, Y, f yield self.t_dlogpdf_df, model, Y, f, Y_metadata
yield self.t_d2logpdf_df2, model, Y, f yield self.t_d2logpdf_df2, model, Y, f, Y_metadata
#Link derivatives #Link derivatives
yield self.t_dlogpdf_dlink, model, Y, f, link_f_constraints yield self.t_dlogpdf_dlink, model, Y, f, Y_metadata, link_f_constraints
yield self.t_d2logpdf_dlink2, model, Y, f, link_f_constraints yield self.t_d2logpdf_dlink2, model, Y, f, Y_metadata, link_f_constraints
if laplace: if laplace:
#Laplace only derivatives #Laplace only derivatives
yield self.t_d3logpdf_df3, model, Y, f yield self.t_d3logpdf_df3, model, Y, f, Y_metadata
yield self.t_d3logpdf_dlink3, model, Y, f, link_f_constraints yield self.t_d3logpdf_dlink3, model, Y, f, Y_metadata, link_f_constraints
#Params #Params
yield self.t_dlogpdf_dparams, model, Y, f, param_vals, param_names, param_constraints yield self.t_dlogpdf_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_dlogpdf_df_dparams, model, Y, f, param_vals, param_names, param_constraints yield self.t_dlogpdf_df_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_d2logpdf2_df2_dparams, model, Y, f, param_vals, param_names, param_constraints yield self.t_d2logpdf2_df2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
#Link params #Link params
yield self.t_dlogpdf_link_dparams, model, Y, f, param_vals, param_names, param_constraints yield self.t_dlogpdf_link_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_dlogpdf_dlink_dparams, model, Y, f, param_vals, param_names, param_constraints yield self.t_dlogpdf_dlink_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
yield self.t_d2logpdf2_dlink2_dparams, model, Y, f, param_vals, param_names, param_constraints yield self.t_d2logpdf2_dlink2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints
#laplace likelihood gradcheck #laplace likelihood gradcheck
yield self.t_laplace_fit_rbf_white, model, self.X, Y, f, self.step, param_vals, param_names, param_constraints yield self.t_laplace_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
if ep: if ep:
#ep likelihood gradcheck #ep likelihood gradcheck
yield self.t_ep_fit_rbf_white, model, self.X, Y, f, self.step, param_vals, param_names, param_constraints yield self.t_ep_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
self.tearDown() self.tearDown()
@@ -349,76 +372,76 @@ class TestNoiseModels(object): 
# dpdf_df's # # dpdf_df's #
############# #############
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_logpdf(self, model, Y, f): def t_logpdf(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
print(model) print model
#print model._get_params() #print model._get_params()
np.testing.assert_almost_equal( np.testing.assert_almost_equal(
model.pdf(f.copy(), Y.copy()).prod(), model.pdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).prod(),
np.exp(model.logpdf(f.copy(), Y.copy()).sum()) np.exp(model.logpdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).sum())
) )
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_dlogpdf_df(self, model, Y, f): def t_dlogpdf_df(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
self.description = "\n{}".format(inspect.stack()[0][3]) self.description = "\n{}".format(inspect.stack()[0][3])
logpdf = functools.partial(np.sum(model.logpdf), y=Y) logpdf = functools.partial(np.sum(model.logpdf), y=Y, Y_metadata=Y_metadata)
dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y) dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(logpdf, dlogpdf_df, f.copy(), 'g') grad = GradientChecker(logpdf, dlogpdf_df, f.copy(), 'g')
grad.randomize() grad.randomize()
print(model) print model
assert grad.checkgrad(verbose=1) assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_d2logpdf_df2(self, model, Y, f): def t_d2logpdf_df2(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y) dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata)
d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y) d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(dlogpdf_df, d2logpdf_df2, f.copy(), 'g') grad = GradientChecker(dlogpdf_df, d2logpdf_df2, f.copy(), 'g')
grad.randomize() grad.randomize()
print(model) print model
assert grad.checkgrad(verbose=1) assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_d3logpdf_df3(self, model, Y, f): def t_d3logpdf_df3(self, model, Y, f, Y_metadata):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y) d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata)
d3logpdf_df3 = functools.partial(model.d3logpdf_df3, y=Y) d3logpdf_df3 = functools.partial(model.d3logpdf_df3, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(d2logpdf_df2, d3logpdf_df3, f.copy(), 'g') grad = GradientChecker(d2logpdf_df2, d3logpdf_df3, f.copy(), 'g')
grad.randomize() grad.randomize()
print(model) print model
assert grad.checkgrad(verbose=1) assert grad.checkgrad(verbose=1)
############## ##############
# df_dparams # # df_dparams #
############## ##############
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_dlogpdf_dparams(self, model, Y, f, params, params_names, param_constraints): def t_dlogpdf_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
print(model) print model
assert ( assert (
dparam_checkgrad(model.logpdf, model.dlogpdf_dtheta, dparam_checkgrad(model.logpdf, model.dlogpdf_dtheta,
params, params_names, args=(f, Y), constraints=param_constraints, params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True) randomize=False, verbose=True)
) )
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_dlogpdf_df_dparams(self, model, Y, f, params, params_names, param_constraints): def t_dlogpdf_df_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
print(model) print model
assert ( assert (
dparam_checkgrad(model.dlogpdf_df, model.dlogpdf_df_dtheta, dparam_checkgrad(model.dlogpdf_df, model.dlogpdf_df_dtheta,
params, params_names, args=(f, Y), constraints=param_constraints, params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True) randomize=False, verbose=True)
) )
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_d2logpdf2_df2_dparams(self, model, Y, f, params, params_names, param_constraints): def t_d2logpdf2_df2_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
print(model) print model
assert ( assert (
dparam_checkgrad(model.d2logpdf_df2, model.d2logpdf_df2_dtheta, dparam_checkgrad(model.d2logpdf_df2, model.d2logpdf_df2_dtheta,
params, params_names, args=(f, Y), constraints=param_constraints, params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True) randomize=False, verbose=True)
) )
@@ -426,10 +449,10 @@ class TestNoiseModels(object): 
# dpdf_dlink's # # dpdf_dlink's #
################ ################
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_dlogpdf_dlink(self, model, Y, f, link_f_constraints): def t_dlogpdf_dlink(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
logpdf = functools.partial(model.logpdf_link, y=Y) logpdf = functools.partial(model.logpdf_link, y=Y, Y_metadata=Y_metadata)
dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y) dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(logpdf, dlogpdf_dlink, f.copy(), 'g') grad = GradientChecker(logpdf, dlogpdf_dlink, f.copy(), 'g')
#Apply constraints to link_f values #Apply constraints to link_f values
@@ -437,15 +460,15 @@ class TestNoiseModels(object): 
constraint('g', grad) constraint('g', grad)
grad.randomize() grad.randomize()
print(grad) print grad
print(model) print model
assert grad.checkgrad(verbose=1) assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_d2logpdf_dlink2(self, model, Y, f, link_f_constraints): def t_d2logpdf_dlink2(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y) dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata)
d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y) d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(dlogpdf_dlink, d2logpdf_dlink2, f.copy(), 'g') grad = GradientChecker(dlogpdf_dlink, d2logpdf_dlink2, f.copy(), 'g')
#Apply constraints to link_f values #Apply constraints to link_f values
@@ -453,15 +476,15 @@ class TestNoiseModels(object): 
constraint('g', grad) constraint('g', grad)
grad.randomize() grad.randomize()
print(grad) print grad
print(model) print model
assert grad.checkgrad(verbose=1) assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_d3logpdf_dlink3(self, model, Y, f, link_f_constraints): def t_d3logpdf_dlink3(self, model, Y, f, Y_metadata, link_f_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y) d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata)
d3logpdf_dlink3 = functools.partial(model.d3logpdf_dlink3, y=Y) d3logpdf_dlink3 = functools.partial(model.d3logpdf_dlink3, y=Y, Y_metadata=Y_metadata)
grad = GradientChecker(d2logpdf_dlink2, d3logpdf_dlink3, f.copy(), 'g') grad = GradientChecker(d2logpdf_dlink2, d3logpdf_dlink3, f.copy(), 'g')
#Apply constraints to link_f values #Apply constraints to link_f values
@@ -469,40 +492,40 @@ class TestNoiseModels(object): 
constraint('g', grad) constraint('g', grad)
grad.randomize() grad.randomize()
print(grad) print grad
print(model) print model
assert grad.checkgrad(verbose=1) assert grad.checkgrad(verbose=1)
################# #################
# dlink_dparams # # dlink_dparams #
################# #################
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_dlogpdf_link_dparams(self, model, Y, f, params, param_names, param_constraints): def t_dlogpdf_link_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
print(model) print model
assert ( assert (
dparam_checkgrad(model.logpdf_link, model.dlogpdf_link_dtheta, dparam_checkgrad(model.logpdf_link, model.dlogpdf_link_dtheta,
params, param_names, args=(f, Y), constraints=param_constraints, params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True) randomize=False, verbose=True)
) )
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_dlogpdf_dlink_dparams(self, model, Y, f, params, param_names, param_constraints): def t_dlogpdf_dlink_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
print(model) print model
assert ( assert (
dparam_checkgrad(model.dlogpdf_dlink, model.dlogpdf_dlink_dtheta, dparam_checkgrad(model.dlogpdf_dlink, model.dlogpdf_dlink_dtheta,
params, param_names, args=(f, Y), constraints=param_constraints, params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True) randomize=False, verbose=True)
) )
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_d2logpdf2_dlink2_dparams(self, model, Y, f, params, param_names, param_constraints): def t_d2logpdf2_dlink2_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
print(model) print model
assert ( assert (
dparam_checkgrad(model.d2logpdf_dlink2, model.d2logpdf_dlink2_dtheta, dparam_checkgrad(model.d2logpdf_dlink2, model.d2logpdf_dlink2_dtheta,
params, param_names, args=(f, Y), constraints=param_constraints, params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints,
randomize=False, verbose=True) randomize=False, verbose=True)
) )
@@ -510,21 +533,23 @@ class TestNoiseModels(object): 
# laplace test # # laplace test #
################ ################
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_laplace_fit_rbf_white(self, model, X, Y, f, step, param_vals, param_names, constraints): def t_laplace_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
#Normalize #Normalize
Y = Y/Y.max() Y = Y/Y.max()
white_var = 1e-6 white_var = 1e-5
kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1]) kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
laplace_likelihood = GPy.inference.latent_function_inference.Laplace() laplace_likelihood = GPy.inference.latent_function_inference.Laplace()
m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, inference_method=laplace_likelihood)
m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=laplace_likelihood)
m['.*white'].constrain_fixed(white_var) m['.*white'].constrain_fixed(white_var)
#Set constraints #Set constraints
for constrain_param, constraint in constraints: for constrain_param, constraint in constraints:
constraint(constrain_param, m) constraint(constrain_param, m)
print(m) print m
m.randomize()
m.randomize() m.randomize()
#Set params #Set params
@@ -533,7 +558,7 @@ class TestNoiseModels(object): 
m[name] = param_vals[param_num] m[name] = param_vals[param_num]
#m.optimize(max_iters=8) #m.optimize(max_iters=8)
print(m) print m
#if not m.checkgrad(step=step): #if not m.checkgrad(step=step):
#m.checkgrad(verbose=1, step=step) #m.checkgrad(verbose=1, step=step)
#NOTE this test appears to be stochastic for some likelihoods (student t?) #NOTE this test appears to be stochastic for some likelihoods (student t?)
@@ -545,14 +570,15 @@ class TestNoiseModels(object): 
# EP test # # EP test #
########### ###########
@with_setup(setUp, tearDown) @with_setup(setUp, tearDown)
def t_ep_fit_rbf_white(self, model, X, Y, f, step, param_vals, param_names, constraints): def t_ep_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
#Normalize #Normalize
Y = Y/Y.max() Y = Y/Y.max()
white_var = 1e-6 white_var = 1e-6
kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1]) kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
ep_inf = GPy.inference.latent_function_inference.EP() ep_inf = GPy.inference.latent_function_inference.EP()
m = GPy.core.GP(X.copy(), Y.copy(), kernel=kernel, likelihood=model, inference_method=ep_inf)
m = GPy.core.GP(X.copy(), Y.copy(), kernel=kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=ep_inf)
m['.*white'].constrain_fixed(white_var) m['.*white'].constrain_fixed(white_var)
for param_num in range(len(param_names)): for param_num in range(len(param_names)):
@@ -561,7 +587,7 @@ class TestNoiseModels(object): 
constraints[param_num](name, m) constraints[param_num](name, m)
m.randomize() m.randomize()
print(m) print m
assert m.checkgrad(verbose=1, step=step) assert m.checkgrad(verbose=1, step=step)
@@ -571,8 +597,8 @@ class LaplaceTests(unittest.TestCase): 
""" """
def setUp(self): def setUp(self):
self.N = 5 self.N = 15
self.D = 3 self.D = 1
self.X = np.random.rand(self.N, self.D)*10 self.X = np.random.rand(self.N, self.D)*10
self.real_std = 0.1 self.real_std = 0.1
@@ -598,7 +624,7 @@ class LaplaceTests(unittest.TestCase): 
self.X = None self.X = None
def test_gaussian_d2logpdf_df2_2(self): def test_gaussian_d2logpdf_df2_2(self):
print("\n{}".format(inspect.stack()[0][3])) print "\n{}".format(inspect.stack()[0][3])
self.Y = None self.Y = None
self.N = 2 self.N = 2
@@ -636,28 +662,28 @@ class LaplaceTests(unittest.TestCase): 
exact_inf = GPy.inference.latent_function_inference.ExactGaussianInference() exact_inf = GPy.inference.latent_function_inference.ExactGaussianInference()
m1 = GPy.core.GP(X, Y.copy(), kernel=kernel1, likelihood=gauss_distr1, inference_method=exact_inf) m1 = GPy.core.GP(X, Y.copy(), kernel=kernel1, likelihood=gauss_distr1, inference_method=exact_inf)
m1['.*white'].constrain_fixed(1e-6) m1['.*white'].constrain_fixed(1e-6)
m1['.*rbf.variance'] = initial_var_guess m1['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
m1['.*rbf.variance'].constrain_bounded(1e-4, 10)
m1.randomize() m1.randomize()
gauss_distr2 = GPy.likelihoods.Gaussian(variance=initial_var_guess) gauss_distr2 = GPy.likelihoods.Gaussian(variance=initial_var_guess)
laplace_inf = GPy.inference.latent_function_inference.Laplace() laplace_inf = GPy.inference.latent_function_inference.Laplace()
m2 = GPy.core.GP(X, Y.copy(), kernel=kernel2, likelihood=gauss_distr2, inference_method=laplace_inf) m2 = GPy.core.GP(X, Y.copy(), kernel=kernel2, likelihood=gauss_distr2, inference_method=laplace_inf)
m2['.*white'].constrain_fixed(1e-6) m2['.*white'].constrain_fixed(1e-6)
m2['.*rbf.variance'].constrain_bounded(1e-4, 10) m2['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10)
m2.randomize() m2.randomize()
if debug: if debug:
print(m1) print m1
print(m2) print m2
optimizer = 'scg' optimizer = 'scg'
print("Gaussian") print "Gaussian"
m1.optimize(optimizer, messages=debug, ipython_notebook=False) m1.optimize(optimizer, messages=debug, ipython_notebook=False)
print ("Laplace Gaussian") print "Laplace Gaussian"
m2.optimize(optimizer, messages=debug, ipython_notebook=False) m2.optimize(optimizer, messages=debug, ipython_notebook=False)
if debug: if debug:
print(m1) print m1
print(m2) print m2
m2[:] = m1[:] m2[:] = m1[:]
@@ -687,8 +713,6 @@ class LaplaceTests(unittest.TestCase): 
pb.scatter(X, m1.likelihood.Y, c='g') pb.scatter(X, m1.likelihood.Y, c='g')
pb.scatter(X, m2.likelihood.Y, c='r', marker='x') pb.scatter(X, m2.likelihood.Y, c='r', marker='x')
#Check Y's are the same #Check Y's are the same
np.testing.assert_almost_equal(m1.Y, m2.Y, decimal=5) np.testing.assert_almost_equal(m1.Y, m2.Y, decimal=5)
#Check marginals are the same #Check marginals are the same
@ -706,5 +730,5 @@ class LaplaceTests(unittest.TestCase):
self.assertTrue(m2.checkgrad(verbose=True)) self.assertTrue(m2.checkgrad(verbose=True))
if __name__ == "__main__": if __name__ == "__main__":
print("Running unit tests") print "Running unit tests"
unittest.main() unittest.main()

GPy/testing/misc_tests.py Normal file (18 added lines)
View file

@@ -0,0 +1,18 @@
import numpy as np
import scipy as sp
import GPy
class MiscTests(np.testing.TestCase):
    """
    Testing some utilities of misc
    """
    def setUp(self):
        self._lim_val = np.finfo(np.float64).max
        self._lim_val_exp = np.log(self._lim_val)

    def test_safe_exp_upper(self):
        assert np.exp(self._lim_val_exp + 1) == np.inf
        assert GPy.util.misc.safe_exp(self._lim_val_exp + 1) < np.inf

    def test_safe_exp_lower(self):
        assert GPy.util.misc.safe_exp(1e-10) < np.inf

GPy/testing/svgp_tests.py Normal file (34 added lines)
View file

@@ -0,0 +1,34 @@
import numpy as np
import scipy as sp
import GPy
class SVGP_nonconvex(np.testing.TestCase):
    """
    Inference in the SVGP with a student-T likelihood
    """
    def setUp(self):
        X = np.linspace(0,10,100).reshape(-1,1)
        Z = np.linspace(0,10,10).reshape(-1,1)
        Y = np.sin(X) + np.random.randn(*X.shape)*0.1
        Y[50] += 3
        lik = GPy.likelihoods.StudentT(deg_free=2)
        k = GPy.kern.RBF(1, lengthscale=5.) + GPy.kern.White(1, 1e-6)
        self.m = GPy.core.SVGP(X, Y, Z=Z, likelihood=lik, kernel=k)

    def test_grad(self):
        assert self.m.checkgrad(step=1e-4)


class SVGP_classification(np.testing.TestCase):
    """
    Inference in the SVGP with a Bernoulli likelihood
    """
    def setUp(self):
        X = np.linspace(0,10,100).reshape(-1,1)
        Z = np.linspace(0,10,10).reshape(-1,1)
        Y = np.where((np.sin(X) + np.random.randn(*X.shape)*0.1) > 0, 1, 0)
        lik = GPy.likelihoods.Bernoulli()
        k = GPy.kern.RBF(1, lengthscale=5.) + GPy.kern.White(1, 1e-6)
        self.m = GPy.core.SVGP(X, Y, Z=Z, likelihood=lik, kernel=k)

    def test_grad(self):
        assert self.m.checkgrad(step=1e-4)
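These tests only verify gradients. As a sketch of actually fitting one of these models once the check passes (standard GPy calls; the data mirrors the setUp above, and the prediction step is illustrative):

import numpy as np
import GPy

X = np.linspace(0, 10, 100).reshape(-1, 1)
Y = np.where((np.sin(X) + np.random.randn(*X.shape)*0.1) > 0, 1, 0)
Z = np.linspace(0, 10, 10).reshape(-1, 1)

lik = GPy.likelihoods.Bernoulli()
k = GPy.kern.RBF(1, lengthscale=5.) + GPy.kern.White(1, 1e-6)
m = GPy.core.SVGP(X, Y, Z=Z, likelihood=lik, kernel=k)

m.optimize('bfgs', max_iters=200)   # fits kernel, likelihood and variational parameters
mu, var = m.predict(X)              # predictive mean and variance at the training inputs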

View file

@@ -17,6 +17,54 @@ def get_blocks(A, blocksizes):
        count_i += i
    return B
def get_block_shapes(B):
    assert B.dtype is np.dtype('object'), "Must be a block matrix"
    return [B[b,b].shape[0] for b in range(0, B.shape[0])]

def unblock(B):
    assert B.dtype is np.dtype('object'), "Must be a block matrix"
    block_shapes = get_block_shapes(B)
    num_elements = np.sum(block_shapes)
    A = np.empty(shape=(num_elements, num_elements))
    count_i = 0
    for Bi, i in enumerate(block_shapes):
        count_j = 0
        for Bj, j in enumerate(block_shapes):
            A[count_i:count_i + i, count_j:count_j + j] = B[Bi, Bj]
            count_j += j
        count_i += i
    return A
def block_dot(A, B):
    """
    Element-wise dot product on block matrices

    +------+------+   +------+------+   +-------+-------+
    |      |      |   |      |      |   |A11.B11|A12.B12|
    | A11  | A12  |   | B11  | B12  |   |       |       |
    +------+------+ o +------+------+ = +-------+-------+
    |      |      |   |      |      |   |A21.B21|A22.B22|
    | A21  | A22  |   | B21  | B22  |   |       |       |
    +------+------+   +------+------+   +-------+-------+

    .. note::
        If either of the blocks (of A or B) is a diagonal stored as a vector,
        a more efficient product using numpy broadcasting is used, i.e. A11*B11
    """
    # Must be block matrices with the same block structure
    assert A.dtype is np.dtype('object'), "Must be a block matrix"
    assert B.dtype is np.dtype('object'), "Must be a block matrix"
    assert A.shape == B.shape

    def f(a, b):
        # a and b are individual blocks here; a 1-D block marks a stored diagonal
        if a.ndim == 1 or b.ndim == 1:
            #FIXME: Careful if one is transpose of other, would make a matrix
            return a*b
        return np.dot(a, b)

    dot = np.vectorize(f, otypes=[np.object])
    return dot(A, B)
if __name__=='__main__':
@@ -24,3 +72,5 @@ if __name__=='__main__':
    B = get_blocks(A,[2,3])
    B[0,0] += 7
    print(B)
assert np.all(unblock(B) == A)
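A short sketch of how the new utilities compose, assuming the functions above are in scope (the 4x4 array is illustrative; block_dot needs conforming blocks, so equal block sizes are used here):

import numpy as np

A = np.arange(16.).reshape(4, 4)
B = get_blocks(A, [2, 2])                 # 2x2 object array of 2x2 blocks
assert get_block_shapes(B) == [2, 2]      # sizes of the diagonal blocks
assert np.all(unblock(B) == A)            # unblock inverts get_blocks

C = block_dot(B, B)                       # each cell is np.dot(B[i,j], B[i,j])
assert np.all(C[0, 0] == np.dot(A[:2, :2], A[:2, :2]))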

View file

@@ -4,6 +4,16 @@
import numpy as np
from .config import *

_lim_val = np.finfo(np.float64).max
_lim_val_exp = np.log(_lim_val)
_lim_val_square = np.sqrt(_lim_val)
# cube root of the float limit, so that clipped values can safely be cubed
_lim_val_cube = np.power(_lim_val, 1./3)

def safe_exp(f):
    # clip the argument so that np.exp cannot overflow to inf
    clip_f = np.clip(f, -np.inf, _lim_val_exp)
    return np.exp(clip_f)
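A quick illustration of the clipping (a sketch; it mirrors the new misc_tests above):

import numpy as np

big = np.log(np.finfo(np.float64).max) + 1.   # exponent just past the overflow point
assert np.exp(big) == np.inf                  # plain exp overflows (with a RuntimeWarning)
assert safe_exp(big) < np.inf                 # the clipped argument keeps the result finite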
def chain_1(df_dg, dg_dx):
    """
    Generic chaining function for first derivative
@@ -11,6 +21,11 @@ def chain_1(df_dg, dg_dx):
    .. math::
        \\frac{d(f . g)}{dx} = \\frac{df}{dg} \\frac{dg}{dx}
    """
    if np.all(dg_dx==1.):
        return df_dg
    if len(df_dg) > 1 and df_dg.shape[-1] > 1:
        raise NotImplementedError('Not implemented for matrices yet')
    return df_dg * dg_dx
def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):
@@ -20,7 +35,13 @@ def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):
    .. math::
        \\frac{d^{2}(f . g)}{dx^{2}} = \\frac{d^{2}f}{dg^{2}}(\\frac{dg}{dx})^{2} + \\frac{df}{dg}\\frac{d^{2}g}{dx^{2}}
    """
-   return d2f_dg2*(dg_dx**2) + df_dg*d2g_dx2
+   if np.all(dg_dx==1.) and np.all(d2g_dx2 == 0):
+       return d2f_dg2
+   if len(d2f_dg2) > 1 and d2f_dg2.shape[-1] > 1:
+       raise NotImplementedError('Not implemented for matrices yet')
+   #dg_dx_2 = np.clip(dg_dx, 1e-12, _lim_val_square)**2
+   dg_dx_2 = dg_dx**2
+   return d2f_dg2*(dg_dx_2) + df_dg*d2g_dx2
def chain_3(d3f_dg3, dg_dx, d2f_dg2, d2g_dx2, df_dg, d3g_dx3):
    """
@@ -29,11 +50,18 @@ def chain_3(d3f_dg3, dg_dx, d2f_dg2, d2g_dx2, df_dg, d3g_dx3):
    .. math::
        \\frac{d^{3}(f . g)}{dx^{3}} = \\frac{d^{3}f}{dg^{3}}(\\frac{dg}{dx})^{3} + 3\\frac{d^{2}f}{dg^{2}}\\frac{dg}{dx}\\frac{d^{2}g}{dx^{2}} + \\frac{df}{dg}\\frac{d^{3}g}{dx^{3}}
    """
-   return d3f_dg3*(dg_dx**3) + 3*d2f_dg2*dg_dx*d2g_dx2 + df_dg*d3g_dx3
+   if np.all(dg_dx==1.) and np.all(d2g_dx2==0) and np.all(d3g_dx3==0):
+       return d3f_dg3
+   if ((len(d2f_dg2) > 1 and d2f_dg2.shape[-1] > 1)
+           or (len(d3f_dg3) > 1 and d3f_dg3.shape[-1] > 1)):
+       raise NotImplementedError('Not implemented for matrices yet')
+   #dg_dx_3 = np.clip(dg_dx, 1e-12, _lim_val_cube)**3
+   dg_dx_3 = dg_dx**3
+   return d3f_dg3*(dg_dx_3) + 3*d2f_dg2*dg_dx*d2g_dx2 + df_dg*d3g_dx3
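As a concrete check of these chain rules, take f = exp and g(x) = x^2, so the composition is exp(x^2) (a sketch; the (num_data, 1) shapes follow the convention the guards above expect):

import numpy as np

x = np.linspace(0.1, 1., 5).reshape(-1, 1)
g, dg_dx, d2g_dx2 = x**2, 2*x, 2*np.ones_like(x)
df_dg = d2f_dg2 = np.exp(g)                    # every derivative of exp is exp itself

d1 = chain_1(df_dg, dg_dx)                     # d/dx   exp(x^2) = 2x exp(x^2)
d2 = chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2)   # d2/dx2 exp(x^2) = (4x^2 + 2) exp(x^2)

assert np.allclose(d1, 2*x*np.exp(x**2))
assert np.allclose(d2, (4*x**2 + 2)*np.exp(x**2))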
def opt_wrapper(m, **kwargs):
    """
    This function just wraps the optimization procedure of a GPy
    object so that optimize() is pickleable (necessary for multiprocessing).
    """
    m.optimize(**kwargs)
@@ -96,3 +124,47 @@ from :class:ndarray)"""
    if len(param) == 1:
        return param[0].view(np.ndarray)
    return [x.view(np.ndarray) for x in param]
def blockify_hessian(func):
    def wrapper_func(self, *args, **kwargs):
        # Evaluate the wrapped function first
        retval = func(self, *args, **kwargs)
        # If the likelihood is not really block structured, expand the
        # diagonal result into a full (num_data x num_data) matrix
        if self.not_block_really and (retval.shape[0] != retval.shape[1]):
            return np.diagflat(retval)
        else:
            return retval
    return wrapper_func

def blockify_third(func):
    def wrapper_func(self, *args, **kwargs):
        # Evaluate the wrapped function first
        retval = func(self, *args, **kwargs)
        # Expand a diagonal third derivative into a (num_data)^3 tensor
        if self.not_block_really and (len(retval.shape) < 3):
            num_data = retval.shape[0]
            d3_block_cache = np.zeros((num_data, num_data, num_data))
            diag_slice = range(num_data)
            d3_block_cache[diag_slice, diag_slice, diag_slice] = np.squeeze(retval)
            return d3_block_cache
        else:
            return retval
    return wrapper_func

def blockify_dhess_dtheta(func):
    def wrapper_func(self, *args, **kwargs):
        # Evaluate the wrapped function first
        retval = func(self, *args, **kwargs)
        # Expand the diagonal Hessian gradients, one dense slice per parameter
        if self.not_block_really and (len(retval.shape) < 3):
            num_data = retval.shape[0]
            num_params = retval.shape[-1]
            dhess_dtheta = np.zeros((num_data, num_data, num_params))
            diag_slice = range(num_data)
            for param_ind in range(num_params):
                dhess_dtheta[diag_slice, diag_slice, param_ind] = np.squeeze(retval[:, param_ind])
            return dhess_dtheta
        else:
            return retval
    return wrapper_func
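A sketch of what the hessian wrapper does; DiagonalLikelihood and its method are illustrative stand-ins, not GPy API, and not_block_really is the flag the wrappers test:

import numpy as np

class DiagonalLikelihood(object):
    not_block_really = True   # hypothetical flag value; real likelihoods set this themselves

    @blockify_hessian
    def d2logpdf_df2(self, f):
        # a diagonal Hessian returned as an (N, 1) column
        return -np.ones((f.shape[0], 1))

lik = DiagonalLikelihood()
H = lik.d2logpdf_df2(np.zeros((4, 1)))
assert H.shape == (4, 4)          # expanded into a dense diagonal matrix
assert np.all(np.diag(H) == -1.)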

View file

@@ -0,0 +1,44 @@
import numpy as np
import GPy
def load_housing_data():
    X = np.loadtxt('housing.data')
    X, Y = X[:,:-1], X[:,-1:]
    #scale the X data
    xmax, xmin = X.max(0), X.min(0)
    X = (X-xmin)/(xmax-xmin)
    #log the response
    Y = np.log(Y)
    return X, Y
def fit_full_GP():
    X, Y = load_housing_data()
    k = GPy.kern.RBF(X.shape[1], ARD=True) + GPy.kern.Linear(X.shape[1])
    m = GPy.models.GPRegression(X, Y, kernel=k)
    m.optimize('bfgs', max_iters=400, gtol=0)
    return m
def fit_svgp_st():
    np.random.seed(0)
    X, Y = load_housing_data()
    Z = X[np.random.permutation(X.shape[0])[:100]]
    k = GPy.kern.RBF(X.shape[1], ARD=True) + GPy.kern.Linear(X.shape[1], ARD=True) + GPy.kern.White(1,0.01) + GPy.kern.Bias(1)
    lik = GPy.likelihoods.StudentT(deg_free=3.)
    m = GPy.core.SVGP(X, Y, Z=Z, kernel=k, likelihood=lik)
    # several short scg runs, then a long bfgs polish
    for i in range(10):
        m.optimize('scg', max_iters=40, gtol=0, messages=1, xtol=0, ftol=0)
    m.optimize('bfgs', max_iters=4000, gtol=0, messages=1, xtol=0, ftol=0)
    return m
if __name__=="__main__":
import timeit

benchmarks/housing.data Normal file (506 added lines)
View file

@@ -0,0 +1,506 @@
0.00632 18.00 2.310 0 0.5380 6.5750 65.20 4.0900 1 296.0 15.30 396.90 4.98 24.00
0.02731 0.00 7.070 0 0.4690 6.4210 78.90 4.9671 2 242.0 17.80 396.90 9.14 21.60
0.02729 0.00 7.070 0 0.4690 7.1850 61.10 4.9671 2 242.0 17.80 392.83 4.03 34.70
0.03237 0.00 2.180 0 0.4580 6.9980 45.80 6.0622 3 222.0 18.70 394.63 2.94 33.40
0.06905 0.00 2.180 0 0.4580 7.1470 54.20 6.0622 3 222.0 18.70 396.90 5.33 36.20
0.02985 0.00 2.180 0 0.4580 6.4300 58.70 6.0622 3 222.0 18.70 394.12 5.21 28.70
0.08829 12.50 7.870 0 0.5240 6.0120 66.60 5.5605 5 311.0 15.20 395.60 12.43 22.90
0.14455 12.50 7.870 0 0.5240 6.1720 96.10 5.9505 5 311.0 15.20 396.90 19.15 27.10
0.21124 12.50 7.870 0 0.5240 5.6310 100.00 6.0821 5 311.0 15.20 386.63 29.93 16.50
0.17004 12.50 7.870 0 0.5240 6.0040 85.90 6.5921 5 311.0 15.20 386.71 17.10 18.90
0.22489 12.50 7.870 0 0.5240 6.3770 94.30 6.3467 5 311.0 15.20 392.52 20.45 15.00
0.11747 12.50 7.870 0 0.5240 6.0090 82.90 6.2267 5 311.0 15.20 396.90 13.27 18.90
0.09378 12.50 7.870 0 0.5240 5.8890 39.00 5.4509 5 311.0 15.20 390.50 15.71 21.70
0.62976 0.00 8.140 0 0.5380 5.9490 61.80 4.7075 4 307.0 21.00 396.90 8.26 20.40
0.63796 0.00 8.140 0 0.5380 6.0960 84.50 4.4619 4 307.0 21.00 380.02 10.26 18.20
0.62739 0.00 8.140 0 0.5380 5.8340 56.50 4.4986 4 307.0 21.00 395.62 8.47 19.90
1.05393 0.00 8.140 0 0.5380 5.9350 29.30 4.4986 4 307.0 21.00 386.85 6.58 23.10
0.78420 0.00 8.140 0 0.5380 5.9900 81.70 4.2579 4 307.0 21.00 386.75 14.67 17.50
0.80271 0.00 8.140 0 0.5380 5.4560 36.60 3.7965 4 307.0 21.00 288.99 11.69 20.20
0.72580 0.00 8.140 0 0.5380 5.7270 69.50 3.7965 4 307.0 21.00 390.95 11.28 18.20
1.25179 0.00 8.140 0 0.5380 5.5700 98.10 3.7979 4 307.0 21.00 376.57 21.02 13.60
0.85204 0.00 8.140 0 0.5380 5.9650 89.20 4.0123 4 307.0 21.00 392.53 13.83 19.60
1.23247 0.00 8.140 0 0.5380 6.1420 91.70 3.9769 4 307.0 21.00 396.90 18.72 15.20
0.98843 0.00 8.140 0 0.5380 5.8130 100.00 4.0952 4 307.0 21.00 394.54 19.88 14.50
0.75026 0.00 8.140 0 0.5380 5.9240 94.10 4.3996 4 307.0 21.00 394.33 16.30 15.60
0.84054 0.00 8.140 0 0.5380 5.5990 85.70 4.4546 4 307.0 21.00 303.42 16.51 13.90
0.67191 0.00 8.140 0 0.5380 5.8130 90.30 4.6820 4 307.0 21.00 376.88 14.81 16.60
0.95577 0.00 8.140 0 0.5380 6.0470 88.80 4.4534 4 307.0 21.00 306.38 17.28 14.80
0.77299 0.00 8.140 0 0.5380 6.4950 94.40 4.4547 4 307.0 21.00 387.94 12.80 18.40
1.00245 0.00 8.140 0 0.5380 6.6740 87.30 4.2390 4 307.0 21.00 380.23 11.98 21.00
1.13081 0.00 8.140 0 0.5380 5.7130 94.10 4.2330 4 307.0 21.00 360.17 22.60 12.70
1.35472 0.00 8.140 0 0.5380 6.0720 100.00 4.1750 4 307.0 21.00 376.73 13.04 14.50
1.38799 0.00 8.140 0 0.5380 5.9500 82.00 3.9900 4 307.0 21.00 232.60 27.71 13.20
1.15172 0.00 8.140 0 0.5380 5.7010 95.00 3.7872 4 307.0 21.00 358.77 18.35 13.10
1.61282 0.00 8.140 0 0.5380 6.0960 96.90 3.7598 4 307.0 21.00 248.31 20.34 13.50
0.06417 0.00 5.960 0 0.4990 5.9330 68.20 3.3603 5 279.0 19.20 396.90 9.68 18.90
0.09744 0.00 5.960 0 0.4990 5.8410 61.40 3.3779 5 279.0 19.20 377.56 11.41 20.00
0.08014 0.00 5.960 0 0.4990 5.8500 41.50 3.9342 5 279.0 19.20 396.90 8.77 21.00
0.17505 0.00 5.960 0 0.4990 5.9660 30.20 3.8473 5 279.0 19.20 393.43 10.13 24.70
0.02763 75.00 2.950 0 0.4280 6.5950 21.80 5.4011 3 252.0 18.30 395.63 4.32 30.80
0.03359 75.00 2.950 0 0.4280 7.0240 15.80 5.4011 3 252.0 18.30 395.62 1.98 34.90
0.12744 0.00 6.910 0 0.4480 6.7700 2.90 5.7209 3 233.0 17.90 385.41 4.84 26.60
0.14150 0.00 6.910 0 0.4480 6.1690 6.60 5.7209 3 233.0 17.90 383.37 5.81 25.30
0.15936 0.00 6.910 0 0.4480 6.2110 6.50 5.7209 3 233.0 17.90 394.46 7.44 24.70
0.12269 0.00 6.910 0 0.4480 6.0690 40.00 5.7209 3 233.0 17.90 389.39 9.55 21.20
0.17142 0.00 6.910 0 0.4480 5.6820 33.80 5.1004 3 233.0 17.90 396.90 10.21 19.30
0.18836 0.00 6.910 0 0.4480 5.7860 33.30 5.1004 3 233.0 17.90 396.90 14.15 20.00
0.22927 0.00 6.910 0 0.4480 6.0300 85.50 5.6894 3 233.0 17.90 392.74 18.80 16.60
0.25387 0.00 6.910 0 0.4480 5.3990 95.30 5.8700 3 233.0 17.90 396.90 30.81 14.40
0.21977 0.00 6.910 0 0.4480 5.6020 62.00 6.0877 3 233.0 17.90 396.90 16.20 19.40
0.08873 21.00 5.640 0 0.4390 5.9630 45.70 6.8147 4 243.0 16.80 395.56 13.45 19.70
0.04337 21.00 5.640 0 0.4390 6.1150 63.00 6.8147 4 243.0 16.80 393.97 9.43 20.50
0.05360 21.00 5.640 0 0.4390 6.5110 21.10 6.8147 4 243.0 16.80 396.90 5.28 25.00
0.04981 21.00 5.640 0 0.4390 5.9980 21.40 6.8147 4 243.0 16.80 396.90 8.43 23.40
0.01360 75.00 4.000 0 0.4100 5.8880 47.60 7.3197 3 469.0 21.10 396.90 14.80 18.90
0.01311 90.00 1.220 0 0.4030 7.2490 21.90 8.6966 5 226.0 17.90 395.93 4.81 35.40
0.02055 85.00 0.740 0 0.4100 6.3830 35.70 9.1876 2 313.0 17.30 396.90 5.77 24.70
0.01432 100.00 1.320 0 0.4110 6.8160 40.50 8.3248 5 256.0 15.10 392.90 3.95 31.60
0.15445 25.00 5.130 0 0.4530 6.1450 29.20 7.8148 8 284.0 19.70 390.68 6.86 23.30
0.10328 25.00 5.130 0 0.4530 5.9270 47.20 6.9320 8 284.0 19.70 396.90 9.22 19.60
0.14932 25.00 5.130 0 0.4530 5.7410 66.20 7.2254 8 284.0 19.70 395.11 13.15 18.70
0.17171 25.00 5.130 0 0.4530 5.9660 93.40 6.8185 8 284.0 19.70 378.08 14.44 16.00
0.11027 25.00 5.130 0 0.4530 6.4560 67.80 7.2255 8 284.0 19.70 396.90 6.73 22.20
0.12650 25.00 5.130 0 0.4530 6.7620 43.40 7.9809 8 284.0 19.70 395.58 9.50 25.00
0.01951 17.50 1.380 0 0.4161 7.1040 59.50 9.2229 3 216.0 18.60 393.24 8.05 33.00
0.03584 80.00 3.370 0 0.3980 6.2900 17.80 6.6115 4 337.0 16.10 396.90 4.67 23.50
0.04379 80.00 3.370 0 0.3980 5.7870 31.10 6.6115 4 337.0 16.10 396.90 10.24 19.40
0.05789 12.50 6.070 0 0.4090 5.8780 21.40 6.4980 4 345.0 18.90 396.21 8.10 22.00
0.13554 12.50 6.070 0 0.4090 5.5940 36.80 6.4980 4 345.0 18.90 396.90 13.09 17.40
0.12816 12.50 6.070 0 0.4090 5.8850 33.00 6.4980 4 345.0 18.90 396.90 8.79 20.90
0.08826 0.00 10.810 0 0.4130 6.4170 6.60 5.2873 4 305.0 19.20 383.73 6.72 24.20
0.15876 0.00 10.810 0 0.4130 5.9610 17.50 5.2873 4 305.0 19.20 376.94 9.88 21.70
0.09164 0.00 10.810 0 0.4130 6.0650 7.80 5.2873 4 305.0 19.20 390.91 5.52 22.80
0.19539 0.00 10.810 0 0.4130 6.2450 6.20 5.2873 4 305.0 19.20 377.17 7.54 23.40
0.07896 0.00 12.830 0 0.4370 6.2730 6.00 4.2515 5 398.0 18.70 394.92 6.78 24.10
0.09512 0.00 12.830 0 0.4370 6.2860 45.00 4.5026 5 398.0 18.70 383.23 8.94 21.40
0.10153 0.00 12.830 0 0.4370 6.2790 74.50 4.0522 5 398.0 18.70 373.66 11.97 20.00
0.08707 0.00 12.830 0 0.4370 6.1400 45.80 4.0905 5 398.0 18.70 386.96 10.27 20.80
0.05646 0.00 12.830 0 0.4370 6.2320 53.70 5.0141 5 398.0 18.70 386.40 12.34 21.20
0.08387 0.00 12.830 0 0.4370 5.8740 36.60 4.5026 5 398.0 18.70 396.06 9.10 20.30
0.04113 25.00 4.860 0 0.4260 6.7270 33.50 5.4007 4 281.0 19.00 396.90 5.29 28.00
0.04462 25.00 4.860 0 0.4260 6.6190 70.40 5.4007 4 281.0 19.00 395.63 7.22 23.90
0.03659 25.00 4.860 0 0.4260 6.3020 32.20 5.4007 4 281.0 19.00 396.90 6.72 24.80
0.03551 25.00 4.860 0 0.4260 6.1670 46.70 5.4007 4 281.0 19.00 390.64 7.51 22.90
0.05059 0.00 4.490 0 0.4490 6.3890 48.00 4.7794 3 247.0 18.50 396.90 9.62 23.90
0.05735 0.00 4.490 0 0.4490 6.6300 56.10 4.4377 3 247.0 18.50 392.30 6.53 26.60
0.05188 0.00 4.490 0 0.4490 6.0150 45.10 4.4272 3 247.0 18.50 395.99 12.86 22.50
0.07151 0.00 4.490 0 0.4490 6.1210 56.80 3.7476 3 247.0 18.50 395.15 8.44 22.20
0.05660 0.00 3.410 0 0.4890 7.0070 86.30 3.4217 2 270.0 17.80 396.90 5.50 23.60
0.05302 0.00 3.410 0 0.4890 7.0790 63.10 3.4145 2 270.0 17.80 396.06 5.70 28.70
0.04684 0.00 3.410 0 0.4890 6.4170 66.10 3.0923 2 270.0 17.80 392.18 8.81 22.60
0.03932 0.00 3.410 0 0.4890 6.4050 73.90 3.0921 2 270.0 17.80 393.55 8.20 22.00
0.04203 28.00 15.040 0 0.4640 6.4420 53.60 3.6659 4 270.0 18.20 395.01 8.16 22.90
0.02875 28.00 15.040 0 0.4640 6.2110 28.90 3.6659 4 270.0 18.20 396.33 6.21 25.00
0.04294 28.00 15.040 0 0.4640 6.2490 77.30 3.6150 4 270.0 18.20 396.90 10.59 20.60
0.12204 0.00 2.890 0 0.4450 6.6250 57.80 3.4952 2 276.0 18.00 357.98 6.65 28.40
0.11504 0.00 2.890 0 0.4450 6.1630 69.60 3.4952 2 276.0 18.00 391.83 11.34 21.40
0.12083 0.00 2.890 0 0.4450 8.0690 76.00 3.4952 2 276.0 18.00 396.90 4.21 38.70
0.08187 0.00 2.890 0 0.4450 7.8200 36.90 3.4952 2 276.0 18.00 393.53 3.57 43.80
0.06860 0.00 2.890 0 0.4450 7.4160 62.50 3.4952 2 276.0 18.00 396.90 6.19 33.20
0.14866 0.00 8.560 0 0.5200 6.7270 79.90 2.7778 5 384.0 20.90 394.76 9.42 27.50
0.11432 0.00 8.560 0 0.5200 6.7810 71.30 2.8561 5 384.0 20.90 395.58 7.67 26.50
0.22876 0.00 8.560 0 0.5200 6.4050 85.40 2.7147 5 384.0 20.90 70.80 10.63 18.60
0.21161 0.00 8.560 0 0.5200 6.1370 87.40 2.7147 5 384.0 20.90 394.47 13.44 19.30
0.13960 0.00 8.560 0 0.5200 6.1670 90.00 2.4210 5 384.0 20.90 392.69 12.33 20.10
0.13262 0.00 8.560 0 0.5200 5.8510 96.70 2.1069 5 384.0 20.90 394.05 16.47 19.50
0.17120 0.00 8.560 0 0.5200 5.8360 91.90 2.2110 5 384.0 20.90 395.67 18.66 19.50
0.13117 0.00 8.560 0 0.5200 6.1270 85.20 2.1224 5 384.0 20.90 387.69 14.09 20.40
0.12802 0.00 8.560 0 0.5200 6.4740 97.10 2.4329 5 384.0 20.90 395.24 12.27 19.80
0.26363 0.00 8.560 0 0.5200 6.2290 91.20 2.5451 5 384.0 20.90 391.23 15.55 19.40
0.10793 0.00 8.560 0 0.5200 6.1950 54.40 2.7778 5 384.0 20.90 393.49 13.00 21.70
0.10084 0.00 10.010 0 0.5470 6.7150 81.60 2.6775 6 432.0 17.80 395.59 10.16 22.80
0.12329 0.00 10.010 0 0.5470 5.9130 92.90 2.3534 6 432.0 17.80 394.95 16.21 18.80
0.22212 0.00 10.010 0 0.5470 6.0920 95.40 2.5480 6 432.0 17.80 396.90 17.09 18.70
0.14231 0.00 10.010 0 0.5470 6.2540 84.20 2.2565 6 432.0 17.80 388.74 10.45 18.50
0.17134 0.00 10.010 0 0.5470 5.9280 88.20 2.4631 6 432.0 17.80 344.91 15.76 18.30
0.13158 0.00 10.010 0 0.5470 6.1760 72.50 2.7301 6 432.0 17.80 393.30 12.04 21.20
0.15098 0.00 10.010 0 0.5470 6.0210 82.60 2.7474 6 432.0 17.80 394.51 10.30 19.20
0.13058 0.00 10.010 0 0.5470 5.8720 73.10 2.4775 6 432.0 17.80 338.63 15.37 20.40
0.14476 0.00 10.010 0 0.5470 5.7310 65.20 2.7592 6 432.0 17.80 391.50 13.61 19.30
0.06899 0.00 25.650 0 0.5810 5.8700 69.70 2.2577 2 188.0 19.10 389.15 14.37 22.00
0.07165 0.00 25.650 0 0.5810 6.0040 84.10 2.1974 2 188.0 19.10 377.67 14.27 20.30
0.09299 0.00 25.650 0 0.5810 5.9610 92.90 2.0869 2 188.0 19.10 378.09 17.93 20.50
0.15038 0.00 25.650 0 0.5810 5.8560 97.00 1.9444 2 188.0 19.10 370.31 25.41 17.30
0.09849 0.00 25.650 0 0.5810 5.8790 95.80 2.0063 2 188.0 19.10 379.38 17.58 18.80
0.16902 0.00 25.650 0 0.5810 5.9860 88.40 1.9929 2 188.0 19.10 385.02 14.81 21.40
0.38735 0.00 25.650 0 0.5810 5.6130 95.60 1.7572 2 188.0 19.10 359.29 27.26 15.70
0.25915 0.00 21.890 0 0.6240 5.6930 96.00 1.7883 4 437.0 21.20 392.11 17.19 16.20
0.32543 0.00 21.890 0 0.6240 6.4310 98.80 1.8125 4 437.0 21.20 396.90 15.39 18.00
0.88125 0.00 21.890 0 0.6240 5.6370 94.70 1.9799 4 437.0 21.20 396.90 18.34 14.30
0.34006 0.00 21.890 0 0.6240 6.4580 98.90 2.1185 4 437.0 21.20 395.04 12.60 19.20
1.19294 0.00 21.890 0 0.6240 6.3260 97.70 2.2710 4 437.0 21.20 396.90 12.26 19.60
0.59005 0.00 21.890 0 0.6240 6.3720 97.90 2.3274 4 437.0 21.20 385.76 11.12 23.00
0.32982 0.00 21.890 0 0.6240 5.8220 95.40 2.4699 4 437.0 21.20 388.69 15.03 18.40
0.97617 0.00 21.890 0 0.6240 5.7570 98.40 2.3460 4 437.0 21.20 262.76 17.31 15.60
0.55778 0.00 21.890 0 0.6240 6.3350 98.20 2.1107 4 437.0 21.20 394.67 16.96 18.10
0.32264 0.00 21.890 0 0.6240 5.9420 93.50 1.9669 4 437.0 21.20 378.25 16.90 17.40
0.35233 0.00 21.890 0 0.6240 6.4540 98.40 1.8498 4 437.0 21.20 394.08 14.59 17.10
0.24980 0.00 21.890 0 0.6240 5.8570 98.20 1.6686 4 437.0 21.20 392.04 21.32 13.30
0.54452 0.00 21.890 0 0.6240 6.1510 97.90 1.6687 4 437.0 21.20 396.90 18.46 17.80
0.29090 0.00 21.890 0 0.6240 6.1740 93.60 1.6119 4 437.0 21.20 388.08 24.16 14.00
1.62864 0.00 21.890 0 0.6240 5.0190 100.00 1.4394 4 437.0 21.20 396.90 34.41 14.40
3.32105 0.00 19.580 1 0.8710 5.4030 100.00 1.3216 5 403.0 14.70 396.90 26.82 13.40
4.09740 0.00 19.580 0 0.8710 5.4680 100.00 1.4118 5 403.0 14.70 396.90 26.42 15.60
2.77974 0.00 19.580 0 0.8710 4.9030 97.80 1.3459 5 403.0 14.70 396.90 29.29 11.80
2.37934 0.00 19.580 0 0.8710 6.1300 100.00 1.4191 5 403.0 14.70 172.91 27.80 13.80
2.15505 0.00 19.580 0 0.8710 5.6280 100.00 1.5166 5 403.0 14.70 169.27 16.65 15.60
2.36862 0.00 19.580 0 0.8710 4.9260 95.70 1.4608 5 403.0 14.70 391.71 29.53 14.60
2.33099 0.00 19.580 0 0.8710 5.1860 93.80 1.5296 5 403.0 14.70 356.99 28.32 17.80
2.73397 0.00 19.580 0 0.8710 5.5970 94.90 1.5257 5 403.0 14.70 351.85 21.45 15.40
1.65660 0.00 19.580 0 0.8710 6.1220 97.30 1.6180 5 403.0 14.70 372.80 14.10 21.50
1.49632 0.00 19.580 0 0.8710 5.4040 100.00 1.5916 5 403.0 14.70 341.60 13.28 19.60
1.12658 0.00 19.580 1 0.8710 5.0120 88.00 1.6102 5 403.0 14.70 343.28 12.12 15.30
2.14918 0.00 19.580 0 0.8710 5.7090 98.50 1.6232 5 403.0 14.70 261.95 15.79 19.40
1.41385 0.00 19.580 1 0.8710 6.1290 96.00 1.7494 5 403.0 14.70 321.02 15.12 17.00
3.53501 0.00 19.580 1 0.8710 6.1520 82.60 1.7455 5 403.0 14.70 88.01 15.02 15.60
2.44668 0.00 19.580 0 0.8710 5.2720 94.00 1.7364 5 403.0 14.70 88.63 16.14 13.10
1.22358 0.00 19.580 0 0.6050 6.9430 97.40 1.8773 5 403.0 14.70 363.43 4.59 41.30
1.34284 0.00 19.580 0 0.6050 6.0660 100.00 1.7573 5 403.0 14.70 353.89 6.43 24.30
1.42502 0.00 19.580 0 0.8710 6.5100 100.00 1.7659 5 403.0 14.70 364.31 7.39 23.30
1.27346 0.00 19.580 1 0.6050 6.2500 92.60 1.7984 5 403.0 14.70 338.92 5.50 27.00
1.46336 0.00 19.580 0 0.6050 7.4890 90.80 1.9709 5 403.0 14.70 374.43 1.73 50.00
1.83377 0.00 19.580 1 0.6050 7.8020 98.20 2.0407 5 403.0 14.70 389.61 1.92 50.00
1.51902 0.00 19.580 1 0.6050 8.3750 93.90 2.1620 5 403.0 14.70 388.45 3.32 50.00
2.24236 0.00 19.580 0 0.6050 5.8540 91.80 2.4220 5 403.0 14.70 395.11 11.64 22.70
2.92400 0.00 19.580 0 0.6050 6.1010 93.00 2.2834 5 403.0 14.70 240.16 9.81 25.00
2.01019 0.00 19.580 0 0.6050 7.9290 96.20 2.0459 5 403.0 14.70 369.30 3.70 50.00
1.80028 0.00 19.580 0 0.6050 5.8770 79.20 2.4259 5 403.0 14.70 227.61 12.14 23.80
2.30040 0.00 19.580 0 0.6050 6.3190 96.10 2.1000 5 403.0 14.70 297.09 11.10 23.80
2.44953 0.00 19.580 0 0.6050 6.4020 95.20 2.2625 5 403.0 14.70 330.04 11.32 22.30
1.20742 0.00 19.580 0 0.6050 5.8750 94.60 2.4259 5 403.0 14.70 292.29 14.43 17.40
2.31390 0.00 19.580 0 0.6050 5.8800 97.30 2.3887 5 403.0 14.70 348.13 12.03 19.10
0.13914 0.00 4.050 0 0.5100 5.5720 88.50 2.5961 5 296.0 16.60 396.90 14.69 23.10
0.09178 0.00 4.050 0 0.5100 6.4160 84.10 2.6463 5 296.0 16.60 395.50 9.04 23.60
0.08447 0.00 4.050 0 0.5100 5.8590 68.70 2.7019 5 296.0 16.60 393.23 9.64 22.60
0.06664 0.00 4.050 0 0.5100 6.5460 33.10 3.1323 5 296.0 16.60 390.96 5.33 29.40
0.07022 0.00 4.050 0 0.5100 6.0200 47.20 3.5549 5 296.0 16.60 393.23 10.11 23.20
0.05425 0.00 4.050 0 0.5100 6.3150 73.40 3.3175 5 296.0 16.60 395.60 6.29 24.60
0.06642 0.00 4.050 0 0.5100 6.8600 74.40 2.9153 5 296.0 16.60 391.27 6.92 29.90
0.05780 0.00 2.460 0 0.4880 6.9800 58.40 2.8290 3 193.0 17.80 396.90 5.04 37.20
0.06588 0.00 2.460 0 0.4880 7.7650 83.30 2.7410 3 193.0 17.80 395.56 7.56 39.80
0.06888 0.00 2.460 0 0.4880 6.1440 62.20 2.5979 3 193.0 17.80 396.90 9.45 36.20
0.09103 0.00 2.460 0 0.4880 7.1550 92.20 2.7006 3 193.0 17.80 394.12 4.82 37.90
0.10008 0.00 2.460 0 0.4880 6.5630 95.60 2.8470 3 193.0 17.80 396.90 5.68 32.50
0.08308 0.00 2.460 0 0.4880 5.6040 89.80 2.9879 3 193.0 17.80 391.00 13.98 26.40
0.06047 0.00 2.460 0 0.4880 6.1530 68.80 3.2797 3 193.0 17.80 387.11 13.15 29.60
0.05602 0.00 2.460 0 0.4880 7.8310 53.60 3.1992 3 193.0 17.80 392.63 4.45 50.00
0.07875 45.00 3.440 0 0.4370 6.7820 41.10 3.7886 5 398.0 15.20 393.87 6.68 32.00
0.12579 45.00 3.440 0 0.4370 6.5560 29.10 4.5667 5 398.0 15.20 382.84 4.56 29.80
0.08370 45.00 3.440 0 0.4370 7.1850 38.90 4.5667 5 398.0 15.20 396.90 5.39 34.90
0.09068 45.00 3.440 0 0.4370 6.9510 21.50 6.4798 5 398.0 15.20 377.68 5.10 37.00
0.06911 45.00 3.440 0 0.4370 6.7390 30.80 6.4798 5 398.0 15.20 389.71 4.69 30.50
0.08664 45.00 3.440 0 0.4370 7.1780 26.30 6.4798 5 398.0 15.20 390.49 2.87 36.40
0.02187 60.00 2.930 0 0.4010 6.8000 9.90 6.2196 1 265.0 15.60 393.37 5.03 31.10
0.01439 60.00 2.930 0 0.4010 6.6040 18.80 6.2196 1 265.0 15.60 376.70 4.38 29.10
0.01381 80.00 0.460 0 0.4220 7.8750 32.00 5.6484 4 255.0 14.40 394.23 2.97 50.00
0.04011 80.00 1.520 0 0.4040 7.2870 34.10 7.3090 2 329.0 12.60 396.90 4.08 33.30
0.04666 80.00 1.520 0 0.4040 7.1070 36.60 7.3090 2 329.0 12.60 354.31 8.61 30.30
0.03768 80.00 1.520 0 0.4040 7.2740 38.30 7.3090 2 329.0 12.60 392.20 6.62 34.60
0.03150 95.00 1.470 0 0.4030 6.9750 15.30 7.6534 3 402.0 17.00 396.90 4.56 34.90
0.01778 95.00 1.470 0 0.4030 7.1350 13.90 7.6534 3 402.0 17.00 384.30 4.45 32.90
0.03445 82.50 2.030 0 0.4150 6.1620 38.40 6.2700 2 348.0 14.70 393.77 7.43 24.10
0.02177 82.50 2.030 0 0.4150 7.6100 15.70 6.2700 2 348.0 14.70 395.38 3.11 42.30
0.03510 95.00 2.680 0 0.4161 7.8530 33.20 5.1180 4 224.0 14.70 392.78 3.81 48.50
0.02009 95.00 2.680 0 0.4161 8.0340 31.90 5.1180 4 224.0 14.70 390.55 2.88 50.00
0.13642 0.00 10.590 0 0.4890 5.8910 22.30 3.9454 4 277.0 18.60 396.90 10.87 22.60
0.22969 0.00 10.590 0 0.4890 6.3260 52.50 4.3549 4 277.0 18.60 394.87 10.97 24.40
0.25199 0.00 10.590 0 0.4890 5.7830 72.70 4.3549 4 277.0 18.60 389.43 18.06 22.50
0.13587 0.00 10.590 1 0.4890 6.0640 59.10 4.2392 4 277.0 18.60 381.32 14.66 24.40
0.43571 0.00 10.590 1 0.4890 5.3440 100.00 3.8750 4 277.0 18.60 396.90 23.09 20.00
0.17446 0.00 10.590 1 0.4890 5.9600 92.10 3.8771 4 277.0 18.60 393.25 17.27 21.70
0.37578 0.00 10.590 1 0.4890 5.4040 88.60 3.6650 4 277.0 18.60 395.24 23.98 19.30
0.21719 0.00 10.590 1 0.4890 5.8070 53.80 3.6526 4 277.0 18.60 390.94 16.03 22.40
0.14052 0.00 10.590 0 0.4890 6.3750 32.30 3.9454 4 277.0 18.60 385.81 9.38 28.10
0.28955 0.00 10.590 0 0.4890 5.4120 9.80 3.5875 4 277.0 18.60 348.93 29.55 23.70
0.19802 0.00 10.590 0 0.4890 6.1820 42.40 3.9454 4 277.0 18.60 393.63 9.47 25.00
0.04560 0.00 13.890 1 0.5500 5.8880 56.00 3.1121 5 276.0 16.40 392.80 13.51 23.30
0.07013 0.00 13.890 0 0.5500 6.6420 85.10 3.4211 5 276.0 16.40 392.78 9.69 28.70
0.11069 0.00 13.890 1 0.5500 5.9510 93.80 2.8893 5 276.0 16.40 396.90 17.92 21.50
0.11425 0.00 13.890 1 0.5500 6.3730 92.40 3.3633 5 276.0 16.40 393.74 10.50 23.00
0.35809 0.00 6.200 1 0.5070 6.9510 88.50 2.8617 8 307.0 17.40 391.70 9.71 26.70
0.40771 0.00 6.200 1 0.5070 6.1640 91.30 3.0480 8 307.0 17.40 395.24 21.46 21.70
0.62356 0.00 6.200 1 0.5070 6.8790 77.70 3.2721 8 307.0 17.40 390.39 9.93 27.50
0.61470 0.00 6.200 0 0.5070 6.6180 80.80 3.2721 8 307.0 17.40 396.90 7.60 30.10
0.31533 0.00 6.200 0 0.5040 8.2660 78.30 2.8944 8 307.0 17.40 385.05 4.14 44.80
0.52693 0.00 6.200 0 0.5040 8.7250 83.00 2.8944 8 307.0 17.40 382.00 4.63 50.00
0.38214 0.00 6.200 0 0.5040 8.0400 86.50 3.2157 8 307.0 17.40 387.38 3.13 37.60
0.41238 0.00 6.200 0 0.5040 7.1630 79.90 3.2157 8 307.0 17.40 372.08 6.36 31.60
0.29819 0.00 6.200 0 0.5040 7.6860 17.00 3.3751 8 307.0 17.40 377.51 3.92 46.70
0.44178 0.00 6.200 0 0.5040 6.5520 21.40 3.3751 8 307.0 17.40 380.34 3.76 31.50
0.53700 0.00 6.200 0 0.5040 5.9810 68.10 3.6715 8 307.0 17.40 378.35 11.65 24.30
0.46296 0.00 6.200 0 0.5040 7.4120 76.90 3.6715 8 307.0 17.40 376.14 5.25 31.70
0.57529 0.00 6.200 0 0.5070 8.3370 73.30 3.8384 8 307.0 17.40 385.91 2.47 41.70
0.33147 0.00 6.200 0 0.5070 8.2470 70.40 3.6519 8 307.0 17.40 378.95 3.95 48.30
0.44791 0.00 6.200 1 0.5070 6.7260 66.50 3.6519 8 307.0 17.40 360.20 8.05 29.00
0.33045 0.00 6.200 0 0.5070 6.0860 61.50 3.6519 8 307.0 17.40 376.75 10.88 24.00
0.52058 0.00 6.200 1 0.5070 6.6310 76.50 4.1480 8 307.0 17.40 388.45 9.54 25.10
0.51183 0.00 6.200 0 0.5070 7.3580 71.60 4.1480 8 307.0 17.40 390.07 4.73 31.50
0.08244 30.00 4.930 0 0.4280 6.4810 18.50 6.1899 6 300.0 16.60 379.41 6.36 23.70
0.09252 30.00 4.930 0 0.4280 6.6060 42.20 6.1899 6 300.0 16.60 383.78 7.37 23.30
0.11329 30.00 4.930 0 0.4280 6.8970 54.30 6.3361 6 300.0 16.60 391.25 11.38 22.00
0.10612 30.00 4.930 0 0.4280 6.0950 65.10 6.3361 6 300.0 16.60 394.62 12.40 20.10
0.10290 30.00 4.930 0 0.4280 6.3580 52.90 7.0355 6 300.0 16.60 372.75 11.22 22.20
0.12757 30.00 4.930 0 0.4280 6.3930 7.80 7.0355 6 300.0 16.60 374.71 5.19 23.70
0.20608 22.00 5.860 0 0.4310 5.5930 76.50 7.9549 7 330.0 19.10 372.49 12.50 17.60
0.19133 22.00 5.860 0 0.4310 5.6050 70.20 7.9549 7 330.0 19.10 389.13 18.46 18.50
0.33983 22.00 5.860 0 0.4310 6.1080 34.90 8.0555 7 330.0 19.10 390.18 9.16 24.30
0.19657 22.00 5.860 0 0.4310 6.2260 79.20 8.0555 7 330.0 19.10 376.14 10.15 20.50
0.16439 22.00 5.860 0 0.4310 6.4330 49.10 7.8265 7 330.0 19.10 374.71 9.52 24.50
0.19073 22.00 5.860 0 0.4310 6.7180 17.50 7.8265 7 330.0 19.10 393.74 6.56 26.20
0.14030 22.00 5.860 0 0.4310 6.4870 13.00 7.3967 7 330.0 19.10 396.28 5.90 24.40
0.21409 22.00 5.860 0 0.4310 6.4380 8.90 7.3967 7 330.0 19.10 377.07 3.59 24.80
0.08221 22.00 5.860 0 0.4310 6.9570 6.80 8.9067 7 330.0 19.10 386.09 3.53 29.60
0.36894 22.00 5.860 0 0.4310 8.2590 8.40 8.9067 7 330.0 19.10 396.90 3.54 42.80
0.04819 80.00 3.640 0 0.3920 6.1080 32.00 9.2203 1 315.0 16.40 392.89 6.57 21.90
0.03548 80.00 3.640 0 0.3920 5.8760 19.10 9.2203 1 315.0 16.40 395.18 9.25 20.90
0.01538 90.00 3.750 0 0.3940 7.4540 34.20 6.3361 3 244.0 15.90 386.34 3.11 44.00
0.61154 20.00 3.970 0 0.6470 8.7040 86.90 1.8010 5 264.0 13.00 389.70 5.12 50.00
0.66351 20.00 3.970 0 0.6470 7.3330 100.00 1.8946 5 264.0 13.00 383.29 7.79 36.00
0.65665 20.00 3.970 0 0.6470 6.8420 100.00 2.0107 5 264.0 13.00 391.93 6.90 30.10
0.54011 20.00 3.970 0 0.6470 7.2030 81.80 2.1121 5 264.0 13.00 392.80 9.59 33.80
0.53412 20.00 3.970 0 0.6470 7.5200 89.40 2.1398 5 264.0 13.00 388.37 7.26 43.10
0.52014 20.00 3.970 0 0.6470 8.3980 91.50 2.2885 5 264.0 13.00 386.86 5.91 48.80
0.82526 20.00 3.970 0 0.6470 7.3270 94.50 2.0788 5 264.0 13.00 393.42 11.25 31.00
0.55007 20.00 3.970 0 0.6470 7.2060 91.60 1.9301 5 264.0 13.00 387.89 8.10 36.50
0.76162 20.00 3.970 0 0.6470 5.5600 62.80 1.9865 5 264.0 13.00 392.40 10.45 22.80
0.78570 20.00 3.970 0 0.6470 7.0140 84.60 2.1329 5 264.0 13.00 384.07 14.79 30.70
0.57834 20.00 3.970 0 0.5750 8.2970 67.00 2.4216 5 264.0 13.00 384.54 7.44 50.00
0.54050 20.00 3.970 0 0.5750 7.4700 52.60 2.8720 5 264.0 13.00 390.30 3.16 43.50
0.09065 20.00 6.960 1 0.4640 5.9200 61.50 3.9175 3 223.0 18.60 391.34 13.65 20.70
0.29916 20.00 6.960 0 0.4640 5.8560 42.10 4.4290 3 223.0 18.60 388.65 13.00 21.10
0.16211 20.00 6.960 0 0.4640 6.2400 16.30 4.4290 3 223.0 18.60 396.90 6.59 25.20
0.11460 20.00 6.960 0 0.4640 6.5380 58.70 3.9175 3 223.0 18.60 394.96 7.73 24.40
0.22188 20.00 6.960 1 0.4640 7.6910 51.80 4.3665 3 223.0 18.60 390.77 6.58 35.20
0.05644 40.00 6.410 1 0.4470 6.7580 32.90 4.0776 4 254.0 17.60 396.90 3.53 32.40
0.09604 40.00 6.410 0 0.4470 6.8540 42.80 4.2673 4 254.0 17.60 396.90 2.98 32.00
0.10469 40.00 6.410 1 0.4470 7.2670 49.00 4.7872 4 254.0 17.60 389.25 6.05 33.20
0.06127 40.00 6.410 1 0.4470 6.8260 27.60 4.8628 4 254.0 17.60 393.45 4.16 33.10
0.07978 40.00 6.410 0 0.4470 6.4820 32.10 4.1403 4 254.0 17.60 396.90 7.19 29.10
0.21038 20.00 3.330 0 0.4429 6.8120 32.20 4.1007 5 216.0 14.90 396.90 4.85 35.10
0.03578 20.00 3.330 0 0.4429 7.8200 64.50 4.6947 5 216.0 14.90 387.31 3.76 45.40
0.03705 20.00 3.330 0 0.4429 6.9680 37.20 5.2447 5 216.0 14.90 392.23 4.59 35.40
0.06129 20.00 3.330 1 0.4429 7.6450 49.70 5.2119 5 216.0 14.90 377.07 3.01 46.00
0.01501 90.00 1.210 1 0.4010 7.9230 24.80 5.8850 1 198.0 13.60 395.52 3.16 50.00
0.00906 90.00 2.970 0 0.4000 7.0880 20.80 7.3073 1 285.0 15.30 394.72 7.85 32.20
0.01096 55.00 2.250 0 0.3890 6.4530 31.90 7.3073 1 300.0 15.30 394.72 8.23 22.00
0.01965 80.00 1.760 0 0.3850 6.2300 31.50 9.0892 1 241.0 18.20 341.60 12.93 20.10
0.03871 52.50 5.320 0 0.4050 6.2090 31.30 7.3172 6 293.0 16.60 396.90 7.14 23.20
0.04590 52.50 5.320 0 0.4050 6.3150 45.60 7.3172 6 293.0 16.60 396.90 7.60 22.30
0.04297 52.50 5.320 0 0.4050 6.5650 22.90 7.3172 6 293.0 16.60 371.72 9.51 24.80
0.03502 80.00 4.950 0 0.4110 6.8610 27.90 5.1167 4 245.0 19.20 396.90 3.33 28.50
0.07886 80.00 4.950 0 0.4110 7.1480 27.70 5.1167 4 245.0 19.20 396.90 3.56 37.30
0.03615 80.00 4.950 0 0.4110 6.6300 23.40 5.1167 4 245.0 19.20 396.90 4.70 27.90
0.08265 0.00 13.920 0 0.4370 6.1270 18.40 5.5027 4 289.0 16.00 396.90 8.58 23.90
0.08199 0.00 13.920 0 0.4370 6.0090 42.30 5.5027 4 289.0 16.00 396.90 10.40 21.70
0.12932 0.00 13.920 0 0.4370 6.6780 31.10 5.9604 4 289.0 16.00 396.90 6.27 28.60
0.05372 0.00 13.920 0 0.4370 6.5490 51.00 5.9604 4 289.0 16.00 392.85 7.39 27.10
0.14103 0.00 13.920 0 0.4370 5.7900 58.00 6.3200 4 289.0 16.00 396.90 15.84 20.30
0.06466 70.00 2.240 0 0.4000 6.3450 20.10 7.8278 5 358.0 14.80 368.24 4.97 22.50
0.05561 70.00 2.240 0 0.4000 7.0410 10.00 7.8278 5 358.0 14.80 371.58 4.74 29.00
0.04417 70.00 2.240 0 0.4000 6.8710 47.40 7.8278 5 358.0 14.80 390.86 6.07 24.80
0.03537 34.00 6.090 0 0.4330 6.5900 40.40 5.4917 7 329.0 16.10 395.75 9.50 22.00
0.09266 34.00 6.090 0 0.4330 6.4950 18.40 5.4917 7 329.0 16.10 383.61 8.67 26.40
0.10000 34.00 6.090 0 0.4330 6.9820 17.70 5.4917 7 329.0 16.10 390.43 4.86 33.10
0.05515 33.00 2.180 0 0.4720 7.2360 41.10 4.0220 7 222.0 18.40 393.68 6.93 36.10
0.05479 33.00 2.180 0 0.4720 6.6160 58.10 3.3700 7 222.0 18.40 393.36 8.93 28.40
0.07503 33.00 2.180 0 0.4720 7.4200 71.90 3.0992 7 222.0 18.40 396.90 6.47 33.40
0.04932 33.00 2.180 0 0.4720 6.8490 70.30 3.1827 7 222.0 18.40 396.90 7.53 28.20
0.49298 0.00 9.900 0 0.5440 6.6350 82.50 3.3175 4 304.0 18.40 396.90 4.54 22.80
0.34940 0.00 9.900 0 0.5440 5.9720 76.70 3.1025 4 304.0 18.40 396.24 9.97 20.30
2.63548 0.00 9.900 0 0.5440 4.9730 37.80 2.5194 4 304.0 18.40 350.45 12.64 16.10
0.79041 0.00 9.900 0 0.5440 6.1220 52.80 2.6403 4 304.0 18.40 396.90 5.98 22.10
0.26169 0.00 9.900 0 0.5440 6.0230 90.40 2.8340 4 304.0 18.40 396.30 11.72 19.40
0.26938 0.00 9.900 0 0.5440 6.2660 82.80 3.2628 4 304.0 18.40 393.39 7.90 21.60
0.36920 0.00 9.900 0 0.5440 6.5670 87.30 3.6023 4 304.0 18.40 395.69 9.28 23.80
0.25356 0.00 9.900 0 0.5440 5.7050 77.70 3.9450 4 304.0 18.40 396.42 11.50 16.20
0.31827 0.00 9.900 0 0.5440 5.9140 83.20 3.9986 4 304.0 18.40 390.70 18.33 17.80
0.24522 0.00 9.900 0 0.5440 5.7820 71.70 4.0317 4 304.0 18.40 396.90 15.94 19.80
0.40202 0.00 9.900 0 0.5440 6.3820 67.20 3.5325 4 304.0 18.40 395.21 10.36 23.10
0.47547 0.00 9.900 0 0.5440 6.1130 58.80 4.0019 4 304.0 18.40 396.23 12.73 21.00
0.16760 0.00 7.380 0 0.4930 6.4260 52.30 4.5404 5 287.0 19.60 396.90 7.20 23.80
0.18159 0.00 7.380 0 0.4930 6.3760 54.30 4.5404 5 287.0 19.60 396.90 6.87 23.10
0.35114 0.00 7.380 0 0.4930 6.0410 49.90 4.7211 5 287.0 19.60 396.90 7.70 20.40
0.28392 0.00 7.380 0 0.4930 5.7080 74.30 4.7211 5 287.0 19.60 391.13 11.74 18.50
0.34109 0.00 7.380 0 0.4930 6.4150 40.10 4.7211 5 287.0 19.60 396.90 6.12 25.00
0.19186 0.00 7.380 0 0.4930 6.4310 14.70 5.4159 5 287.0 19.60 393.68 5.08 24.60
0.30347 0.00 7.380 0 0.4930 6.3120 28.90 5.4159 5 287.0 19.60 396.90 6.15 23.00
0.24103 0.00 7.380 0 0.4930 6.0830 43.70 5.4159 5 287.0 19.60 396.90 12.79 22.20
0.06617 0.00 3.240 0 0.4600 5.8680 25.80 5.2146 4 430.0 16.90 382.44 9.97 19.30
0.06724 0.00 3.240 0 0.4600 6.3330 17.20 5.2146 4 430.0 16.90 375.21 7.34 22.60
0.04544 0.00 3.240 0 0.4600 6.1440 32.20 5.8736 4 430.0 16.90 368.57 9.09 19.80
0.05023 35.00 6.060 0 0.4379 5.7060 28.40 6.6407 1 304.0 16.90 394.02 12.43 17.10
0.03466 35.00 6.060 0 0.4379 6.0310 23.30 6.6407 1 304.0 16.90 362.25 7.83 19.40
0.05083 0.00 5.190 0 0.5150 6.3160 38.10 6.4584 5 224.0 20.20 389.71 5.68 22.20
0.03738 0.00 5.190 0 0.5150 6.3100 38.50 6.4584 5 224.0 20.20 389.40 6.75 20.70
0.03961 0.00 5.190 0 0.5150 6.0370 34.50 5.9853 5 224.0 20.20 396.90 8.01 21.10
0.03427 0.00 5.190 0 0.5150 5.8690 46.30 5.2311 5 224.0 20.20 396.90 9.80 19.50
0.03041 0.00 5.190 0 0.5150 5.8950 59.60 5.6150 5 224.0 20.20 394.81 10.56 18.50
0.03306 0.00 5.190 0 0.5150 6.0590 37.30 4.8122 5 224.0 20.20 396.14 8.51 20.60
0.05497 0.00 5.190 0 0.5150 5.9850 45.40 4.8122 5 224.0 20.20 396.90 9.74 19.00
0.06151 0.00 5.190 0 0.5150 5.9680 58.50 4.8122 5 224.0 20.20 396.90 9.29 18.70
0.01301 35.00 1.520 0 0.4420 7.2410 49.30 7.0379 1 284.0 15.50 394.74 5.49 32.70
0.02498 0.00 1.890 0 0.5180 6.5400 59.70 6.2669 1 422.0 15.90 389.96 8.65 16.50
0.02543 55.00 3.780 0 0.4840 6.6960 56.40 5.7321 5 370.0 17.60 396.90 7.18 23.90
0.03049 55.00 3.780 0 0.4840 6.8740 28.10 6.4654 5 370.0 17.60 387.97 4.61 31.20
0.03113 0.00 4.390 0 0.4420 6.0140 48.50 8.0136 3 352.0 18.80 385.64 10.53 17.50
0.06162 0.00 4.390 0 0.4420 5.8980 52.30 8.0136 3 352.0 18.80 364.61 12.67 17.20
0.01870 85.00 4.150 0 0.4290 6.5160 27.70 8.5353 4 351.0 17.90 392.43 6.36 23.10
0.01501 80.00 2.010 0 0.4350 6.6350 29.70 8.3440 4 280.0 17.00 390.94 5.99 24.50
0.02899 40.00 1.250 0 0.4290 6.9390 34.50 8.7921 1 335.0 19.70 389.85 5.89 26.60
0.06211 40.00 1.250 0 0.4290 6.4900 44.40 8.7921 1 335.0 19.70 396.90 5.98 22.90
0.07950 60.00 1.690 0 0.4110 6.5790 35.90 10.7103 4 411.0 18.30 370.78 5.49 24.10
0.07244 60.00 1.690 0 0.4110 5.8840 18.50 10.7103 4 411.0 18.30 392.33 7.79 18.60
0.01709 90.00 2.020 0 0.4100 6.7280 36.10 12.1265 5 187.0 17.00 384.46 4.50 30.10
0.04301 80.00 1.910 0 0.4130 5.6630 21.90 10.5857 4 334.0 22.00 382.80 8.05 18.20
0.10659 80.00 1.910 0 0.4130 5.9360 19.50 10.5857 4 334.0 22.00 376.04 5.57 20.60
8.98296 0.00 18.100 1 0.7700 6.2120 97.40 2.1222 24 666.0 20.20 377.73 17.60 17.80
3.84970 0.00 18.100 1 0.7700 6.3950 91.00 2.5052 24 666.0 20.20 391.34 13.27 21.70
5.20177 0.00 18.100 1 0.7700 6.1270 83.40 2.7227 24 666.0 20.20 395.43 11.48 22.70
4.26131 0.00 18.100 0 0.7700 6.1120 81.30 2.5091 24 666.0 20.20 390.74 12.67 22.60
4.54192 0.00 18.100 0 0.7700 6.3980 88.00 2.5182 24 666.0 20.20 374.56 7.79 25.00
3.83684 0.00 18.100 0 0.7700 6.2510 91.10 2.2955 24 666.0 20.20 350.65 14.19 19.90
3.67822 0.00 18.100 0 0.7700 5.3620 96.20 2.1036 24 666.0 20.20 380.79 10.19 20.80
4.22239 0.00 18.100 1 0.7700 5.8030 89.00 1.9047 24 666.0 20.20 353.04 14.64 16.80
3.47428 0.00 18.100 1 0.7180 8.7800 82.90 1.9047 24 666.0 20.20 354.55 5.29 21.90
4.55587 0.00 18.100 0 0.7180 3.5610 87.90 1.6132 24 666.0 20.20 354.70 7.12 27.50
3.69695 0.00 18.100 0 0.7180 4.9630 91.40 1.7523 24 666.0 20.20 316.03 14.00 21.90
13.52220 0.00 18.100 0 0.6310 3.8630 100.00 1.5106 24 666.0 20.20 131.42 13.33 23.10
4.89822 0.00 18.100 0 0.6310 4.9700 100.00 1.3325 24 666.0 20.20 375.52 3.26 50.00
5.66998 0.00 18.100 1 0.6310 6.6830 96.80 1.3567 24 666.0 20.20 375.33 3.73 50.00
6.53876 0.00 18.100 1 0.6310 7.0160 97.50 1.2024 24 666.0 20.20 392.05 2.96 50.00
9.23230 0.00 18.100 0 0.6310 6.2160 100.00 1.1691 24 666.0 20.20 366.15 9.53 50.00
8.26725 0.00 18.100 1 0.6680 5.8750 89.60 1.1296 24 666.0 20.20 347.88 8.88 50.00
11.10810 0.00 18.100 0 0.6680 4.9060 100.00 1.1742 24 666.0 20.20 396.90 34.77 13.80
18.49820 0.00 18.100 0 0.6680 4.1380 100.00 1.1370 24 666.0 20.20 396.90 37.97 13.80
19.60910 0.00 18.100 0 0.6710 7.3130 97.90 1.3163 24 666.0 20.20 396.90 13.44 15.00
15.28800 0.00 18.100 0 0.6710 6.6490 93.30 1.3449 24 666.0 20.20 363.02 23.24 13.90
9.82349 0.00 18.100 0 0.6710 6.7940 98.80 1.3580 24 666.0 20.20 396.90 21.24 13.30
23.64820 0.00 18.100 0 0.6710 6.3800 96.20 1.3861 24 666.0 20.20 396.90 23.69 13.10
17.86670 0.00 18.100 0 0.6710 6.2230 100.00 1.3861 24 666.0 20.20 393.74 21.78 10.20
88.97620 0.00 18.100 0 0.6710 6.9680 91.90 1.4165 24 666.0 20.20 396.90 17.21 10.40
15.87440 0.00 18.100 0 0.6710 6.5450 99.10 1.5192 24 666.0 20.20 396.90 21.08 10.90
9.18702 0.00 18.100 0 0.7000 5.5360 100.00 1.5804 24 666.0 20.20 396.90 23.60 11.30
7.99248 0.00 18.100 0 0.7000 5.5200 100.00 1.5331 24 666.0 20.20 396.90 24.56 12.30
20.08490 0.00 18.100 0 0.7000 4.3680 91.20 1.4395 24 666.0 20.20 285.83 30.63 8.80
16.81180 0.00 18.100 0 0.7000 5.2770 98.10 1.4261 24 666.0 20.20 396.90 30.81 7.20
24.39380 0.00 18.100 0 0.7000 4.6520 100.00 1.4672 24 666.0 20.20 396.90 28.28 10.50
22.59710 0.00 18.100 0 0.7000 5.0000 89.50 1.5184 24 666.0 20.20 396.90 31.99 7.40
14.33370 0.00 18.100 0 0.7000 4.8800 100.00 1.5895 24 666.0 20.20 372.92 30.62 10.20
8.15174 0.00 18.100 0 0.7000 5.3900 98.90 1.7281 24 666.0 20.20 396.90 20.85 11.50
6.96215 0.00 18.100 0 0.7000 5.7130 97.00 1.9265 24 666.0 20.20 394.43 17.11 15.10
5.29305 0.00 18.100 0 0.7000 6.0510 82.50 2.1678 24 666.0 20.20 378.38 18.76 23.20
11.57790 0.00 18.100 0 0.7000 5.0360 97.00 1.7700 24 666.0 20.20 396.90 25.68 9.70
8.64476 0.00 18.100 0 0.6930 6.1930 92.60 1.7912 24 666.0 20.20 396.90 15.17 13.80
13.35980 0.00 18.100 0 0.6930 5.8870 94.70 1.7821 24 666.0 20.20 396.90 16.35 12.70
8.71675 0.00 18.100 0 0.6930 6.4710 98.80 1.7257 24 666.0 20.20 391.98 17.12 13.10
5.87205 0.00 18.100 0 0.6930 6.4050 96.00 1.6768 24 666.0 20.20 396.90 19.37 12.50
7.67202 0.00 18.100 0 0.6930 5.7470 98.90 1.6334 24 666.0 20.20 393.10 19.92 8.50
38.35180 0.00 18.100 0 0.6930 5.4530 100.00 1.4896 24 666.0 20.20 396.90 30.59 5.00
9.91655 0.00 18.100 0 0.6930 5.8520 77.80 1.5004 24 666.0 20.20 338.16 29.97 6.30
25.04610 0.00 18.100 0 0.6930 5.9870 100.00 1.5888 24 666.0 20.20 396.90 26.77 5.60
14.23620 0.00 18.100 0 0.6930 6.3430 100.00 1.5741 24 666.0 20.20 396.90 20.32 7.20
9.59571 0.00 18.100 0 0.6930 6.4040 100.00 1.6390 24 666.0 20.20 376.11 20.31 12.10
24.80170 0.00 18.100 0 0.6930 5.3490 96.00 1.7028 24 666.0 20.20 396.90 19.77 8.30
41.52920 0.00 18.100 0 0.6930 5.5310 85.40 1.6074 24 666.0 20.20 329.46 27.38 8.50
67.92080 0.00 18.100 0 0.6930 5.6830 100.00 1.4254 24 666.0 20.20 384.97 22.98 5.00
20.71620 0.00 18.100 0 0.6590 4.1380 100.00 1.1781 24 666.0 20.20 370.22 23.34 11.90
11.95110 0.00 18.100 0 0.6590 5.6080 100.00 1.2852 24 666.0 20.20 332.09 12.13 27.90
7.40389 0.00 18.100 0 0.5970 5.6170 97.90 1.4547 24 666.0 20.20 314.64 26.40 17.20
14.43830 0.00 18.100 0 0.5970 6.8520 100.00 1.4655 24 666.0 20.20 179.36 19.78 27.50
51.13580 0.00 18.100 0 0.5970 5.7570 100.00 1.4130 24 666.0 20.20 2.60 10.11 15.00
14.05070 0.00 18.100 0 0.5970 6.6570 100.00 1.5275 24 666.0 20.20 35.05 21.22 17.20
18.81100 0.00 18.100 0 0.5970 4.6280 100.00 1.5539 24 666.0 20.20 28.79 34.37 17.90
28.65580 0.00 18.100 0 0.5970 5.1550 100.00 1.5894 24 666.0 20.20 210.97 20.08 16.30
45.74610 0.00 18.100 0 0.6930 4.5190 100.00 1.6582 24 666.0 20.20 88.27 36.98 7.00
18.08460 0.00 18.100 0 0.6790 6.4340 100.00 1.8347 24 666.0 20.20 27.25 29.05 7.20
10.83420 0.00 18.100 0 0.6790 6.7820 90.80 1.8195 24 666.0 20.20 21.57 25.79 7.50
25.94060 0.00 18.100 0 0.6790 5.3040 89.10 1.6475 24 666.0 20.20 127.36 26.64 10.40
73.53410 0.00 18.100 0 0.6790 5.9570 100.00 1.8026 24 666.0 20.20 16.45 20.62 8.80
11.81230 0.00 18.100 0 0.7180 6.8240 76.50 1.7940 24 666.0 20.20 48.45 22.74 8.40
11.08740 0.00 18.100 0 0.7180 6.4110 100.00 1.8589 24 666.0 20.20 318.75 15.02 16.70
7.02259 0.00 18.100 0 0.7180 6.0060 95.30 1.8746 24 666.0 20.20 319.98 15.70 14.20
12.04820 0.00 18.100 0 0.6140 5.6480 87.60 1.9512 24 666.0 20.20 291.55 14.10 20.80
7.05042 0.00 18.100 0 0.6140 6.1030 85.10 2.0218 24 666.0 20.20 2.52 23.29 13.40
8.79212 0.00 18.100 0 0.5840 5.5650 70.60 2.0635 24 666.0 20.20 3.65 17.16 11.70
15.86030 0.00 18.100 0 0.6790 5.8960 95.40 1.9096 24 666.0 20.20 7.68 24.39 8.30
12.24720 0.00 18.100 0 0.5840 5.8370 59.70 1.9976 24 666.0 20.20 24.65 15.69 10.20
37.66190 0.00 18.100 0 0.6790 6.2020 78.70 1.8629 24 666.0 20.20 18.82 14.52 10.90
7.36711 0.00 18.100 0 0.6790 6.1930 78.10 1.9356 24 666.0 20.20 96.73 21.52 11.00
9.33889 0.00 18.100 0 0.6790 6.3800 95.60 1.9682 24 666.0 20.20 60.72 24.08 9.50
8.49213 0.00 18.100 0 0.5840 6.3480 86.10 2.0527 24 666.0 20.20 83.45 17.64 14.50
10.06230 0.00 18.100 0 0.5840 6.8330 94.30 2.0882 24 666.0 20.20 81.33 19.69 14.10
6.44405 0.00 18.100 0 0.5840 6.4250 74.80 2.2004 24 666.0 20.20 97.95 12.03 16.10
5.58107 0.00 18.100 0 0.7130 6.4360 87.90 2.3158 24 666.0 20.20 100.19 16.22 14.30
13.91340 0.00 18.100 0 0.7130 6.2080 95.00 2.2222 24 666.0 20.20 100.63 15.17 11.70
11.16040 0.00 18.100 0 0.7400 6.6290 94.60 2.1247 24 666.0 20.20 109.85 23.27 13.40
14.42080 0.00 18.100 0 0.7400 6.4610 93.30 2.0026 24 666.0 20.20 27.49 18.05 9.60
15.17720 0.00 18.100 0 0.7400 6.1520 100.00 1.9142 24 666.0 20.20 9.32 26.45 8.70
13.67810 0.00 18.100 0 0.7400 5.9350 87.90 1.8206 24 666.0 20.20 68.95 34.02 8.40
9.39063 0.00 18.100 0 0.7400 5.6270 93.90 1.8172 24 666.0 20.20 396.90 22.88 12.80
22.05110 0.00 18.100 0 0.7400 5.8180 92.40 1.8662 24 666.0 20.20 391.45 22.11 10.50
9.72418 0.00 18.100 0 0.7400 6.4060 97.20 2.0651 24 666.0 20.20 385.96 19.52 17.10
5.66637 0.00 18.100 0 0.7400 6.2190 100.00 2.0048 24 666.0 20.20 395.69 16.59 18.40
9.96654 0.00 18.100 0 0.7400 6.4850 100.00 1.9784 24 666.0 20.20 386.73 18.85 15.40
12.80230 0.00 18.100 0 0.7400 5.8540 96.60 1.8956 24 666.0 20.20 240.52 23.79 10.80
10.67180 0.00 18.100 0 0.7400 6.4590 94.80 1.9879 24 666.0 20.20 43.06 23.98 11.80
6.28807 0.00 18.100 0 0.7400 6.3410 96.40 2.0720 24 666.0 20.20 318.01 17.79 14.90
9.92485 0.00 18.100 0 0.7400 6.2510 96.60 2.1980 24 666.0 20.20 388.52 16.44 12.60
9.32909 0.00 18.100 0 0.7130 6.1850 98.70 2.2616 24 666.0 20.20 396.90 18.13 14.10
7.52601 0.00 18.100 0 0.7130 6.4170 98.30 2.1850 24 666.0 20.20 304.21 19.31 13.00
6.71772 0.00 18.100 0 0.7130 6.7490 92.60 2.3236 24 666.0 20.20 0.32 17.44 13.40
5.44114 0.00 18.100 0 0.7130 6.6550 98.20 2.3552 24 666.0 20.20 355.29 17.73 15.20
5.09017 0.00 18.100 0 0.7130 6.2970 91.80 2.3682 24 666.0 20.20 385.09 17.27 16.10
8.24809 0.00 18.100 0 0.7130 7.3930 99.30 2.4527 24 666.0 20.20 375.87 16.74 17.80
9.51363 0.00 18.100 0 0.7130 6.7280 94.10 2.4961 24 666.0 20.20 6.68 18.71 14.90
4.75237 0.00 18.100 0 0.7130 6.5250 86.50 2.4358 24 666.0 20.20 50.92 18.13 14.10
4.66883 0.00 18.100 0 0.7130 5.9760 87.90 2.5806 24 666.0 20.20 10.48 19.01 12.70
8.20058 0.00 18.100 0 0.7130 5.9360 80.30 2.7792 24 666.0 20.20 3.50 16.94 13.50
7.75223 0.00 18.100 0 0.7130 6.3010 83.70 2.7831 24 666.0 20.20 272.21 16.23 14.90
6.80117 0.00 18.100 0 0.7130 6.0810 84.40 2.7175 24 666.0 20.20 396.90 14.70 20.00
4.81213 0.00 18.100 0 0.7130 6.7010 90.00 2.5975 24 666.0 20.20 255.23 16.42 16.40
3.69311 0.00 18.100 0 0.7130 6.3760 88.40 2.5671 24 666.0 20.20 391.43 14.65 17.70
6.65492 0.00 18.100 0 0.7130 6.3170 83.00 2.7344 24 666.0 20.20 396.90 13.99 19.50
5.82115 0.00 18.100 0 0.7130 6.5130 89.90 2.8016 24 666.0 20.20 393.82 10.29 20.20
7.83932 0.00 18.100 0 0.6550 6.2090 65.40 2.9634 24 666.0 20.20 396.90 13.22 21.40
3.16360 0.00 18.100 0 0.6550 5.7590 48.20 3.0665 24 666.0 20.20 334.40 14.13 19.90
3.77498 0.00 18.100 0 0.6550 5.9520 84.70 2.8715 24 666.0 20.20 22.01 17.15 19.00
4.42228 0.00 18.100 0 0.5840 6.0030 94.50 2.5403 24 666.0 20.20 331.29 21.32 19.10
15.57570 0.00 18.100 0 0.5800 5.9260 71.00 2.9084 24 666.0 20.20 368.74 18.13 19.10
13.07510 0.00 18.100 0 0.5800 5.7130 56.70 2.8237 24 666.0 20.20 396.90 14.76 20.10
4.34879 0.00 18.100 0 0.5800 6.1670 84.00 3.0334 24 666.0 20.20 396.90 16.29 19.90
4.03841 0.00 18.100 0 0.5320 6.2290 90.70 3.0993 24 666.0 20.20 395.33 12.87 19.60
3.56868 0.00 18.100 0 0.5800 6.4370 75.00 2.8965 24 666.0 20.20 393.37 14.36 23.20
4.64689 0.00 18.100 0 0.6140 6.9800 67.60 2.5329 24 666.0 20.20 374.68 11.66 29.80
8.05579 0.00 18.100 0 0.5840 5.4270 95.40 2.4298 24 666.0 20.20 352.58 18.14 13.80
6.39312 0.00 18.100 0 0.5840 6.1620 97.40 2.2060 24 666.0 20.20 302.76 24.10 13.30
4.87141 0.00 18.100 0 0.6140 6.4840 93.60 2.3053 24 666.0 20.20 396.21 18.68 16.70
15.02340 0.00 18.100 0 0.6140 5.3040 97.30 2.1007 24 666.0 20.20 349.48 24.91 12.00
10.23300 0.00 18.100 0 0.6140 6.1850 96.70 2.1705 24 666.0 20.20 379.70 18.03 14.60
14.33370 0.00 18.100 0 0.6140 6.2290 88.00 1.9512 24 666.0 20.20 383.32 13.11 21.40
5.82401 0.00 18.100 0 0.5320 6.2420 64.70 3.4242 24 666.0 20.20 396.90 10.74 23.00
5.70818 0.00 18.100 0 0.5320 6.7500 74.90 3.3317 24 666.0 20.20 393.07 7.74 23.70
5.73116 0.00 18.100 0 0.5320 7.0610 77.00 3.4106 24 666.0 20.20 395.28 7.01 25.00
2.81838 0.00 18.100 0 0.5320 5.7620 40.30 4.0983 24 666.0 20.20 392.92 10.42 21.80
2.37857 0.00 18.100 0 0.5830 5.8710 41.90 3.7240 24 666.0 20.20 370.73 13.34 20.60
3.67367 0.00 18.100 0 0.5830 6.3120 51.90 3.9917 24 666.0 20.20 388.62 10.58 21.20
5.69175 0.00 18.100 0 0.5830 6.1140 79.80 3.5459 24 666.0 20.20 392.68 14.98 19.10
4.83567 0.00 18.100 0 0.5830 5.9050 53.20 3.1523 24 666.0 20.20 388.22 11.45 20.60
0.15086 0.00 27.740 0 0.6090 5.4540 92.70 1.8209 4 711.0 20.10 395.09 18.06 15.20
0.18337 0.00 27.740 0 0.6090 5.4140 98.30 1.7554 4 711.0 20.10 344.05 23.97 7.00
0.20746 0.00 27.740 0 0.6090 5.0930 98.00 1.8226 4 711.0 20.10 318.43 29.68 8.10
0.10574 0.00 27.740 0 0.6090 5.9830 98.80 1.8681 4 711.0 20.10 390.11 18.07 13.60
0.11132 0.00 27.740 0 0.6090 5.9830 83.50 2.1099 4 711.0 20.10 396.90 13.35 20.10
0.17331 0.00 9.690 0 0.5850 5.7070 54.00 2.3817 6 391.0 19.20 396.90 12.01 21.80
0.27957 0.00 9.690 0 0.5850 5.9260 42.60 2.3817 6 391.0 19.20 396.90 13.59 24.50
0.17899 0.00 9.690 0 0.5850 5.6700 28.80 2.7986 6 391.0 19.20 393.29 17.60 23.10
0.28960 0.00 9.690 0 0.5850 5.3900 72.90 2.7986 6 391.0 19.20 396.90 21.14 19.70
0.26838 0.00 9.690 0 0.5850 5.7940 70.60 2.8927 6 391.0 19.20 396.90 14.10 18.30
0.23912 0.00 9.690 0 0.5850 6.0190 65.30 2.4091 6 391.0 19.20 396.90 12.92 21.20
0.17783 0.00 9.690 0 0.5850 5.5690 73.50 2.3999 6 391.0 19.20 395.77 15.10 17.50
0.22438 0.00 9.690 0 0.5850 6.0270 79.70 2.4982 6 391.0 19.20 396.90 14.33 16.80
0.06263 0.00 11.930 0 0.5730 6.5930 69.10 2.4786 1 273.0 21.00 391.99 9.67 22.40
0.04527 0.00 11.930 0 0.5730 6.1200 76.70 2.2875 1 273.0 21.00 396.90 9.08 20.60
0.06076 0.00 11.930 0 0.5730 6.9760 91.00 2.1675 1 273.0 21.00 396.90 5.64 23.90
0.10959 0.00 11.930 0 0.5730 6.7940 89.30 2.3889 1 273.0 21.00 393.45 6.48 22.00
0.04741 0.00 11.930 0 0.5730 6.0300 80.80 2.5050 1 273.0 21.00 396.90 7.88 11.90