diff --git a/GPy/core/gp.py b/GPy/core/gp.py index 38a7bb3d..05ce282c 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -5,6 +5,7 @@ import numpy as np import sys from .. import kern from model import Model +from mapping import Mapping from parameterization import ObsAr from .. import likelihoods from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation @@ -34,7 +35,7 @@ class GP(Model): """ - def __init__(self, X, Y, kernel, likelihood, inference_method=None, name='gp', Y_metadata=None, normalizer=False): + def __init__(self, X, Y, kernel, likelihood, mean_function=None, inference_method=None, name='gp', Y_metadata=None, normalizer=False): super(GP, self).__init__(name) assert X.ndim == 2 @@ -75,6 +76,15 @@ class GP(Model): assert isinstance(likelihood, likelihoods.Likelihood) self.likelihood = likelihood + #handle the mean function + self.mean_function = mean_function + if mean_function is not None: + assert isinstance(self.mean_function, Mapping) + assert mean_function.input_dim == self.input_dim + assert mean_function.output_dim == self.output_dim + self.link_parameter(mean_function) + + #find a sensible inference method logger.info("initializing inference method") if inference_method is None: @@ -153,9 +163,11 @@ class GP(Model): This method is not designed to be called manually, the framework is set up to automatically call this method upon changes to parameters, if you call this method yourself, there may be unexpected consequences. """ - self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y_normalized, self.Y_metadata) + self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y_normalized, self.mean_function, self.Y_metadata) self.likelihood.update_gradients(self.grad_dict['dL_dthetaL']) self.kern.update_gradients_full(self.grad_dict['dL_dK'], self.X) + if self.mean_function is not None: + self.mean_function.update_gradients(self.grad_dict['dL_dm'], self.X) def log_likelihood(self): """ @@ -192,6 +204,10 @@ class GP(Model): #force mu to be a column vector if len(mu.shape)==1: mu = mu[:,None] + + #add the mean function in + if not self.mean_function is None: + mu += self.mean_function.f(_Xnew) return mu, var def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None): @@ -241,12 +257,14 @@ class GP(Model): def predictive_gradients(self, Xnew): """ - Compute the derivatives of the latent function with respect to X* + Compute the derivatives of the predicted latent function with respect to X* Given a set of points at which to predict X* (size [N*,Q]), compute the derivatives of the mean and variance. Resulting arrays are sized: dmu_dX* -- [N*, Q ,D], where D is the number of output in this GP (usually one). + Note that this is not the same as computing the mean and variance of the derivative of the function! + dv_dX* -- [N*, Q], (since all outputs have the same variance) :param X: The points at which to get the predictive gradients :type X: np.ndarray (Xnew x self.input_dim) diff --git a/GPy/core/mapping.py b/GPy/core/mapping.py index 25fe504a..dd45a26e 100644 --- a/GPy/core/mapping.py +++ b/GPy/core/mapping.py @@ -1,4 +1,5 @@ # Copyright (c) 2013,2014, GPy authors (see AUTHORS.txt). 
+# Copyright (c) 2015, James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) import sys @@ -7,7 +8,7 @@ import numpy as np class Mapping(Parameterized): """ - Base model for shared behavior between models that can act like a mapping. + Base model for shared mapping behaviours """ def __init__(self, input_dim, output_dim, name='mapping'): @@ -18,49 +19,12 @@ class Mapping(Parameterized): def f(self, X): raise NotImplementedError - def df_dX(self, dL_df, X): - """Evaluate derivatives of mapping outputs with respect to inputs. - - :param dL_df: gradient of the objective with respect to the function. - :type dL_df: ndarray (num_data x output_dim) - :param X: the input locations where derivatives are to be evaluated. - :type X: ndarray (num_data x input_dim) - :returns: matrix containing gradients of the function with respect to the inputs. - """ + def gradients_X(self, dL_dF, X): raise NotImplementedError - def df_dtheta(self, dL_df, X): - """The gradient of the outputs of the mapping with respect to each of the parameters. - - :param dL_df: gradient of the objective with respect to the function. - :type dL_df: ndarray (num_data x output_dim) - :param X: input locations where the function is evaluated. - :type X: ndarray (num_data x input_dim) - :returns: Matrix containing gradients with respect to parameters of each output for each input data. - :rtype: ndarray (num_params length) - """ - + def update_gradients(self, dL_dF, X): raise NotImplementedError - def plot(self, *args): - """ - Plots the mapping associated with the model. - - In one dimension, the function is plotted. - - In two dimensions, a contour-plot shows the function - - In higher dimensions, we've not implemented this yet !TODO! - - Can plot only part of the data and part of the posterior functions - using which_data and which_functions - - This is a convenience function: arguments are passed to - GPy.plotting.matplot_dep.models_plots.plot_mapping - """ - - if "matplotlib" in sys.modules: - from ..plotting.matplot_dep import models_plots - mapping_plots.plot_mapping(self,*args) - else: - raise NameError, "matplotlib package has not been imported." class Bijective_mapping(Mapping): """ diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py index 36079f2e..0804091c 100644 --- a/GPy/core/parameterization/variational.py +++ b/GPy/core/parameterization/variational.py @@ -50,31 +50,29 @@ class SpikeAndSlabPrior(VariationalPrior): def KL_divergence(self, variational_posterior): mu = variational_posterior.mean S = variational_posterior.variance - gamma,gamma1 = variational_posterior.gamma_probabilities() - log_gamma,log_gamma1 = variational_posterior.gamma_log_prob() + gamma = variational_posterior.gamma.values if len(self.pi.shape)==2: - idx = np.unique(gamma._raveled_index()/gamma.shape[-1]) + idx = np.unique(variational_posterior.gamma._raveled_index()/gamma.shape[-1]) pi = self.pi[idx] else: pi = self.pi var_mean = np.square(mu)/self.variance var_S = (S/self.variance - np.log(S)) - var_gamma = (gamma*(log_gamma-np.log(pi))).sum()+(gamma1*(log_gamma1-np.log(1-pi))).sum() + var_gamma = (gamma*np.log(gamma/pi)).sum()+((1-gamma)*np.log((1-gamma)/(1-pi))).sum() return var_gamma+ (gamma* (np.log(self.variance)-1. +var_mean + var_S)).sum()/2. 
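# Editor's note (illustrative sketch, not part of the patch): with gamma now held as a
# probability in (0, 1), the var_gamma term above is the KL divergence between Bernoulli(gamma)
# and Bernoulli(pi), computed directly from the probabilities rather than via the removed
# logit-based helpers. A minimal NumPy sketch of that term, assuming gamma and pi are arrays
# of probabilities of matching shape:
#     bernoulli_kl = (gamma*np.log(gamma/pi) + (1.-gamma)*np.log((1.-gamma)/(1.-pi))).sum()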
def update_gradients_KL(self, variational_posterior): mu = variational_posterior.mean S = variational_posterior.variance - gamma,gamma1 = variational_posterior.gamma_probabilities() - log_gamma,log_gamma1 = variational_posterior.gamma_log_prob() + gamma = variational_posterior.gamma.values if len(self.pi.shape)==2: - idx = np.unique(gamma._raveled_index()/gamma.shape[-1]) + idx = np.unique(variational_posterior.gamma._raveled_index()/gamma.shape[-1]) pi = self.pi[idx] else: pi = self.pi - variational_posterior.binary_prob.gradient -= (np.log((1-pi)/pi)+log_gamma-log_gamma1+((np.square(mu)+S)/self.variance-np.log(S)+np.log(self.variance)-1.)/2.)*gamma*gamma1 + variational_posterior.binary_prob.gradient -= np.log((1-pi)/pi*gamma/(1.-gamma))+((np.square(mu)+S)/self.variance-np.log(S)+np.log(self.variance)-1.)/2. mu.gradient -= gamma*mu/self.variance S.gradient -= (1./self.variance - 1./S) * gamma /2. if self.learnPi: @@ -161,24 +159,8 @@ class SpikeAndSlabPosterior(VariationalPosterior): binary_prob : the probability of the distribution on the slab part. """ super(SpikeAndSlabPosterior, self).__init__(means, variances, name) - self.gamma = Param("binary_prob",binary_prob) + self.gamma = Param("binary_prob",binary_prob,Logistic(0.,1.)) self.link_parameter(self.gamma) - - @Cache_this(limit=5) - def gamma_probabilities(self): - prob = np.zeros_like(param_to_array(self.gamma)) - prob[self.gamma>-710] = 1./(1.+np.exp(-self.gamma[self.gamma>-710])) - prob1 = -np.zeros_like(param_to_array(self.gamma)) - prob1[self.gamma<710] = 1./(1.+np.exp(self.gamma[self.gamma<710])) - return prob, prob1 - - @Cache_this(limit=5) - def gamma_log_prob(self): - loggamma = param_to_array(self.gamma).copy() - loggamma[loggamma>-40] = -np.log1p(np.exp(-loggamma[loggamma>-40])) - loggamma1 = -param_to_array(self.gamma).copy() - loggamma1[loggamma1>-40] = -np.log1p(np.exp(-loggamma1[loggamma1>-40])) - return loggamma,loggamma1 def set_gradients(self, grad): self.mean.gradient, self.variance.gradient, self.gamma.gradient = grad diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py index 4fcade79..a81b77fa 100644 --- a/GPy/core/sparse_gp.py +++ b/GPy/core/sparse_gp.py @@ -19,7 +19,7 @@ class SparseGP(GP): This model allows (approximate) inference using variational DTC or FITC (Gaussian likelihoods) as well as non-conjugate sparse methods based on these. - + This is not for missing data, as the implementation for missing data involves some inefficient optimization routine decisions. See missing data SparseGP implementation in py:class:'~GPy.models.sparse_gp_minibatch.SparseGPMiniBatch'. 
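# Editor's sketch (not part of the patch): the mean_function argument added in the hunk below
# is forwarded straight to GP.__init__, which checks its dimensions and links it as a parameter.
# Of the inference methods touched in this patch, only exact Gaussian inference and the SVGP
# inference handle a non-None mean function; the others (DTC, FITC, EP, EPDTC, Laplace) assert
# that it is None. Assuming the Linear mapping used in the new examples, intended usage looks
# roughly like:
#     mf = GPy.mappings.Linear(1, 1)
#     m = GPy.core.GP(X, Y, kernel=GPy.kern.RBF(1),
#                     likelihood=GPy.likelihoods.Gaussian(), mean_function=mf)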
@@ -39,7 +39,7 @@ class SparseGP(GP): """ - def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None, + def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, inference_method=None, name='sparse gp', Y_metadata=None, normalizer=False): #pick a sensible inference method if inference_method is None: @@ -53,7 +53,7 @@ class SparseGP(GP): self.Z = Param('inducing inputs', Z) self.num_inducing = Z.shape[0] - GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=normalizer) + GP.__init__(self, X, Y, kernel, likelihood, mean_function, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=normalizer) logger.info("Adding Z as parameter") self.link_parameter(self.Z, index=0) @@ -61,7 +61,7 @@ class SparseGP(GP): def has_uncertain_inputs(self): return isinstance(self.X, VariationalPosterior) - + def set_Z(self, Z, trigger_update=True): if trigger_update: self.update_model(False) self.unlink_parameter(self.Z) @@ -110,8 +110,8 @@ class SparseGP(GP): def _raw_predict(self, Xnew, full_cov=False, kern=None): """ - Make a prediction for the latent function values. - + Make a prediction for the latent function values. + For certain inputs we give back a full_cov of shape NxN, if there is missing data, each dimension has its own full_cov of shape NxNxD, and if full_cov is of, we take only the diagonal elements across N. @@ -136,6 +136,9 @@ class SparseGP(GP): else: Kxx = kern.Kdiag(Xnew) var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T + #add in the mean function + if self.mean_function is not None: + mu += self.mean_function.f(Xnew) else: psi0_star = self.kern.psi0(self.Z, Xnew) psi1_star = self.kern.psi1(self.Z, Xnew) @@ -165,4 +168,5 @@ class SparseGP(GP): var[i] = var_ else: var[i] = np.diag(var_)+p0-t2 + return mu, var diff --git a/GPy/core/svgp.py b/GPy/core/svgp.py index 1966dbef..7783f3b1 100644 --- a/GPy/core/svgp.py +++ b/GPy/core/svgp.py @@ -9,7 +9,7 @@ from ..inference.latent_function_inference import SVGP as svgp_inf class SVGP(SparseGP): - def __init__(self, X, Y, Z, kernel, likelihood, name='SVGP', Y_metadata=None, batchsize=None): + def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, name='SVGP', Y_metadata=None, batchsize=None): """ Stochastic Variational GP. 
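# Editor's note (illustrative, not part of the patch): with a mean function m(.) the SVGP
# inference further down centres the variational distribution over the inducing values on the
# prior mean, so the marginal mean of q(f) becomes
#     m(X) + Knm Kmm^{-1} (q_u_mean - m(Z)),
# which is why separate gradients dL_dmfX (w.r.t. m(X)) and dL_dmfZ (w.r.t. m(Z)) are returned
# and summed into the mapping's gradient in parameters_changed below.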
@@ -38,7 +38,7 @@ class SVGP(SparseGP): #create the SVI inference method inf_method = svgp_inf() - SparseGP.__init__(self, X_batch, Y_batch, Z, kernel, likelihood, inference_method=inf_method, + SparseGP.__init__(self, X_batch, Y_batch, Z, kernel, likelihood, mean_function=mean_function, inference_method=inf_method, name=name, Y_metadata=Y_metadata, normalizer=False) self.m = Param('q_u_mean', np.zeros((self.num_inducing, Y.shape[1]))) @@ -48,7 +48,7 @@ class SVGP(SparseGP): self.link_parameter(self.m) def parameters_changed(self): - self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.q_u_mean, self.q_u_chol, self.kern, self.X, self.Z, self.likelihood, self.Y, self.Y_metadata, KL_scale=1.0, batch_scale=float(self.X_all.shape[0])/float(self.X.shape[0])) + self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.q_u_mean, self.q_u_chol, self.kern, self.X, self.Z, self.likelihood, self.Y, self.mean_function, self.Y_metadata, KL_scale=1.0, batch_scale=float(self.X_all.shape[0])/float(self.X.shape[0])) #update the kernel gradients self.kern.update_gradients_full(self.grad_dict['dL_dKmm'], self.Z) @@ -65,6 +65,13 @@ class SVGP(SparseGP): self.m.gradient = self.grad_dict['dL_dm'] self.chol.gradient = self.grad_dict['dL_dchol'] + if self.mean_function is not None: + self.mean_function.update_gradients(self.grad_dict['dL_dmfX'], self.X) + g = self.mean_function.gradient[:].copy() + self.mean_function.update_gradients(self.grad_dict['dL_dmfZ'], self.Z) + self.mean_function.gradient[:] += g + self.Z.gradient[:] += self.mean_function.gradients_X(self.grad_dict['dL_dmfZ'], self.Z) + def set_data(self, X, Y): """ Set the data without calling parameters_changed to avoid wasted computation diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 37a18f63..0e68d0bf 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -505,3 +505,48 @@ def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True): print m return m + +def simple_mean_function(max_iters=100, optimize=True, plot=True): + """ + The simplest possible mean function. No parameters, just a simple Sinusoid. 
+ """ + #create simple mean function + mf = GPy.core.Mapping(1,1) + mf.f = np.sin + mf.update_gradients = lambda a,b: None + + X = np.linspace(0,10,50).reshape(-1,1) + Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + + k =GPy.kern.RBF(1) + lik = GPy.likelihoods.Gaussian() + m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf) + if optimize: + m.optimize(max_iters=max_iters) + if plot: + m.plot(plot_limits=(-10,15)) + return m + +def parametric_mean_function(max_iters=100, optimize=True, plot=True): + """ + A linear mean function with parameters that we'll learn alongside the kernel + """ + #create simple mean function + mf = GPy.core.Mapping(1,1) + mf.f = np.sin + + X = np.linspace(0,10,50).reshape(-1,1) + Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X + + mf = GPy.mappings.Linear(1,1) + + k =GPy.kern.RBF(1) + lik = GPy.likelihoods.Gaussian() + m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf) + if optimize: + m.optimize(max_iters=max_iters) + if plot: + m.plot() + return m + + diff --git a/GPy/inference/latent_function_inference/__init__.py b/GPy/inference/latent_function_inference/__init__.py index 67f57638..dc7789ba 100644 --- a/GPy/inference/latent_function_inference/__init__.py +++ b/GPy/inference/latent_function_inference/__init__.py @@ -50,19 +50,19 @@ class InferenceMethodList(LatentFunctionInference, list): def on_optimization_end(self): for inf in self: inf.on_optimization_end() - + def __getstate__(self): state = [] for inf in self: state.append(inf) return state - + def __setstate__(self, state): for inf in state: self.append(inf) from exact_gaussian_inference import ExactGaussianInference -from laplace import Laplace +from laplace import Laplace, LaplaceBlock from GPy.inference.latent_function_inference.var_dtc import VarDTC from expectation_propagation import EP from expectation_propagation_dtc import EPDTC @@ -78,9 +78,9 @@ from svgp import SVGP # class EMLikeLatentFunctionInference(LatentFunctionInference): # def update_approximation(self): # """ -# This function gets called when the +# This function gets called when the # """ -# +# # def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None): # """ # Do inference on the latent functions given a covariance function `kern`, @@ -88,7 +88,7 @@ from svgp import SVGP # Additional metadata for the outputs `Y` can be given in `Y_metadata`. # """ # raise NotImplementedError, "Abstract base class for full inference" -# +# # class VariationalLatentFunctionInference(LatentFunctionInference): # def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None): # """ diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py index 5590a079..a12726e2 100644 --- a/GPy/inference/latent_function_inference/dtc.py +++ b/GPy/inference/latent_function_inference/dtc.py @@ -20,7 +20,8 @@ class DTC(LatentFunctionInference): def __init__(self): self.const_jitter = 1e-6 - def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None): + def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None): + assert mean_function is None, "inference with a mean function not implemented" assert X_variance is None, "cannot use X_variance with DTC. Try varDTC." 
num_inducing, _ = Z.shape @@ -88,7 +89,8 @@ class vDTC(object): def __init__(self): self.const_jitter = 1e-6 - def inference(self, kern, X, X_variance, Z, likelihood, Y, Y_metadata): + def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None): + assert mean_function is None, "inference with a mean function not implemented" assert X_variance is None, "cannot use X_variance with DTC. Try varDTC." num_inducing, _ = Z.shape diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py index 1312d36a..b2f1b7d0 100644 --- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py +++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py @@ -36,11 +36,18 @@ class ExactGaussianInference(LatentFunctionInference): #print "WARNING: N>D of Y, we need caching of L, such that L*L^T = Y, returning Y still!" return Y - def inference(self, kern, X, likelihood, Y, Y_metadata=None): + def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None): """ Returns a Posterior class containing essential quantities of the posterior """ - YYT_factor = self.get_YYTfactor(Y) + + if mean_function is None: + m = 0 + else: + m = mean_function.f(X) + + + YYT_factor = self.get_YYTfactor(Y-m) K = kern.K(X) @@ -56,4 +63,4 @@ class ExactGaussianInference(LatentFunctionInference): dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata) - return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL} + return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha} diff --git a/GPy/inference/latent_function_inference/expectation_propagation.py b/GPy/inference/latent_function_inference/expectation_propagation.py index 26144974..e09cf6d0 100644 --- a/GPy/inference/latent_function_inference/expectation_propagation.py +++ b/GPy/inference/latent_function_inference/expectation_propagation.py @@ -33,15 +33,19 @@ class EP(LatentFunctionInference): # TODO: update approximation in the end as well? Maybe even with a switch? 
pass - def inference(self, kern, X, likelihood, Y, Y_metadata=None, Z=None): + def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, Z=None): + assert mean_function is None, "inference with a mean function not implemented" num_data, output_dim = Y.shape assert output_dim ==1, "ep in 1D only (for now!)" K = kern.K(X) if self._ep_approximation is None: + + #if we don't yet have the results of runnign EP, run EP and store the computed factors in self._ep_approximation mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata) else: + #if we've already run EP, just use the existing approximation stored in self._ep_approximation mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde)) diff --git a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py index 35b1b7dc..466cbbb2 100644 --- a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py +++ b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py @@ -64,7 +64,8 @@ class EPDTC(LatentFunctionInference): self.old_mutilde, self.old_vtilde = None, None self._ep_approximation = None - def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None): + def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None): + assert mean_function is None, "inference with a mean function not implemented" num_data, output_dim = Y.shape assert output_dim ==1, "ep in 1D only (for now!)" diff --git a/GPy/inference/latent_function_inference/fitc.py b/GPy/inference/latent_function_inference/fitc.py index a184c6c4..f99b35ff 100644 --- a/GPy/inference/latent_function_inference/fitc.py +++ b/GPy/inference/latent_function_inference/fitc.py @@ -18,7 +18,8 @@ class FITC(LatentFunctionInference): """ const_jitter = 1e-6 - def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None): + def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None): + assert mean_function is None, "inference with a mean function not implemented" num_inducing, _ = Z.shape num_data, output_dim = Y.shape diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py index 05711b0b..862eff2a 100644 --- a/GPy/inference/latent_function_inference/laplace.py +++ b/GPy/inference/latent_function_inference/laplace.py @@ -39,10 +39,11 @@ class Laplace(LatentFunctionInference): self.first_run = True self._previous_Ki_fhat = None - def inference(self, kern, X, likelihood, Y, Y_metadata=None): + def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None): """ Returns a Posterior class containing essential quantities of the posterior """ + assert mean_function is None, "inference with a mean function not implemented" # Compute K K = kern.K(X) @@ -50,21 +51,25 @@ class Laplace(LatentFunctionInference): #Find mode if self.bad_fhat or self.first_run: Ki_f_init = np.zeros_like(Y) - first_run = False + self.first_run = False else: Ki_f_init = self._previous_Ki_fhat + Ki_f_init = np.zeros_like(Y)# FIXME: take this out + f_hat, Ki_fhat = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata) + self.f_hat = f_hat - self.Ki_fhat = Ki_fhat - self.K = K.copy() + #self.Ki_fhat = Ki_fhat + #self.K = K.copy() + #Compute hessian and other variables at mode log_marginal, woodbury_inv, dL_dK, dL_dthetaL = 
self.mode_computations(f_hat, Ki_fhat, K, Y, likelihood, kern, Y_metadata) self._previous_Ki_fhat = Ki_fhat.copy() return Posterior(woodbury_vector=Ki_fhat, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL} - def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None): + def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None, *args, **kwargs): """ Rasmussen's numerically stable mode finding For nomenclature see Rasmussen & Williams 2006 @@ -89,7 +94,12 @@ class Laplace(LatentFunctionInference): #define the objective function (to be maximised) def obj(Ki_f, f): - return -0.5*np.dot(Ki_f.flatten(), f.flatten()) + np.sum(likelihood.logpdf(f, Y, Y_metadata=Y_metadata)) + ll = -0.5*np.sum(np.dot(Ki_f.T, f)) + np.sum(likelihood.logpdf(f, Y, Y_metadata=Y_metadata)) + if np.isnan(ll): + return -np.inf + else: + return ll + difference = np.inf iteration = 0 @@ -104,7 +114,7 @@ class Laplace(LatentFunctionInference): W_f = W*f b = W_f + grad # R+W p46 line 6. - W12BiW12, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave) + W12BiW12, _, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave, *args, **kwargs) W12BiW12Kb = np.dot(W12BiW12, np.dot(K, b)) #Work out the DIRECTION that we want to move in, but don't choose the stepsize yet @@ -121,7 +131,9 @@ class Laplace(LatentFunctionInference): step = optimize.brent(inner_obj, tol=1e-4, maxiter=12) Ki_f_new = Ki_f + step*dKi_f f_new = np.dot(K, Ki_f_new) - + #print "new {} vs old {}".format(obj(Ki_f_new, f_new), obj(Ki_f, f)) + if obj(Ki_f_new, f_new) < obj(Ki_f, f): + raise ValueError("Shouldn't happen, brent optimization failing") difference = np.abs(np.sum(f_new - f)) + np.abs(np.sum(Ki_f_new - Ki_f)) Ki_f = Ki_f_new f = f_new @@ -152,14 +164,10 @@ class Laplace(LatentFunctionInference): if np.any(np.isnan(W)): raise ValueError('One or more element(s) of W is NaN') - K_Wi_i, L, LiW12 = self._compute_B_statistics(K, W, likelihood.log_concave) - - #compute vital matrices - C = np.dot(LiW12, K) - Ki_W_i = K - C.T.dot(C) + K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave) #compute the log marginal - log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - np.sum(np.log(np.diag(L))) + log_marginal = -0.5*np.sum(np.dot(Ki_f.T, f_hat)) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - 0.5*logdet_I_KW # Compute matrices for derivatives dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat @@ -196,23 +204,23 @@ class Laplace(LatentFunctionInference): dL_dthetaL = np.zeros(num_params) for thetaL_i in range(num_params): #Explicit - dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i]) + dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i,:, :]) # The + comes from the fact that dlik_hess_dthetaL == -dW_dthetaL - + 0.5*np.sum(np.diag(Ki_W_i).flatten()*dlik_hess_dthetaL[:, thetaL_i].flatten()) + + 0.5*np.sum(np.diag(Ki_W_i)*np.squeeze(dlik_hess_dthetaL[thetaL_i, :, :])) ) #Implicit - dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[:, thetaL_i]) - #dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[:, thetaL_i]) + dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[thetaL_i, :, :]) + #dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[thetaL_i, :, :]) dL_dthetaL_imp = np.dot(dL_dfhat.T, dfhat_dthetaL) - dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp + dL_dthetaL[thetaL_i] = np.sum(dL_dthetaL_exp + dL_dthetaL_imp) else: dL_dthetaL = 
np.zeros(likelihood.size) return log_marginal, K_Wi_i, dL_dK, dL_dthetaL - def _compute_B_statistics(self, K, W, log_concave): + def _compute_B_statistics(self, K, W, log_concave, *args, **kwargs): """ Rasmussen suggests the use of a numerically stable positive definite matrix B Which has a positive diagonal elements and can be easily inverted @@ -225,7 +233,7 @@ class Laplace(LatentFunctionInference): """ if not log_concave: #print "Under 1e-10: {}".format(np.sum(W < 1e-6)) - W[W<1e-6] = 1e-6 + W = np.clip(W, 1e-6, 1e+30) # NOTE: when setting a parameter inside parameters_changed it will allways come to closed update circles!!! #W.__setitem__(W < 1e-6, 1e-6, update=False) # FIXME-HACK: This is a hack since GPy can't handle negative variances which can occur # If the likelihood is non-log-concave. We wan't to say that there is a negative variance @@ -247,5 +255,160 @@ class Laplace(LatentFunctionInference): #K_Wi_i_2 , _= dpotri(L2) #symmetrify(K_Wi_i_2) - return K_Wi_i, L, LiW12 + #compute vital matrices + C = np.dot(LiW12, K) + Ki_W_i = K - C.T.dot(C) + I_KW_i = np.eye(K.shape[0]) - np.dot(K, K_Wi_i) + logdet_I_KW = 2*np.sum(np.log(np.diag(L))) + + return K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i + +class LaplaceBlock(Laplace): + def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None, *args, **kwargs): + Ki_f = Ki_f_init.copy() + f = np.dot(K, Ki_f) + + #define the objective function (to be maximised) + def obj(Ki_f, f): + ll = -0.5*np.dot(Ki_f.T, f) + np.sum(likelihood.logpdf_sum(f, Y, Y_metadata=Y_metadata)) + if np.isnan(ll): + return -np.inf + else: + return ll + + difference = np.inf + iteration = 0 + + I = np.eye(K.shape[0]) + while difference > self._mode_finding_tolerance and iteration < self._mode_finding_max_iter: + W = -likelihood.d2logpdf_df2(f, Y, Y_metadata=Y_metadata) + + W[np.diag_indices_from(W)] = np.clip(np.diag(W), 1e-6, 1e+30) + + W_f = np.dot(W, f) + grad = likelihood.dlogpdf_df(f, Y, Y_metadata=Y_metadata) + + b = W_f + grad # R+W p46 line 6. 
+ K_Wi_i, _, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave, *args, **kwargs) + + #Work out the DIRECTION that we want to move in, but don't choose the stepsize yet + #a = (I - (K+Wi)i*K)*b + full_step_Ki_f = np.dot(I - np.dot(K_Wi_i, K), b) + dKi_f = full_step_Ki_f - Ki_f + + #define an objective for the line search (minimize this one) + def inner_obj(step_size): + Ki_f_trial = Ki_f + step_size*dKi_f + f_trial = np.dot(K, Ki_f_trial) + return -obj(Ki_f_trial, f_trial) + + #use scipy for the line search, then compute new values of f, Ki_f + step = optimize.brent(inner_obj, tol=1e-4, maxiter=12) + + Ki_f_new = Ki_f + step*dKi_f + f_new = np.dot(K, Ki_f_new) + + difference = np.abs(np.sum(f_new - f)) + np.abs(np.sum(Ki_f_new - Ki_f)) + Ki_f = Ki_f_new + f = f_new + iteration += 1 + + #Warn of bad fits + if difference > self._mode_finding_tolerance: + if not self.bad_fhat: + warnings.warn("Not perfect f_hat fit difference: {}".format(difference)) + self._previous_Ki_fhat = np.zeros_like(Y) + self.bad_fhat = True + elif self.bad_fhat: + self.bad_fhat = False + warnings.warn("f_hat now fine again") + if iteration > self._mode_finding_max_iter: + warnings.warn("didn't find the best") + + return f, Ki_f + + def mode_computations(self, f_hat, Ki_f, K, Y, likelihood, kern, Y_metadata): + #At this point get the hessian matrix (or vector as W is diagonal) + W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata) + + W[np.diag_indices_from(W)] = np.clip(np.diag(W), 1e-6, 1e+30) + + K_Wi_i, log_B_det, I_KW_i, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave) + + #compute the log marginal + #FIXME: The determinant should be output_dim*0.5 I think, gradients may now no longer check + log_marginal = -0.5*np.dot(f_hat.T, Ki_f) + np.sum(likelihood.logpdf_sum(f_hat, Y, Y_metadata=Y_metadata)) - 0.5*log_B_det + + #Compute vital matrices for derivatives + dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat + + #dL_dfhat = np.zeros((f_hat.shape[0])) + #for i in range(f_hat.shape[0]): + #dL_dfhat[i] = -0.5*np.trace(np.dot(Ki_W_i, dW_df[:,:,i])) + + dL_dfhat = -0.5*np.einsum('ij,ijk->k', Ki_W_i, dW_df) + + woodbury_vector = likelihood.dlogpdf_df(f_hat, Y, Y_metadata=Y_metadata) + + #################### + #compute dL_dK# + #################### + if kern.size > 0 and not kern.is_fixed: + #Explicit + explicit_part = 0.5*(np.dot(Ki_f, Ki_f.T) - K_Wi_i) + + #Implicit + implicit_part = woodbury_vector.dot(dL_dfhat[None,:]).dot(I_KW_i) + #implicit_part = Ki_f.dot(dL_dfhat[None,:]).dot(I_KW_i) + + dL_dK = explicit_part + implicit_part + else: + dL_dK = np.zeros_like(K) + + #################### + #compute dL_dthetaL# + #################### + if likelihood.size > 0 and not likelihood.is_fixed: + raise NotImplementedError + else: + dL_dthetaL = np.zeros(likelihood.size) + + #self.K_Wi_i = K_Wi_i + #self.Ki_W_i = Ki_W_i + #self.W = W + #self.K = K + #self.dL_dfhat = dL_dfhat + #self.explicit_part = explicit_part + #self.implicit_part = implicit_part + return log_marginal, K_Wi_i, dL_dK, dL_dthetaL + + def _compute_B_statistics(self, K, W, log_concave, *args, **kwargs): + """ + Rasmussen suggests the use of a numerically stable positive definite matrix B + which has positive diagonal elements and can be easily inverted + + :param K: Prior Covariance matrix evaluated at locations X + :type K: NxN matrix + :param W: Negative hessian at a point (diagonal matrix) + :type W: Vector of diagonal values of hessian (1xN) + :returns: (K_Wi_i, logdet_B, B_inv, Ki_W_i) + """
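# Editor's note (illustrative, not part of the patch): writing B = I + KW, the quantities
# returned by this method satisfy the standard push-through identities
#     K_Wi_i = W B^{-1} = (K + W^{-1})^{-1},   Ki_W_i = B^{-1} K = (K^{-1} + W)^{-1},
#     B^{-1} = I - K (K + W^{-1})^{-1},
# and logdet(B) = log|I + KW| is the determinant term subtracted in the Laplace log marginal above.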
+ #w = GPy.util.diag.view(W) + #W[:] = np.where(w<1e-6, 1e-6, w) + + #B = I + KW + B = np.eye(K.shape[0]) + np.dot(K, W) + #Bi, L, Li, logdetB = pdinv(B) + Bi = np.linalg.inv(B) + + #K_Wi_i = np.eye(K.shape[0]) - mdot(W, Bi, K) + K_Wi_i = np.dot(W, Bi) + + #self.K_Wi_i_brute = np.linalg.inv(K + np.linalg.inv(W)) + #self.B = B + #self.Bi = Bi + Ki_W_i = np.dot(Bi, K) + + sign, logdetB = np.linalg.slogdet(B) + return K_Wi_i, sign*logdetB, Bi, Ki_W_i diff --git a/GPy/inference/latent_function_inference/posterior.py b/GPy/inference/latent_function_inference/posterior.py index 34f0b3bb..a1d42c74 100644 --- a/GPy/inference/latent_function_inference/posterior.py +++ b/GPy/inference/latent_function_inference/posterior.py @@ -15,7 +15,7 @@ class Posterior(object): the function at any new point x_* by integrating over this posterior. """ - def __init__(self, woodbury_chol=None, woodbury_vector=None, K=None, mean=None, cov=None, K_chol=None, woodbury_inv=None): + def __init__(self, woodbury_chol=None, woodbury_vector=None, K=None, mean=None, cov=None, K_chol=None, woodbury_inv=None, prior_mean=0): """ woodbury_chol : a lower triangular matrix L that satisfies posterior_covariance = K - K L^{-T} L^{-1} K woodbury_vector : a matrix (or vector, as Nx1 matrix) M which satisfies posterior_mean = K M @@ -67,6 +67,7 @@ class Posterior(object): #option 2: self._mean = mean self._covariance = cov + self._prior_mean = prior_mean #compute this lazily self._precision = None @@ -175,7 +176,7 @@ class Posterior(object): $$ """ if self._woodbury_vector is None: - self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean) + self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean) return self._woodbury_vector @property diff --git a/GPy/inference/latent_function_inference/svgp.py b/GPy/inference/latent_function_inference/svgp.py index 1974991b..2215356e 100644 --- a/GPy/inference/latent_function_inference/svgp.py +++ b/GPy/inference/latent_function_inference/svgp.py @@ -6,7 +6,8 @@ from posterior import Posterior class SVGP(LatentFunctionInference): - def inference(self, q_u_mean, q_u_chol, kern, X, Z, likelihood, Y, Y_metadata=None, KL_scale=1.0, batch_scale=1.0): + def inference(self, q_u_mean, q_u_chol, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None, KL_scale=1.0, batch_scale=1.0): + num_inducing = Z.shape[0] num_data, num_outputs = Y.shape @@ -22,6 +23,15 @@ class SVGP(LatentFunctionInference): #S = S + np.eye(S.shape[0])*1e-5*np.max(np.max(S)) #Si, Lnew, _,_ = linalg.pdinv(S) + #compute mean function stuff + if mean_function is not None: + prior_mean_u = mean_function.f(Z) + prior_mean_f = mean_function.f(X) + else: + prior_mean_u = np.zeros((num_inducing, num_outputs)) + prior_mean_f = np.zeros((num_data, num_outputs)) + + #compute kernel related stuff Kmm = kern.K(Z) Knm = kern.K(X, Z) @@ -30,29 +40,43 @@ class SVGP(LatentFunctionInference): #compute the marginal means and variances of q(f) A = np.dot(Knm, Kmmi) - mu = np.dot(A, q_u_mean) + mu = prior_mean_f + np.dot(A, q_u_mean - prior_mean_u) v = Knn_diag[:,None] - np.sum(A*Knm,1)[:,None] + np.sum(A[:,:,None] * np.einsum('ij,jkl->ikl', A, S),1) #compute the KL term Kmmim = np.dot(Kmmi, q_u_mean) KLs = -0.5*logdetS -0.5*num_inducing + 0.5*logdetKmm + 0.5*np.einsum('ij,ijk->k', Kmmi, S) + 0.5*np.sum(q_u_mean*Kmmim,0) KL = KLs.sum() - dKL_dm = Kmmim + #gradient of the KL term (assuming zero mean function) + dKL_dm = Kmmim.copy() dKL_dS = 0.5*(Kmmi[:,:,None] - Si) dKL_dKmm = 0.5*num_outputs*Kmmi - 
0.5*Kmmi.dot(S.sum(-1)).dot(Kmmi) - 0.5*Kmmim.dot(Kmmim.T) + if mean_function is not None: + #adjust KL term for mean function + Kmmi_mfZ = np.dot(Kmmi, prior_mean_u) + KL += -np.sum(q_u_mean*Kmmi_mfZ) + KL += 0.5*np.sum(Kmmi_mfZ*prior_mean_u) + + #adjust gradient for mean fucntion + dKL_dm -= Kmmi_mfZ + dKL_dKmm += Kmmim.dot(Kmmi_mfZ.T) + dKL_dKmm -= 0.5*Kmmi_mfZ.dot(Kmmi_mfZ.T) + + #compute gradients for mean_function + dKL_dmfZ = Kmmi_mfZ - Kmmim #quadrature for the likelihood F, dF_dmu, dF_dv, dF_dthetaL = likelihood.variational_expectations(Y, mu, v, Y_metadata=Y_metadata) #rescale the F term if working on a batch F, dF_dmu, dF_dv = F*batch_scale, dF_dmu*batch_scale, dF_dv*batch_scale + if dF_dthetaL is not None: + dF_dthetaL = dF_dthetaL.sum(1).sum(1)*batch_scale - #derivatives of expected likelihood + #derivatives of expected likelihood, assuming zero mean function Adv = A.T[:,:,None]*dF_dv[None,:,:] # As if dF_Dv is diagonal Admu = A.T.dot(dF_dmu) - #AdvA = np.einsum('ijk,jl->ilk', Adv, A) - #AdvA = np.dot(A.T, Adv).swapaxes(0,1) AdvA = np.dstack([np.dot(A.T, Adv[:,:,i].T) for i in range(num_outputs)]) tmp = np.einsum('ijk,jlk->il', AdvA, S).dot(Kmmi) dF_dKmm = -Admu.dot(Kmmim.T) + AdvA.sum(-1) - tmp - tmp.T @@ -62,6 +86,14 @@ class SVGP(LatentFunctionInference): dF_dm = Admu dF_dS = AdvA + #adjust gradient to account for mean function + if mean_function is not None: + dF_dmfX = dF_dmu.copy() + dF_dmfZ = -Admu + dF_dKmn -= np.dot(Kmmi_mfZ, dF_dmu.T) + dF_dKmm += Admu.dot(Kmmi_mfZ.T) + + #sum (gradients of) expected likelihood and KL part log_marginal = F.sum() - KL dL_dm, dL_dS, dL_dKmm, dL_dKmn = dF_dm - dKL_dm, dF_dS- dKL_dS, dF_dKmm- dKL_dKmm, dF_dKmn @@ -69,4 +101,8 @@ class SVGP(LatentFunctionInference): dL_dchol = np.dstack([2.*np.dot(dL_dS[:,:,i], L[:,:,i]) for i in range(num_outputs)]) dL_dchol = choleskies.triang_to_flat(dL_dchol) - return Posterior(mean=q_u_mean, cov=S, K=Kmm), log_marginal, {'dL_dKmm':dL_dKmm, 'dL_dKmn':dL_dKmn, 'dL_dKdiag': dF_dv, 'dL_dm':dL_dm, 'dL_dchol':dL_dchol, 'dL_dthetaL':dF_dthetaL} + grad_dict = {'dL_dKmm':dL_dKmm, 'dL_dKmn':dL_dKmn, 'dL_dKdiag': dF_dv.sum(1), 'dL_dm':dL_dm, 'dL_dchol':dL_dchol, 'dL_dthetaL':dF_dthetaL} + if mean_function is not None: + grad_dict['dL_dmfZ'] = dF_dmfZ - dKL_dmfZ + grad_dict['dL_dmfX'] = dF_dmfX + return Posterior(mean=q_u_mean, cov=S, K=Kmm, prior_mean=prior_mean_u), log_marginal, grad_dict diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py index cac69872..2e633e16 100644 --- a/GPy/inference/latent_function_inference/var_dtc_parallel.py +++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py @@ -169,11 +169,13 @@ class VarDTC_minibatch(LatentFunctionInference): Kmm = kern.K(Z).copy() diag.add(Kmm, self.const_jitter) - Lm = jitchol(Kmm, maxtries=100) + if not np.isfinite(Kmm).all(): + print Kmm + Lm = jitchol(Kmm) LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right') Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT - LL = jitchol(Lambda, maxtries=100) + LL = jitchol(Lambda) logdet_L = 2.*np.sum(np.log(np.diag(LL))) b = dtrtrs(LL,dtrtrs(Lm,psi1Y_full.T)[0])[0] bbt = np.square(b).sum() diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py index 718be74f..0e1f8a0d 100644 --- a/GPy/kern/__init__.py +++ b/GPy/kern/__init__.py @@ -16,5 +16,6 @@ from _src.poly import Poly from _src.eq_ode2 import EQ_ODE2 from _src.trunclinear import TruncLinear,TruncLinear_inf -from _src.splitKern import SplitKern,DiffGenomeKern 
+from _src.splitKern import SplitKern,DEtime +from _src.splitKern import DEtime as DiffGenomeKern diff --git a/GPy/kern/_src/prod.py b/GPy/kern/_src/prod.py index bff6d841..63b23f45 100644 --- a/GPy/kern/_src/prod.py +++ b/GPy/kern/_src/prod.py @@ -6,6 +6,20 @@ from kern import CombinationKernel from ...util.caching import Cache_this import itertools + +def numpy_invalid_op_as_exception(func): + """ + A decorator that allows catching numpy invalid operations + as exceptions (the default behaviour is raising warnings). + """ + def func_wrapper(*args, **kwargs): + np.seterr(invalid='raise') + result = func(*args, **kwargs) + np.seterr(invalid='warn') + return result + return func_wrapper + + class Prod(CombinationKernel): """ Computes the product of 2 kernels @@ -46,18 +60,20 @@ class Prod(CombinationKernel): self.parts[0].update_gradients_full(dL_dK*self.parts[1].K(X,X2), X, X2) self.parts[1].update_gradients_full(dL_dK*self.parts[0].K(X,X2), X, X2) else: - k = self.K(X,X2)*dL_dK - for p in self.parts: - p.update_gradients_full(k/p.K(X,X2),X,X2) + for combination in itertools.combinations(self.parts, len(self.parts) - 1): + prod = reduce(np.multiply, [p.K(X, X2) for p in combination]) + to_update = list(set(self.parts) - set(combination))[0] + to_update.update_gradients_full(dL_dK * prod, X, X2) def update_gradients_diag(self, dL_dKdiag, X): if len(self.parts)==2: self.parts[0].update_gradients_diag(dL_dKdiag*self.parts[1].Kdiag(X), X) self.parts[1].update_gradients_diag(dL_dKdiag*self.parts[0].Kdiag(X), X) else: - k = self.Kdiag(X)*dL_dKdiag - for p in self.parts: - p.update_gradients_diag(k/p.Kdiag(X),X) + for combination in itertools.combinations(self.parts, len(self.parts) - 1): + prod = reduce(np.multiply, [p.Kdiag(X) for p in combination]) + to_update = list(set(self.parts) - set(combination))[0] + to_update.update_gradients_diag(dL_dKdiag * prod, X) def gradients_X(self, dL_dK, X, X2=None): target = np.zeros(X.shape) @@ -65,9 +81,10 @@ class Prod(CombinationKernel): target += self.parts[0].gradients_X(dL_dK*self.parts[1].K(X, X2), X, X2) target += self.parts[1].gradients_X(dL_dK*self.parts[0].K(X, X2), X, X2) else: - k = self.K(X,X2)*dL_dK - for p in self.parts: - target += p.gradients_X(k/p.K(X,X2),X,X2) + for combination in itertools.combinations(self.parts, len(self.parts) - 1): + prod = reduce(np.multiply, [p.K(X, X2) for p in combination]) + to_update = list(set(self.parts) - set(combination))[0] + target += to_update.gradients_X(dL_dK * prod, X, X2) return target def gradients_X_diag(self, dL_dKdiag, X): @@ -80,3 +97,5 @@ class Prod(CombinationKernel): for p in self.parts: target += p.gradients_X_diag(k/p.Kdiag(X),X) return target + + diff --git a/GPy/kern/_src/psi_comp/sslinear_psi_comp.py b/GPy/kern/_src/psi_comp/sslinear_psi_comp.py index 5f261785..d431cd61 100644 --- a/GPy/kern/_src/psi_comp/sslinear_psi_comp.py +++ b/GPy/kern/_src/psi_comp/sslinear_psi_comp.py @@ -37,11 +37,11 @@ def psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variati # Compute for psi0 and psi1 mu2S = np.square(mu)+S - dL_dvar += np.einsum('n,nq,nq->q',dL_dpsi0,gamma,mu2S) + np.einsum('nm,nq,mq,nq->q',dL_dpsi1,gamma,Z,mu) - dL_dgamma += np.einsum('n,q,nq->nq',dL_dpsi0,variance,mu2S) + np.einsum('nm,q,mq,nq->nq',dL_dpsi1,variance,Z,mu) - dL_dmu += np.einsum('n,nq,q,nq->nq',dL_dpsi0,gamma,2.*variance,mu) + np.einsum('nm,nq,q,mq->nq',dL_dpsi1,gamma,variance,Z) - dL_dS += np.einsum('n,nq,q->nq',dL_dpsi0,gamma,variance) - dL_dZ += np.einsum('nm,nq,q,nq->mq',dL_dpsi1,gamma, variance,mu) 
+ dL_dvar += (dL_dpsi0[:,None]*gamma*mu2S).sum(axis=0) + (dL_dpsi1.T.dot(gamma*mu)*Z).sum(axis=0) + dL_dgamma += dL_dpsi0[:,None]*variance*mu2S+ dL_dpsi1.dot(Z)*mu*variance + dL_dmu += dL_dpsi0[:,None]*2.*variance*gamma*mu + dL_dpsi1.dot(Z)*gamma*variance + dL_dS += dL_dpsi0[:,None]*variance*gamma + dL_dZ += dL_dpsi1.T.dot(gamma*mu)*variance return dL_dvar, dL_dZ, dL_dmu, dL_dS, dL_dgamma @@ -64,29 +64,23 @@ def _psi2computations(dL_dpsi2, variance, Z, mu, S, gamma): gamma2 = np.square(gamma) variance2 = np.square(variance) mu2S = mu2+S # NxQ - gvm = np.einsum('nq,nq,q->nq',gamma,mu,variance) - common_sum = np.einsum('nq,mq->nm',gvm,Z) -# common_sum = np.einsum('nq,q,mq,nq->nm',gamma,variance,Z,mu) # NxM - Z_expect = np.einsum('mo,mq,oq->q',dL_dpsi2,Z,Z) + gvm = gamma*mu*variance + common_sum = gvm.dot(Z.T) + Z_expect = (np.dot(dL_dpsi2,Z)*Z).sum(axis=0) + Z_expect_var2 = Z_expect*variance2 dL_dpsi2T = dL_dpsi2+dL_dpsi2.T - tmp = np.einsum('mo,oq->mq',dL_dpsi2T,Z) - common_expect = np.einsum('mq,nm->nq',tmp,common_sum) -# common_expect = np.einsum('mo,mq,no->nq',dL_dpsi2+dL_dpsi2.T,Z,common_sum) - Z2_expect = np.einsum('om,nm->no',dL_dpsi2T,common_sum) - Z1_expect = np.einsum('om,mq->oq',dL_dpsi2T,Z) + common_expect = common_sum.dot(dL_dpsi2T).dot(Z) + Z2_expect = common_sum.dot(dL_dpsi2T) + Z1_expect = dL_dpsi2T.dot(Z) - dL_dvar = np.einsum('nq,q,q->q',2.*(gamma*mu2S-gamma2*mu2),variance,Z_expect)+\ - np.einsum('nq,nq,nq->q',common_expect,gamma,mu) + dL_dvar = variance*Z_expect*2.*(gamma*mu2S-gamma2*mu2).sum(axis=0)+(common_expect*gamma*mu).sum(axis=0) - dL_dgamma = np.einsum('q,q,nq->nq',Z_expect,variance2,(mu2S-2.*gamma*mu2))+\ - np.einsum('nq,q,nq->nq',common_expect,variance,mu) + dL_dgamma = Z_expect_var2*(mu2S-2.*gamma*mu2)+common_expect*mu*variance + + dL_dmu = Z_expect_var2*mu*2.*(gamma-gamma2) + common_expect*gamma*variance + + dL_dS = gamma*Z_expect_var2 - dL_dmu = np.einsum('q,q,nq,nq->nq',Z_expect,variance2,mu,2.*(gamma-gamma2))+\ - np.einsum('nq,nq,q->nq',common_expect,gamma,variance) - - dL_dS = np.einsum('q,nq,q->nq',Z_expect,gamma,variance2) - -# dL_dZ = 2.*(np.einsum('om,nq,q,mq,nq->oq',dL_dpsi2,gamma,variance2,Z,(mu2S-gamma*mu2))+np.einsum('om,nq,q,nq,nm->oq',dL_dpsi2,gamma,variance,mu,common_sum)) - dL_dZ = Z1_expect*np.einsum('nq,q,nq->q',gamma,variance2,(mu2S-gamma*mu2))+np.einsum('nq,q,nq,nm->mq',gamma,variance,mu,Z2_expect) + dL_dZ = (gamma*(mu2S-gamma*mu2)).sum(axis=0)*variance2*Z1_expect+ Z2_expect.T.dot(gamma*mu)*variance return dL_dvar, dL_dgamma, dL_dmu, dL_dS, dL_dZ diff --git a/GPy/kern/_src/psi_comp/ssrbf_psi_comp.py b/GPy/kern/_src/psi_comp/ssrbf_psi_comp.py index 18a4d751..f6a24c86 100644 --- a/GPy/kern/_src/psi_comp/ssrbf_psi_comp.py +++ b/GPy/kern/_src/psi_comp/ssrbf_psi_comp.py @@ -22,12 +22,14 @@ try: # _psi1 NxM mu = variational_posterior.mean S = variational_posterior.variance + gamma = variational_posterior.binary_prob N,M,Q = mu.shape[0],Z.shape[0],mu.shape[1] l2 = np.square(lengthscale) log_denom1 = np.log(S/l2+1) log_denom2 = np.log(2*S/l2+1) - log_gamma,log_gamma1 = variational_posterior.gamma_log_prob() + log_gamma = np.log(gamma) + log_gamma1 = np.log(1.-gamma) variance = float(variance) psi0 = np.empty(N) psi0[:] = variance @@ -37,6 +39,7 @@ try: from ....util.misc import param_to_array S = param_to_array(S) mu = param_to_array(mu) + gamma = param_to_array(gamma) Z = param_to_array(Z) support_code = """ @@ -79,7 +82,7 @@ try: } } """ - weave.inline(code, support_code=support_code, 
arg_names=['psi1','psi2n','N','M','Q','variance','l2','Z','mu','S','log_denom1','log_denom2','log_gamma','log_gamma1'], type_converters=weave.converters.blitz) + weave.inline(code, support_code=support_code, arg_names=['psi1','psi2n','N','M','Q','variance','l2','Z','mu','S','gamma','log_denom1','log_denom2','log_gamma','log_gamma1'], type_converters=weave.converters.blitz) psi2 = psi2n.sum(axis=0) return psi0,psi1,psi2,psi2n @@ -94,12 +97,13 @@ try: mu = variational_posterior.mean S = variational_posterior.variance + gamma = variational_posterior.binary_prob N,M,Q = mu.shape[0],Z.shape[0],mu.shape[1] l2 = np.square(lengthscale) log_denom1 = np.log(S/l2+1) log_denom2 = np.log(2*S/l2+1) - log_gamma,log_gamma1 = variational_posterior.gamma_log_prob() - gamma, gamma1 = variational_posterior.gamma_probabilities() + log_gamma = np.log(gamma) + log_gamma1 = np.log(1.-gamma) variance = float(variance) dvar = np.zeros(1) @@ -113,6 +117,7 @@ try: from ....util.misc import param_to_array S = param_to_array(S) mu = param_to_array(mu) + gamma = param_to_array(gamma) Z = param_to_array(Z) support_code = """ @@ -130,7 +135,6 @@ try: double Zm1q = Z(m1,q); double Zm2q = Z(m2,q); double gnq = gamma(n,q); - double g1nq = gamma1(n,q); double mu_nq = mu(n,q); if(m2==0) { @@ -156,7 +160,7 @@ try: dmu(n,q) += lpsi1*Zmu*d_exp1/(denom*exp_sum); dS(n,q) += lpsi1*(Zmu2_denom-1.)*d_exp1/(denom*exp_sum)/2.; - dgamma(n,q) += lpsi1*(d_exp1*g1nq-d_exp2*gnq)/exp_sum; + dgamma(n,q) += lpsi1*(d_exp1/gnq-d_exp2/(1.-gnq))/exp_sum; dl(q) += lpsi1*((Zmu2_denom+Snq/lq)/denom*d_exp1+Zm1q*Zm1q/(lq*lq)*d_exp2)/(2.*exp_sum); dZ(m1,q) += lpsi1*(-Zmu/denom*d_exp1-Zm1q/lq*d_exp2)/exp_sum; } @@ -184,7 +188,7 @@ try: dmu(n,q) += -2.*lpsi2*muZhat/denom*d_exp1/exp_sum; dS(n,q) += lpsi2*(2.*muZhat2_denom-1.)/denom*d_exp1/exp_sum; - dgamma(n,q) += lpsi2*(d_exp1*g1nq-d_exp2*gnq)/exp_sum; + dgamma(n,q) += lpsi2*(d_exp1/gnq-d_exp2/(1.-gnq))/exp_sum; dl(q) += lpsi2*(((Snq/lq+muZhat2_denom)/denom+dZm1m2*dZm1m2/(4.*lq*lq))*d_exp1+Z2/(2.*lq*lq)*d_exp2)/exp_sum; dZ(m1,q) += 2.*lpsi2*((muZhat/denom-dZm1m2/(2*lq))*d_exp1-Zm1q/lq*d_exp2)/exp_sum; } @@ -192,7 +196,7 @@ try: } } """ - weave.inline(code, support_code=support_code, arg_names=['dL_dpsi1','dL_dpsi2','psi1','psi2n','N','M','Q','variance','l2','Z','mu','S','gamma','gamma1','log_denom1','log_denom2','log_gamma','log_gamma1','dvar','dl','dmu','dS','dgamma','dZ'], type_converters=weave.converters.blitz) + weave.inline(code, support_code=support_code, arg_names=['dL_dpsi1','dL_dpsi2','psi1','psi2n','N','M','Q','variance','l2','Z','mu','S','gamma','log_denom1','log_denom2','log_gamma','log_gamma1','dvar','dl','dmu','dS','dgamma','dZ'], type_converters=weave.converters.blitz) dl *= 2.*lengthscale if not ARD: diff --git a/GPy/kern/_src/splitKern.py b/GPy/kern/_src/splitKern.py index 27e4f76b..3b2e5716 100644 --- a/GPy/kern/_src/splitKern.py +++ b/GPy/kern/_src/splitKern.py @@ -7,7 +7,7 @@ from kern import Kern,CombinationKernel from .independent_outputs import index_to_slices import itertools -class DiffGenomeKern(Kern): +class DEtime(Kern): def __init__(self, kernel, idx_p, Xp, index_dim=-1, name='DiffGenomeKern'): self.idx_p = idx_p diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py index f4223bf4..7f59f5df 100644 --- a/GPy/kern/_src/static.py +++ b/GPy/kern/_src/static.py @@ -60,7 +60,10 @@ class White(Static): return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64) def update_gradients_full(self, dL_dK, X, X2=None): - self.variance.gradient = np.trace(dL_dK) + if X2 is None: + 
self.variance.gradient = np.trace(dL_dK) + else: + self.variance.gradient = 0. def update_gradients_diag(self, dL_dKdiag, X): self.variance.gradient = dL_dKdiag.sum() diff --git a/GPy/kern/_src/stationary.py b/GPy/kern/_src/stationary.py index 06671b23..5fa846d5 100644 --- a/GPy/kern/_src/stationary.py +++ b/GPy/kern/_src/stationary.py @@ -296,6 +296,8 @@ class Exponential(Stationary): return -0.5*self.K_of_r(r) + + class OU(Stationary): """ OU kernel: diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py index 26de274b..f5690aa4 100644 --- a/GPy/likelihoods/bernoulli.py +++ b/GPy/likelihoods/bernoulli.py @@ -77,7 +77,7 @@ class Bernoulli(Likelihood): return Z_hat, mu_hat, sigma2_hat - def variational_expectations(self, Y, m, v, gh_points=None): + def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None): if isinstance(self.gp_link, link_functions.Probit): if gh_points is None: diff --git a/GPy/likelihoods/gaussian.py b/GPy/likelihoods/gaussian.py index 4e7de9e3..f46339d1 100644 --- a/GPy/likelihoods/gaussian.py +++ b/GPy/likelihoods/gaussian.py @@ -34,7 +34,9 @@ class Gaussian(Likelihood): if gp_link is None: gp_link = link_functions.Identity() - assert isinstance(gp_link, link_functions.Identity), "the likelihood only implemented for the identity link" + if not isinstance(gp_link, link_functions.Identity): + print "Warning, Exact inference is not implemeted for non-identity link functions,\ + if you are not already, ensure Laplace inference_method is used" super(Gaussian, self).__init__(gp_link, name=name) @@ -263,16 +265,19 @@ class Gaussian(Likelihood): return d2logpdf_dlink2_dvar def dlogpdf_link_dtheta(self, f, y, Y_metadata=None): - dlogpdf_dvar = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata) - return dlogpdf_dvar + dlogpdf_dtheta = np.zeros((self.size, f.shape[0], f.shape[1])) + dlogpdf_dtheta[0,:,:] = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata) + return dlogpdf_dtheta def dlogpdf_dlink_dtheta(self, f, y, Y_metadata=None): - dlogpdf_dlink_dvar = self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata) - return dlogpdf_dlink_dvar + dlogpdf_dlink_dtheta = np.zeros((self.size, f.shape[0], f.shape[1])) + dlogpdf_dlink_dtheta[0, :, :]= self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata) + return dlogpdf_dlink_dtheta def d2logpdf_dlink2_dtheta(self, f, y, Y_metadata=None): - d2logpdf_dlink2_dvar = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata) - return d2logpdf_dlink2_dvar + d2logpdf_dlink2_dtheta = np.zeros((self.size, f.shape[0], f.shape[1])) + d2logpdf_dlink2_dtheta[0, :, :] = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata) + return d2logpdf_dlink2_dtheta def _mean(self, gp): """ @@ -305,18 +310,17 @@ class Gaussian(Likelihood): Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp]) return Ysim.reshape(orig_shape) - def log_predictive_density(self, y_test, mu_star, var_star): + def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None): """ assumes independence """ v = var_star + self.variance return -0.5*np.log(2*np.pi) -0.5*np.log(v) - 0.5*np.square(y_test - mu_star)/v - def variational_expectations(self, Y, m, v, gh_points=None): + def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None): lik_var = float(self.variance) F = -0.5*np.log(2*np.pi) -0.5*np.log(lik_var) - 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/lik_var dF_dmu = (Y - m)/lik_var dF_dv = np.ones_like(v)*(-0.5/lik_var) - dF_dlik_var = np.sum(-0.5/lik_var + 
0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/(lik_var**2)) - dF_dtheta = [dF_dlik_var] - return F, dF_dmu, dF_dv, dF_dtheta + dF_dtheta = -0.5/lik_var + 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/(lik_var**2) + return F, dF_dmu, dF_dv, dF_dtheta.reshape(1, Y.shape[0], Y.shape[1]) diff --git a/GPy/likelihoods/likelihood.py b/GPy/likelihoods/likelihood.py index b1e78b93..1295245c 100644 --- a/GPy/likelihoods/likelihood.py +++ b/GPy/likelihoods/likelihood.py @@ -5,7 +5,7 @@ import numpy as np from scipy import stats,special import scipy as sp import link_functions -from ..util.misc import chain_1, chain_2, chain_3 +from ..util.misc import chain_1, chain_2, chain_3, blockify_dhess_dtheta, blockify_third, blockify_hessian, safe_exp from scipy.integrate import quad import warnings from ..core.parameterization import Parameterized @@ -39,6 +39,7 @@ class Likelihood(Parameterized): assert isinstance(gp_link,link_functions.GPTransformation), "gp_link is not a valid GPTransformation." self.gp_link = gp_link self.log_concave = False + self.not_block_really = False def _gradients(self,partial): return np.zeros(0) @@ -177,7 +178,12 @@ class Likelihood(Parameterized): if np.any(np.isnan(dF_dm)) or np.any(np.isinf(dF_dm)): stop - dF_dtheta = None # Not yet implemented + if self.size: + dF_dtheta = self.dlogpdf_dtheta(X, Y[:,None]) # Ntheta x (orig size) x N_{quad_points} + dF_dtheta = np.dot(dF_dtheta, gh_w) + dF_dtheta = dF_dtheta.reshape(self.size, shape[0], shape[1]) + else: + dF_dtheta = None # Not yet implemented return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), dF_dtheta def predictive_mean(self, mu, variance, Y_metadata=None): @@ -189,20 +195,27 @@ class Likelihood(Parameterized): """ #conditional_mean: the edpected value of y given some f, under this likelihood + fmin = -np.inf + fmax = np.inf def int_mean(f,m,v): - p = np.exp(-(0.5/v)*np.square(f - m)) + exponent = -(0.5/v)*np.square(f - m) + #If exponent is under -30 then exp(exponent) will be very small, so don't exp it!) #If p is zero then conditional_mean will overflow + assert v.all() > 0 + p = safe_exp(exponent) + + #If p is zero then conditional_variance will overflow if p < 1e-10: return 0. 
else: return self.conditional_mean(f)*p - scaled_mean = [quad(int_mean, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)] + scaled_mean = [quad(int_mean, fmin, fmax,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)] mean = np.array(scaled_mean)[:,None] / np.sqrt(2*np.pi*(variance)) return mean def _conditional_mean(self, f): - """Quadrature calculation of the conditional mean: E(Y_star|f)""" + """Quadrature calculation of the conditional mean: E(Y_star|f_star)""" raise NotImplementedError, "implement this function to make predictions" def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None): @@ -210,7 +223,7 @@ class Likelihood(Parameterized): Approximation to the predictive variance: V(Y_star) The following variance decomposition is used: - V(Y_star) = E( V(Y_star|f_star) ) + V( E(Y_star|f_star) ) + V(Y_star) = E( V(Y_star|f_star)**2 ) + V( E(Y_star|f_star) )**2 :param mu: mean of posterior :param sigma: standard deviation of posterior @@ -220,15 +233,22 @@ class Likelihood(Parameterized): #sigma2 = sigma**2 normalizer = np.sqrt(2*np.pi*variance) + fmin_v = -np.inf + fmin_m = np.inf + fmin = -np.inf + fmax = np.inf + + from ..util.misc import safe_exp # E( V(Y_star|f_star) ) def int_var(f,m,v): - p = np.exp(-(0.5/v)*np.square(f - m)) + exponent = -(0.5/v)*np.square(f - m) + p = safe_exp(exponent) #If p is zero then conditional_variance will overflow if p < 1e-10: return 0. else: return self.conditional_variance(f)*p - scaled_exp_variance = [quad(int_var, -np.inf, np.inf,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)] + scaled_exp_variance = [quad(int_var, fmin_v, fmax,args=(mj,s2j))[0] for mj,s2j in zip(mu,variance)] exp_var = np.array(scaled_exp_variance)[:,None] / normalizer #V( E(Y_star|f_star) ) = E( E(Y_star|f_star)**2 ) - E( E(Y_star|f_star) )**2 @@ -240,14 +260,15 @@ class Likelihood(Parameterized): #E( E(Y_star|f_star)**2 ) def int_pred_mean_sq(f,m,v,predictive_mean_sq): - p = np.exp(-(0.5/v)*np.square(f - m)) + exponent = -(0.5/v)*np.square(f - m) + p = np.exp(exponent) #If p is zero then conditional_mean**2 will overflow if p < 1e-10: return 0. 
else: return self.conditional_mean(f)**2*p - scaled_exp_exp2 = [quad(int_pred_mean_sq, -np.inf, np.inf,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)] + scaled_exp_exp2 = [quad(int_pred_mean_sq, fmin_m, fmax,args=(mj,s2j,pm2j))[0] for mj,s2j,pm2j in zip(mu,variance,predictive_mean_sq)] exp_exp2 = np.array(scaled_exp_exp2)[:,None] / normalizer var_exp = exp_exp2 - predictive_mean_sq @@ -295,8 +316,18 @@ class Likelihood(Parameterized): :returns: likelihood evaluated for this point :rtype: float """ - inv_link_f = self.gp_link.transf(f) - return self.pdf_link(inv_link_f, y, Y_metadata=Y_metadata) + if isinstance(self.gp_link, link_functions.Identity): + return self.pdf_link(f, y, Y_metadata=Y_metadata) + else: + inv_link_f = self.gp_link.transf(f) + return self.pdf_link(inv_link_f, y, Y_metadata=Y_metadata) + + def logpdf_sum(self, f, y, Y_metadata=None): + """ + Convenience function that can overridden for functions where this could + be computed more efficiently + """ + return np.sum(self.logpdf(f, y, Y_metadata=Y_metadata)) def logpdf(self, f, y, Y_metadata=None): """ @@ -313,8 +344,11 @@ class Likelihood(Parameterized): :returns: log likelihood evaluated for this point :rtype: float """ - inv_link_f = self.gp_link.transf(f) - return self.logpdf_link(inv_link_f, y, Y_metadata=Y_metadata) + if isinstance(self.gp_link, link_functions.Identity): + return self.logpdf_link(f, y, Y_metadata=Y_metadata) + else: + inv_link_f = self.gp_link.transf(f) + return self.logpdf_link(inv_link_f, y, Y_metadata=Y_metadata) def dlogpdf_df(self, f, y, Y_metadata=None): """ @@ -332,11 +366,15 @@ class Likelihood(Parameterized): :returns: derivative of log likelihood evaluated for this point :rtype: 1xN array """ - inv_link_f = self.gp_link.transf(f) - dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata) - dlink_df = self.gp_link.dtransf_df(f) - return chain_1(dlogpdf_dlink, dlink_df) + if isinstance(self.gp_link, link_functions.Identity): + return self.dlogpdf_dlink(f, y, Y_metadata=Y_metadata) + else: + inv_link_f = self.gp_link.transf(f) + dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata) + dlink_df = self.gp_link.dtransf_df(f) + return chain_1(dlogpdf_dlink, dlink_df) + @blockify_hessian def d2logpdf_df2(self, f, y, Y_metadata=None): """ Evaluates the link function link(f) then computes the second derivative of log likelihood using it @@ -353,13 +391,18 @@ class Likelihood(Parameterized): :returns: second derivative of log likelihood evaluated for this point (diagonal only) :rtype: 1xN array """ - inv_link_f = self.gp_link.transf(f) - d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata) - dlink_df = self.gp_link.dtransf_df(f) - dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata) - d2link_df2 = self.gp_link.d2transf_df2(f) - return chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2) + if isinstance(self.gp_link, link_functions.Identity): + d2logpdf_df2 = self.d2logpdf_dlink2(f, y, Y_metadata=Y_metadata) + else: + inv_link_f = self.gp_link.transf(f) + d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata) + dlink_df = self.gp_link.dtransf_df(f) + dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata) + d2link_df2 = self.gp_link.d2transf_df2(f) + d2logpdf_df2 = chain_2(d2logpdf_dlink2, dlink_df, dlogpdf_dlink, d2link_df2) + return d2logpdf_df2 + @blockify_third def d3logpdf_df3(self, f, y, Y_metadata=None): """ Evaluates the link function 
link(f) then computes the third derivative of log likelihood using it @@ -376,64 +419,96 @@ class Likelihood(Parameterized): :returns: third derivative of log likelihood evaluated for this point :rtype: float """ - inv_link_f = self.gp_link.transf(f) - d3logpdf_dlink3 = self.d3logpdf_dlink3(inv_link_f, y, Y_metadata=Y_metadata) - dlink_df = self.gp_link.dtransf_df(f) - d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata) - d2link_df2 = self.gp_link.d2transf_df2(f) - dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata) - d3link_df3 = self.gp_link.d3transf_df3(f) - return chain_3(d3logpdf_dlink3, dlink_df, d2logpdf_dlink2, d2link_df2, dlogpdf_dlink, d3link_df3) + if isinstance(self.gp_link, link_functions.Identity): + d3logpdf_df3 = self.d3logpdf_dlink3(f, y, Y_metadata=Y_metadata) + else: + inv_link_f = self.gp_link.transf(f) + d3logpdf_dlink3 = self.d3logpdf_dlink3(inv_link_f, y, Y_metadata=Y_metadata) + dlink_df = self.gp_link.dtransf_df(f) + d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata) + d2link_df2 = self.gp_link.d2transf_df2(f) + dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata) + d3link_df3 = self.gp_link.d3transf_df3(f) + d3logpdf_df3 = chain_3(d3logpdf_dlink3, dlink_df, d2logpdf_dlink2, d2link_df2, dlogpdf_dlink, d3link_df3) + return d3logpdf_df3 + def dlogpdf_dtheta(self, f, y, Y_metadata=None): """ TODO: Doc strings """ if self.size > 0: - inv_link_f = self.gp_link.transf(f) - return self.dlogpdf_link_dtheta(inv_link_f, y, Y_metadata=Y_metadata) + if self.not_block_really: + raise NotImplementedError("Need to make a decorator for this!") + if isinstance(self.gp_link, link_functions.Identity): + return self.dlogpdf_link_dtheta(f, y, Y_metadata=Y_metadata) + else: + inv_link_f = self.gp_link.transf(f) + return self.dlogpdf_link_dtheta(inv_link_f, y, Y_metadata=Y_metadata) else: # There are no parameters so return an empty array for derivatives - return np.zeros([1, 0]) + return np.zeros((0, f.shape[0], f.shape[1])) def dlogpdf_df_dtheta(self, f, y, Y_metadata=None): """ TODO: Doc strings """ if self.size > 0: - inv_link_f = self.gp_link.transf(f) - dlink_df = self.gp_link.dtransf_df(f) - dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata) - return chain_1(dlogpdf_dlink_dtheta, dlink_df) + if self.not_block_really: + raise NotImplementedError("Need to make a decorator for this!") + if isinstance(self.gp_link, link_functions.Identity): + return self.dlogpdf_dlink_dtheta(f, y, Y_metadata=Y_metadata) + else: + inv_link_f = self.gp_link.transf(f) + dlink_df = self.gp_link.dtransf_df(f) + dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata) + + dlogpdf_df_dtheta = np.zeros((self.size, f.shape[0], f.shape[1])) + #Chain each parameter of hte likelihood seperately + for p in range(self.size): + dlogpdf_df_dtheta[p, :, :] = chain_1(dlogpdf_dlink_dtheta[p,:,:], dlink_df) + return dlogpdf_df_dtheta + #return chain_1(dlogpdf_dlink_dtheta, dlink_df) else: # There are no parameters so return an empty array for derivatives - return np.zeros([f.shape[0], 0]) + return np.zeros((0, f.shape[0], f.shape[1])) def d2logpdf_df2_dtheta(self, f, y, Y_metadata=None): """ TODO: Doc strings """ if self.size > 0: - inv_link_f = self.gp_link.transf(f) - dlink_df = self.gp_link.dtransf_df(f) - d2link_df2 = self.gp_link.d2transf_df2(f) - d2logpdf_dlink2_dtheta = self.d2logpdf_dlink2_dtheta(inv_link_f, y, Y_metadata=Y_metadata) - dlogpdf_dlink_dtheta = 
self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata) - return chain_2(d2logpdf_dlink2_dtheta, dlink_df, dlogpdf_dlink_dtheta, d2link_df2) + if self.not_block_really: + raise NotImplementedError("Need to make a decorator for this!") + if isinstance(self.gp_link, link_functions.Identity): + return self.d2logpdf_dlink2_dtheta(f, y, Y_metadata=Y_metadata) + else: + inv_link_f = self.gp_link.transf(f) + dlink_df = self.gp_link.dtransf_df(f) + d2link_df2 = self.gp_link.d2transf_df2(f) + d2logpdf_dlink2_dtheta = self.d2logpdf_dlink2_dtheta(inv_link_f, y, Y_metadata=Y_metadata) + dlogpdf_dlink_dtheta = self.dlogpdf_dlink_dtheta(inv_link_f, y, Y_metadata=Y_metadata) + + d2logpdf_df2_dtheta = np.zeros((self.size, f.shape[0], f.shape[1])) + #Chain each parameter of hte likelihood seperately + for p in range(self.size): + d2logpdf_df2_dtheta[p, :, :] = chain_2(d2logpdf_dlink2_dtheta[p,:,:], dlink_df, dlogpdf_dlink_dtheta[p,:,:], d2link_df2) + return d2logpdf_df2_dtheta + #return chain_2(d2logpdf_dlink2_dtheta, dlink_df, dlogpdf_dlink_dtheta, d2link_df2) else: # There are no parameters so return an empty array for derivatives - return np.zeros([f.shape[0], 0]) + return np.zeros((0, f.shape[0], f.shape[1])) def _laplace_gradients(self, f, y, Y_metadata=None): - dlogpdf_dtheta = self.dlogpdf_dtheta(f, y, Y_metadata=Y_metadata).sum(axis=0) + dlogpdf_dtheta = self.dlogpdf_dtheta(f, y, Y_metadata=Y_metadata) dlogpdf_df_dtheta = self.dlogpdf_df_dtheta(f, y, Y_metadata=Y_metadata) d2logpdf_df2_dtheta = self.d2logpdf_df2_dtheta(f, y, Y_metadata=Y_metadata) #Parameters are stacked vertically. Must be listed in same order as 'get_param_names' # ensure we have gradients for every parameter we want to optimize - assert len(dlogpdf_dtheta) == self.size #1 x num_param array - assert dlogpdf_df_dtheta.shape[1] == self.size #f x num_param matrix - assert d2logpdf_df2_dtheta.shape[1] == self.size #f x num_param matrix + assert dlogpdf_dtheta.shape[0] == self.size #f, d x num_param array + assert dlogpdf_df_dtheta.shape[0] == self.size #f x d x num_param matrix or just f x num_param + assert d2logpdf_df2_dtheta.shape[0] == self.size #f x num_param matrix or f x d x num_param matrix, f x f x num_param or f x f x d x num_param return dlogpdf_dtheta, dlogpdf_df_dtheta, d2logpdf_df2_dtheta @@ -454,19 +529,98 @@ class Likelihood(Parameterized): def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None): #compute the quantiles by sampling!!! - N_samp = 1000 + N_samp = 500 s = np.random.randn(mu.shape[0], N_samp)*np.sqrt(var) + mu #ss_f = s.flatten() #ss_y = self.samples(ss_f, Y_metadata) + #ss_y = self.samples(s, Y_metadata, samples=100) ss_y = self.samples(s, Y_metadata) #ss_y = ss_y.reshape(mu.shape[0], N_samp) return [np.percentile(ss_y ,q, axis=1)[:,None] for q in quantiles] - def samples(self, gp, Y_metadata=None): + def samples(self, gp, Y_metadata=None, samples=1): """ Returns a set of samples of observations based on a given value of the latent variable. 
:param gp: latent variable + :param samples: number of samples to take for each f location """ - raise NotImplementedError + raise NotImplementedError("""May be possible to use MCMC with user-tuning, see + MCMC_pdf_samples in likelihood.py and write samples function + using this, beware this is a simple implementation + of Metropolis and will not work well for all likelihoods""") + + def MCMC_pdf_samples(self, fNew, num_samples=1000, starting_loc=None, stepsize=0.1, burn_in=1000, Y_metadata=None): + """ + Simple implementation of Metropolis sampling algorithm + + Will run a parallel chain for each input dimension (treats each f independently) + Thus assumes f*_1 independant of f*_2 etc. + + :param num_samples: Number of samples to take + :param fNew: f at which to sample around + :param starting_loc: Starting locations of the independant chains (usually will be conditional_mean of likelihood), often link_f + :param stepsize: Stepsize for the normal proposal distribution (will need modifying) + :param burnin: number of samples to use for burnin (will need modifying) + :param Y_metadata: Y_metadata for pdf + """ + print "Warning, using MCMC for sampling y*, needs to be tuned!" + if starting_loc is None: + starting_loc = fNew + from functools import partial + logpdf = partial(self.logpdf, f=fNew, Y_metadata=Y_metadata) + pdf = lambda y_star: np.exp(logpdf(y=y_star[:, None])) + #Should be the link function of f is a good starting point + #(i.e. the point before you corrupt it with the likelihood) + par_chains = starting_loc.shape[0] + chain_values = np.zeros((par_chains, num_samples)) + chain_values[:, 0][:,None] = starting_loc + #Use same stepsize for all par_chains + stepsize = np.ones(par_chains)*stepsize + accepted = np.zeros((par_chains, num_samples+burn_in)) + accept_ratio = np.zeros(num_samples+burn_in) + #Whilst burning in, only need to keep the previous lot + burnin_cache = np.zeros(par_chains) + burnin_cache[:] = starting_loc.flatten() + burning_in = True + for i in xrange(burn_in+num_samples): + next_ind = i-burn_in + if burning_in: + old_y = burnin_cache + else: + old_y = chain_values[:,next_ind-1] + + old_lik = pdf(old_y) + #Propose new y from Gaussian proposal + new_y = np.random.normal(loc=old_y, scale=stepsize) + new_lik = pdf(new_y) + #Accept using Metropolis (not hastings) acceptance + #Always accepts if new_lik > old_lik + accept_probability = np.minimum(1, new_lik/old_lik) + u = np.random.uniform(0,1,par_chains) + #print "Accept prob: ", accept_probability + accepts = u < accept_probability + if burning_in: + burnin_cache[accepts] = new_y[accepts] + burnin_cache[~accepts] = old_y[~accepts] + if i == burn_in: + burning_in = False + chain_values[:,0] = burnin_cache + else: + #If it was accepted then new_y becomes the latest sample + chain_values[accepts, next_ind] = new_y[accepts] + #Otherwise use old y as the sample + chain_values[~accepts, next_ind] = old_y[~accepts] + + accepted[~accepts, i] = 0 + accepted[accepts, i] = 1 + accept_ratio[i] = np.sum(accepted[:,i])/float(par_chains) + + #Show progress + if i % int((burn_in+num_samples)*0.1) == 0: + print "{}% of samples taken ({})".format((i/int((burn_in+num_samples)*0.1)*10), i) + print "Last run accept ratio: ", accept_ratio[i] + + print "Average accept ratio: ", np.mean(accept_ratio) + return chain_values diff --git a/GPy/likelihoods/student_t.py b/GPy/likelihoods/student_t.py index dbd4d94f..674972bf 100644 --- a/GPy/likelihoods/student_t.py +++ b/GPy/likelihoods/student_t.py @@ -35,8 +35,8 @@ class StudentT(Likelihood): 
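A minimal sketch of the parameter-derivative layout introduced in likelihood.py above: each d/dtheta method now returns one (N, D) slab per likelihood parameter, stacked into a (num_params, N, D) array rather than horizontally stacked columns. This is not taken from the patch; the StudentT settings and the random inputs are illustrative assumptions, and only the shapes matter.

import numpy as np
import GPy

lik = GPy.likelihoods.StudentT(deg_free=5., sigma2=0.3)
f = np.random.randn(10, 1)
y = np.random.randn(10, 1)
grads = lik.dlogpdf_dtheta(f, y)   # derivative of log p(y|f) w.r.t. each likelihood parameter
print grads.shape                  # (2, 10, 1): one (N, D) slab per parameter (scale and deg_free)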
self.log_concave = False - def parameters_changed(self): - self.variance = (self.v / float(self.v - 2)) * self.sigma2 + #def parameters_changed(self): + #self.variance = (self.v / float(self.v - 2)) * self.sigma2 def update_gradients(self, grads): """ @@ -180,7 +180,8 @@ class StudentT(Likelihood): :rtype: float """ e = y - inv_link_f - dlogpdf_dvar = self.v*(e**2 - self.sigma2)/(2*self.sigma2*(self.sigma2*self.v + e**2)) + e2 = np.square(e) + dlogpdf_dvar = self.v*(e2 - self.sigma2)/(2*self.sigma2*(self.sigma2*self.v + e2)) return dlogpdf_dvar def dlogpdf_dlink_dvar(self, inv_link_f, y, Y_metadata=None): @@ -226,17 +227,18 @@ class StudentT(Likelihood): def dlogpdf_link_dtheta(self, f, y, Y_metadata=None): dlogpdf_dvar = self.dlogpdf_link_dvar(f, y, Y_metadata=Y_metadata) dlogpdf_dv = np.zeros_like(dlogpdf_dvar) #FIXME: Not done yet - return np.hstack((dlogpdf_dvar, dlogpdf_dv)) + return np.array((dlogpdf_dvar, dlogpdf_dv)) def dlogpdf_dlink_dtheta(self, f, y, Y_metadata=None): dlogpdf_dlink_dvar = self.dlogpdf_dlink_dvar(f, y, Y_metadata=Y_metadata) dlogpdf_dlink_dv = np.zeros_like(dlogpdf_dlink_dvar) #FIXME: Not done yet - return np.hstack((dlogpdf_dlink_dvar, dlogpdf_dlink_dv)) + return np.array((dlogpdf_dlink_dvar, dlogpdf_dlink_dv)) def d2logpdf_dlink2_dtheta(self, f, y, Y_metadata=None): d2logpdf_dlink2_dvar = self.d2logpdf_dlink2_dvar(f, y, Y_metadata=Y_metadata) d2logpdf_dlink2_dv = np.zeros_like(d2logpdf_dlink2_dvar) #FIXME: Not done yet - return np.hstack((d2logpdf_dlink2_dvar, d2logpdf_dlink2_dv)) + + return np.array((d2logpdf_dlink2_dvar, d2logpdf_dlink2_dv)) def predictive_mean(self, mu, sigma, Y_metadata=None): # The comment here confuses mean and median. diff --git a/GPy/mappings/__init__.py b/GPy/mappings/__init__.py index d331c678..b1cb194b 100644 --- a/GPy/mappings/__init__.py +++ b/GPy/mappings/__init__.py @@ -4,4 +4,5 @@ from kernel import Kernel from linear import Linear from mlp import MLP -#from rbf import RBF +from additive import Additive +from compound import Compound diff --git a/GPy/mappings/additive.py b/GPy/mappings/additive.py index 5297982b..1c86b680 100644 --- a/GPy/mappings/additive.py +++ b/GPy/mappings/additive.py @@ -2,8 +2,7 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from ..core.mapping import Mapping -import GPy +from ..core import Mapping class Additive(Mapping): """ @@ -17,45 +16,23 @@ class Additive(Mapping): :type mapping1: GPy.mappings.Mapping :param mapping2: second mapping to add together. 
:type mapping2: GPy.mappings.Mapping - :param tensor: whether or not to use the tensor product of input spaces - :type tensor: bool """ - def __init__(self, mapping1, mapping2, tensor=False): - if tensor: - input_dim = mapping1.input_dim + mapping2.input_dim - else: - input_dim = mapping1.input_dim - assert(mapping1.input_dim==mapping2.input_dim) + def __init__(self, mapping1, mapping2): + assert(mapping1.input_dim==mapping2.input_dim) assert(mapping1.output_dim==mapping2.output_dim) - output_dim = mapping1.output_dim + input_dim, output_dim = mapping1.input_dim, mapping1.output_dim Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim) self.mapping1 = mapping1 self.mapping2 = mapping2 - self.num_params = self.mapping1.num_params + self.mapping2.num_params - self.name = self.mapping1.name + '+' + self.mapping2.name - def _get_param_names(self): - return self.mapping1._get_param_names + self.mapping2._get_param_names - - def _get_params(self): - return np.hstack((self.mapping1._get_params(), self.mapping2._get_params())) - - def _set_params(self, x): - self.mapping1._set_params(x[:self.mapping1.num_params]) - self.mapping2._set_params(x[self.mapping1.num_params:]) - - def randomize(self): - self.mapping1._randomize() - self.mapping2._randomize() def f(self, X): return self.mapping1.f(X) + self.mapping2.f(X) - def df_dtheta(self, dL_df, X): - self._df_dA = (dL_df[:, :, None]*self.kern.K(X, self.X)[:, None, :]).sum(0).T - self._df_dbias = (dL_df.sum(0)) - return np.hstack((self._df_dA.flatten(), self._df_dbias)) + def update_gradients(self, dL_dF, X): + self.mapping1.update_gradients(dL_dF, X) + self.mapping2.update_gradients(dL_dF, X) - def df_dX(self, dL_df, X): - return self.kern.dK_dX((dL_df[:, None, :]*self.A[None, :, :]).sum(2), X, self.X) + def gradients_X(self, dL_dF, X): + return self.mapping1.gradients_X(dL_dF, X) + self.mapping2.gradients_X(dL_dF, X) diff --git a/GPy/mappings/compound.py b/GPy/mappings/compound.py new file mode 100644 index 00000000..5a1e8dd1 --- /dev/null +++ b/GPy/mappings/compound.py @@ -0,0 +1,39 @@ +# Copyright (c) 2015, James Hensman and Alan Saul +# Licensed under the BSD 3-clause license (see LICENSE.txt) + +from ..core import Mapping + +class Compound(Mapping): + """ + Mapping based on passing one mapping through another + + .. 
math:: + + f(\mathbf{x}) = f_2(f_1(\mathbf{x})) + + :param mapping1: first mapping + :type mapping1: GPy.mappings.Mapping + :param mapping2: second mapping + :type mapping2: GPy.mappings.Mapping + + """ + + def __init__(self, mapping1, mapping2): + assert(mapping1.output_dim==mapping2.input_dim) + input_dim, output_dim = mapping1.input_dim, mapping2.output_dim + Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim) + self.mapping1 = mapping1 + self.mapping2 = mapping2 + self.link_parameters(self.mapping1, self.mapping2) + + def f(self, X): + return self.mapping2.f(self.mapping1.f(X)) + + def update_gradients(self, dL_dF, X): + hidden = self.mapping1.f(X) + self.mapping2.update_gradients(dL_dF, hidden) + self.mapping1.update_gradients(self.mapping2.gradients_X(dL_dF, hidden), X) + + def gradients_X(self, dL_dF, X): + hidden = self.mapping1.f(X) + return self.mapping1.gradients_X(self.mapping2.gradients_X(dL_dF, hidden), X) diff --git a/GPy/mappings/identity.py b/GPy/mappings/identity.py new file mode 100644 index 00000000..b15e476c --- /dev/null +++ b/GPy/mappings/identity.py @@ -0,0 +1,26 @@ +# Copyright (c) 2015, James Hensman + +from ..core.mapping import Mapping +from ..core import Param + +class Identity(Mapping): + """ + A mapping that does nothing! + """ + def __init__(self, input_dim, output_dim, name='identity'): + Mapping.__init__(self, input_dim, output_dim, name) + + def f(self, X): + return X + + def update_gradients(self, dL_dF, X): + pass + + def gradients_X(self, dL_dF, X): + return dL_dF + + + + + + diff --git a/GPy/mappings/kernel.py b/GPy/mappings/kernel.py index 74fa344f..ea1720db 100644 --- a/GPy/mappings/kernel.py +++ b/GPy/mappings/kernel.py @@ -1,9 +1,10 @@ # Copyright (c) 2013, GPy authors (see AUTHORS.txt). +# Copyright (c) 2015, James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np from ..core.mapping import Mapping -import GPy +from ..core import Param class Kernel(Mapping): """ @@ -11,50 +12,41 @@ class Kernel(Mapping): .. math:: - f(\mathbf{x}*) = \mathbf{A}\mathbf{k}(\mathbf{X}, \mathbf{x}^*) + \mathbf{b} + f(\mathbf{x}) = \sum_i \alpha_i k(\mathbf{z}_i, \mathbf{x}) - :param X: input observations containing :math:`\mathbf{X}` - :type X: ndarray + or for multple outputs + + .. math:: + + f_i(\mathbf{x}) = \sum_j \alpha_{i,j} k(\mathbf{z}_i, \mathbf{x}) + + + :param input_dim: dimension of input. + :type input_dim: int :param output_dim: dimension of output. 
:type output_dim: int + :param Z: input observations containing :math:`\mathbf{Z}` + :type Z: ndarray :param kernel: a GPy kernel, defaults to GPy.kern.RBF :type kernel: GPy.kern.kern """ - def __init__(self, X, output_dim=1, kernel=None): - Mapping.__init__(self, input_dim=X.shape[1], output_dim=output_dim) - if kernel is None: - kernel = GPy.kern.RBF(self.input_dim) + def __init__(self, input_dim, output_dim, Z, kernel, name='kernmap'): + Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name) self.kern = kernel - self.X = X - self.num_data = X.shape[0] - self.num_params = self.output_dim*(self.num_data + 1) - self.A = np.array((self.num_data, self.output_dim)) - self.bias = np.array(self.output_dim) - self.randomize() - self.name = 'kernel' - def _get_param_names(self): - return sum([['A_%i_%i' % (n, d) for d in range(self.output_dim)] for n in range(self.num_data)], []) + ['bias_%i' % d for d in range(self.output_dim)] - - def _get_params(self): - return np.hstack((self.A.flatten(), self.bias)) - - def _set_params(self, x): - self.A = x[:self.num_data * self.output_dim].reshape(self.num_data, self.output_dim).copy() - self.bias = x[self.num_data*self.output_dim:].copy() - - def randomize(self): - self.A = np.random.randn(self.num_data, self.output_dim)/np.sqrt(self.num_data+1) - self.bias = np.random.randn(self.output_dim)/np.sqrt(self.num_data+1) + self.Z = Z + self.num_bases, Zdim = Z.shape + assert Zdim == self.input_dim + self.A = Param('A', np.random.randn(self.num_bases, self.output_dim)) + self.link_parameter(self.A) def f(self, X): - return np.dot(self.kern.K(X, self.X),self.A) + self.bias + return np.dot(self.kern.K(X, self.Z), self.A) - def df_dtheta(self, dL_df, X): - self._df_dA = (dL_df[:, :, None]*self.kern.K(X, self.X)[:, None, :]).sum(0).T - self._df_dbias = (dL_df.sum(0)) - return np.hstack((self._df_dA.flatten(), self._df_dbias)) + def update_gradients(self, dL_dF, X): + self.kern.update_gradients_full(np.dot(dL_dF, self.A.T), X, self.Z) + self.A.gradient = np.dot( self.kern.K(self.Z, X), dL_dF) - def df_dX(self, dL_df, X): - return self.kern.gradients_X((dL_df[:, None, :]*self.A[None, :, :]).sum(2), X, self.X) + def gradients_X(self, dL_dF, X): + return self.kern.gradients_X(np.dot(dL_dF, self.A.T), X, self.Z) diff --git a/GPy/mappings/linear.py b/GPy/mappings/linear.py index 315dfc0e..ee464694 100644 --- a/GPy/mappings/linear.py +++ b/GPy/mappings/linear.py @@ -1,43 +1,39 @@ # Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt). +# Copyright (c) 2015, James Hensman # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from ..core.mapping import Bijective_mapping +from ..core.mapping import Mapping from ..core.parameterization import Param -class Linear(Bijective_mapping): +class Linear(Mapping): """ - Mapping based on a linear model. + A Linear mapping. .. math:: - f(\mathbf{x}*) = \mathbf{W}\mathbf{x}^* + \mathbf{b} + F(\mathbf{x}) = \mathbf{A} \mathbf{x}) - :param X: input observations - :type X: ndarray + + :param input_dim: dimension of input. + :type input_dim: int :param output_dim: dimension of output. 
:type output_dim: int + :param kernel: a GPy kernel, defaults to GPy.kern.RBF + :type kernel: GPy.kern.kern """ - def __init__(self, input_dim=1, output_dim=1, name='linear'): - Bijective_mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name) - self.W = Param('W',np.array((self.input_dim, self.output_dim))) - self.bias = Param('bias',np.array(self.output_dim)) - self.link_parameters(self.W, self.bias) + def __init__(self, input_dim, output_dim, name='linmap'): + Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name) + self.A = Param('A', np.random.randn(self.input_dim, self.output_dim)) + self.link_parameter(self.A) def f(self, X): - return np.dot(X,self.W) + self.bias + return np.dot(X, self.A) - def g(self, f): - V = np.linalg.solve(np.dot(self.W.T, self.W), W.T) - return np.dot(f-self.bias, V) + def update_gradients(self, dL_dF, X): + self.A.gradient = np.dot( X.T, dL_dF) - def df_dtheta(self, dL_df, X): - df_dW = (dL_df[:, :, None]*X[:, None, :]).sum(0).T - df_dbias = (dL_df.sum(0)) - return np.hstack((df_dW.flatten(), df_dbias)) - - def dL_dX(self, partial, X): - """The gradient of L with respect to the inputs to the mapping, where L is a function that is dependent on the output of the mapping, f.""" - return (partial[:, None, :]*self.W[None, :, :]).sum(2) + def gradients_X(self, dL_dF, X): + return np.dot(dL_dF, self.A.T) diff --git a/GPy/mappings/mlp.py b/GPy/mappings/mlp.py index 46dbc2a9..4afc2fa1 100644 --- a/GPy/mappings/mlp.py +++ b/GPy/mappings/mlp.py @@ -3,128 +3,53 @@ import numpy as np from ..core.mapping import Mapping +from ..core import Param class MLP(Mapping): """ - Mapping based on a multi-layer perceptron neural network model. - - .. math:: - - f(\\mathbf{x}*) = \\mathbf{W}^0\\boldsymbol{\\phi}(\\mathbf{W}^1\\mathbf{x}+\\mathbf{b}^1)^* + \\mathbf{b}^0 - - where - - .. math:: - - \\phi(\\cdot) = \\text{tanh}(\\cdot) - - :param X: input observations - :type X: ndarray - :param output_dim: dimension of output. - :type output_dim: int - :param hidden_dim: dimension of hidden layer. If it is an int, there is one hidden layer of the given dimension. If it is a list of ints there are as manny hidden layers as the length of the list, each with the given number of hidden nodes in it. - :type hidden_dim: int or list of ints. 
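A minimal composition sketch using the mapping classes introduced above (Kernel, Linear and Compound, as exported from GPy.mappings); not taken from the patch, with made-up dimensions, data and kernel choice.

import numpy as np
import GPy

Z = np.random.randn(8, 2)                                   # basis locations for the kernel mapping
kmap = GPy.mappings.Kernel(input_dim=2, output_dim=3, Z=Z, kernel=GPy.kern.RBF(2))
lmap = GPy.mappings.Linear(input_dim=3, output_dim=1)
fmap = GPy.mappings.Compound(kmap, lmap)                    # f(x) = linear(kernel_mapping(x))

X = np.random.randn(20, 2)
F = fmap.f(X)                                               # composed mapping output, shape (20, 1)
dL_dX = fmap.gradients_X(np.ones_like(F), X)                # chain rule back to the inputs, shape (20, 2)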
- + Mapping based on a multi-layer perceptron neural network model, with a single hidden layer """ - def __init__(self, input_dim=1, output_dim=1, hidden_dim=3): - Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim) - self.name = 'mlp' - if isinstance(hidden_dim, int): - hidden_dim = [hidden_dim] + def __init__(self, input_dim=1, output_dim=1, hidden_dim=3, name='mlpmap'): + super(MLP, self).__init__(input_dim=input_dim, output_dim=output_dim, name=name) self.hidden_dim = hidden_dim - self.activation = [None]*len(self.hidden_dim) - self.W = [] - self._dL_dW = [] - self.bias = [] - self._dL_dbias = [] - self.W.append(np.zeros((self.input_dim, self.hidden_dim[0]))) - self._dL_dW.append(np.zeros((self.input_dim, self.hidden_dim[0]))) - self.bias.append(np.zeros(self.hidden_dim[0])) - self._dL_dbias.append(np.zeros(self.hidden_dim[0])) - self.num_params = self.hidden_dim[0]*(self.input_dim+1) - for h1, h0 in zip(hidden_dim[1:], hidden_dim[0:-1]): - self.W.append(np.zeros((h0, h1))) - self._dL_dW.append(np.zeros((h0, h1))) - self.bias.append(np.zeros(h1)) - self._dL_dbias.append(np.zeros(h1)) - self.num_params += h1*(h0+1) - self.W.append(np.zeros((self.hidden_dim[-1], self.output_dim))) - self._dL_dW.append(np.zeros((self.hidden_dim[-1], self.output_dim))) - self.bias.append(np.zeros(self.output_dim)) - self._dL_dbias.append(np.zeros(self.output_dim)) - self.num_params += self.output_dim*(self.hidden_dim[-1]+1) - self.randomize() + self.W1 = Param('W1', np.random.randn(self.input_dim, self.hidden_dim)) + self.b1 = Param('b1', np.random.randn(self.hidden_dim)) + self.W2 = Param('W2', np.random.randn(self.hidden_dim, self.output_dim)) + self.b2 = Param('b2', np.random.randn(self.output_dim)) + self.link_parameters(self.W1, self.b1, self.W2, self.b2) - def _get_param_names(self): - return sum([['W%i_%i_%i' % (i, n, d) for n in range(self.W[i].shape[0]) for d in range(self.W[i].shape[1])] + ['bias%i_%i' % (i, d) for d in range(self.W[i].shape[1])] for i in range(len(self.W))], []) - - def _get_params(self): - param = np.array([]) - for W, bias in zip(self.W, self.bias): - param = np.hstack((param, W.flatten(), bias)) - return param - - def _set_params(self, x): - start = 0 - for W, bias in zip(self.W, self.bias): - end = W.shape[0]*W.shape[1]+start - W[:] = x[start:end].reshape(W.shape[0], W.shape[1]).copy() - start = end - end = W.shape[1]+end - bias[:] = x[start:end].copy() - start = end - - def randomize(self): - for W, bias in zip(self.W, self.bias): - W[:] = np.random.randn(W.shape[0], W.shape[1])/np.sqrt(W.shape[0]+1) - bias[:] = np.random.randn(W.shape[1])/np.sqrt(W.shape[0]+1) def f(self, X): - self._f_computations(X) - return np.dot(np.tanh(self.activation[-1]), self.W[-1]) + self.bias[-1] + layer1 = np.dot(X, self.W1) + self.b1 + activations = np.tanh(layer1) + return np.dot(activations, self.W2) + self.b2 - def _f_computations(self, X): - W = self.W[0] - bias = self.bias[0] - self.activation[0] = np.dot(X,W) + bias - for W, bias, index in zip(self.W[1:-1], self.bias[1:-1], range(1, len(self.activation))): - self.activation[index] = np.dot(np.tanh(self.activation[index-1]), W)+bias + def update_gradients(self, dL_dF, X): + layer1 = np.dot(X,self.W1) + self.b1 + activations = np.tanh(layer1) + + #Evaluate second-layer gradients. + self.W2.gradient = np.dot(activations.T, dL_dF) + self.b2.gradient = np.sum(dL_dF, 0) + + # Backpropagation to hidden layer. 
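+ # d tanh(a)/da = 1 - tanh(a)**2 = 1/cosh(a)**2, hence dL_dact is divided by np.square(np.cosh(layer1)) below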
+ dL_dact = np.dot(dL_dF, self.W2.T) + dL_dlayer1 = dL_dact / np.square(np.cosh(layer1)) + + # Finally, evaluate the first-layer gradients. + self.W1.gradient = np.dot(X.T,dL_dlayer1) + self.b1.gradient = np.sum(dL_dlayer1, 0) + + def gradients_X(self, dL_dF, X): + layer1 = np.dot(X,self.W1) + self.b1 + activations = np.tanh(layer1) + + # Backpropagation to hidden layer. + dL_dact = np.dot(dL_dF, self.W2.T) + dL_dlayer1 = dL_dact / np.square(np.cosh(layer1)) + + return np.dot(dL_dlayer1, self.W1.T) - def df_dtheta(self, dL_df, X): - self._df_computations(dL_df, X) - g = np.array([]) - for gW, gbias in zip(self._dL_dW, self._dL_dbias): - g = np.hstack((g, gW.flatten(), gbias)) - return g - def _df_computations(self, dL_df, X): - self._f_computations(X) - a0 = self.activation[-1] - W = self.W[-1] - self._dL_dW[-1] = (dL_df[:, :, None]*np.tanh(a0[:, None, :])).sum(0).T - dL_dta=(dL_df[:, None, :]*W[None, :, :]).sum(2) - self._dL_dbias[-1] = (dL_df.sum(0)) - for dL_dW, dL_dbias, W, bias, a0, a1 in zip(self._dL_dW[-2:0:-1], - self._dL_dbias[-2:0:-1], - self.W[-2:0:-1], - self.bias[-2:0:-1], - self.activation[-2::-1], - self.activation[-1:0:-1]): - ta = np.tanh(a1) - dL_da = dL_dta*(1-ta*ta) - dL_dW[:] = (dL_da[:, :, None]*np.tanh(a0[:, None, :])).sum(0).T - dL_dbias[:] = (dL_da.sum(0)) - dL_dta = (dL_da[:, None, :]*W[None, :, :]).sum(2) - ta = np.tanh(self.activation[0]) - dL_da = dL_dta*(1-ta*ta) - W = self.W[0] - self._dL_dW[0] = (dL_da[:, :, None]*X[:, None, :]).sum(0).T - self._dL_dbias[0] = (dL_da.sum(0)) - self._dL_dX = (dL_da[:, None, :]*W[None, :, :]).sum(2) - - def df_dX(self, dL_df, X): - self._df_computations(dL_df, X) - return self._dL_dX - diff --git a/GPy/mappings/piecewise_linear.py b/GPy/mappings/piecewise_linear.py new file mode 100644 index 00000000..8bdee81e --- /dev/null +++ b/GPy/mappings/piecewise_linear.py @@ -0,0 +1,94 @@ +from GPy.core.mapping import Mapping +from GPy.core import Param +import numpy as np + +class PiecewiseLinear(Mapping): + """ + A piecewise-linear mapping. + + The parameters of this mapping are the positions and values of the function where it is broken (self.breaks, self.values). + + Outside the range of the breaks, the function is assumed to have gradient 1 + """ + def __init__(self, input_dim, output_dim, values, breaks, name='piecewise_linear'): + + assert input_dim==1 + assert output_dim==1 + + Mapping.__init__(self, input_dim, output_dim, name) + + values, breaks = np.array(values).flatten(), np.array(breaks).flatten() + assert values.size == breaks.size + self.values = Param('values', values) + self.breaks = Param('breaks', breaks) + self.link_parameter(self.values) + self.link_parameter(self.breaks) + + def parameters_changed(self): + self.order = np.argsort(self.breaks)*1 + self.reverse_order = np.zeros_like(self.order) + self.reverse_order[self.order] = np.arange(self.order.size) + + self.sorted_breaks = self.breaks[self.order] + self.sorted_values = self.values[self.order] + + self.grads = np.diff(self.sorted_values)/np.diff(self.sorted_breaks) + + def f(self, X): + x = X.flatten() + y = x.copy() + + #first adjus the points below the first value + y[xself.sorted_breaks[-1]] = x[x>self.sorted_breaks[-1]] + self.sorted_values[-1] - self.sorted_breaks[-1] + + #loop throught the pairs of points + for low, up, g, v in zip(self. 
sorted_breaks[:-1], self.sorted_breaks[1:], self.grads, self.sorted_values[:-1]): + i = np.logical_and(x>low, xlow, xself.sorted_breaks[-1]]) + dL_dv[0] += np.sum(dL_dF[xself.sorted_breaks[-1]]) + + #now put the gradients back in the correct order! + self.breaks.gradient = dL_db[self.reverse_order] + self.values.gradient = dL_dv[self.reverse_order] + + def gradients_X(self, dL_dF, X): + x = X.flatten() + + #outside the range of the breakpoints, the function is just offset by a contant, so the partial derivative is 1. + dL_dX = dL_dF.copy().flatten() + + #insude the breakpoints, the partial derivative is self.grads + for low, up, g, v in zip(self. sorted_breaks[:-1], self.sorted_breaks[1:], self.grads, self.sorted_values[:-1]): + i = np.logical_and(x>low, x1.-1e-9] = 1.-1e-9 + gamma[gamma<1e-9] = 1e-9 else: gamma = Gamma.copy() diff --git a/GPy/models/warped_gp.py b/GPy/models/warped_gp.py index 4b982ed2..5bc9a417 100644 --- a/GPy/models/warped_gp.py +++ b/GPy/models/warped_gp.py @@ -1,7 +1,6 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) - import numpy as np from ..util.warping_functions import * from ..core import GP @@ -10,14 +9,16 @@ from GPy.util.warping_functions import TanhWarpingFunction_d from GPy import kern class WarpedGP(GP): - def __init__(self, X, Y, kernel=None, warping_function=None, warping_terms=3, normalize_X=False, normalize_Y=False): + def __init__(self, X, Y, kernel=None, warping_function=None, warping_terms=3): if kernel is None: - kernel = kern.rbf(X.shape[1]) + kernel = kern.RBF(X.shape[1]) if warping_function == None: self.warping_function = TanhWarpingFunction_d(warping_terms) self.warping_params = (np.random.randn(self.warping_function.n_terms * 3 + 1,) * 1) + else: + self.warping_function = warping_function self.scale_data = False if self.scale_data: @@ -25,10 +26,10 @@ class WarpedGP(GP): self.has_uncertain_inputs = False self.Y_untransformed = Y.copy() self.predict_in_warped_space = False - likelihood = likelihoods.Gaussian(self.transform_data(), normalize=normalize_Y) + likelihood = likelihoods.Gaussian() - GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X) - self._set_params(self._get_params()) + GP.__init__(self, X, self.transform_data(), likelihood=likelihood, kernel=kernel) + self.link_parameter(self.warping_function) def _scale_data(self, Y): self._Ymax = Y.max() @@ -38,62 +39,55 @@ class WarpedGP(GP): def _unscale_data(self, Y): return (Y + 0.5) * (self._Ymax - self._Ymin) + self._Ymin - def _set_params(self, x): - self.warping_params = x[:self.warping_function.num_parameters] - Y = self.transform_data() - self.likelihood.set_data(Y) - GP._set_params(self, x[self.warping_function.num_parameters:].copy()) + def parameters_changed(self): + self.Y[:] = self.transform_data() + super(WarpedGP, self).parameters_changed() - def _get_params(self): - return np.hstack((self.warping_params.flatten().copy(), GP._get_params(self).copy())) + Kiy = self.posterior.woodbury_vector.flatten() - def _get_param_names(self): - warping_names = self.warping_function._get_param_names() - param_names = GP._get_param_names(self) - return warping_names + param_names - - def transform_data(self): - Y = self.warping_function.f(self.Y_untransformed.copy(), self.warping_params).copy() - return Y - - def log_likelihood(self): - ll = GP.log_likelihood(self) - jacobian = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params) - return ll + np.log(jacobian).sum() - - def 
_log_likelihood_gradients(self): - ll_grads = GP._log_likelihood_gradients(self) - alpha = np.dot(self.Ki, self.likelihood.Y.flatten()) - warping_grads = self.warping_function_gradients(alpha) - - warping_grads = np.append(warping_grads[:, :-1].flatten(), warping_grads[0, -1]) - return np.hstack((warping_grads.flatten(), ll_grads.flatten())) - - def warping_function_gradients(self, Kiy): - grad_y = self.warping_function.fgrad_y(self.Y_untransformed, self.warping_params) - grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, self.warping_params, + grad_y = self.warping_function.fgrad_y(self.Y_untransformed) + grad_y_psi, grad_psi = self.warping_function.fgrad_y_psi(self.Y_untransformed, return_covar_chain=True) djac_dpsi = ((1.0 / grad_y[:, :, None, None]) * grad_y_psi).sum(axis=0).sum(axis=0) dquad_dpsi = (Kiy[:, None, None, None] * grad_psi).sum(axis=0).sum(axis=0) - return -dquad_dpsi + djac_dpsi + warping_grads = -dquad_dpsi + djac_dpsi + + self.warping_function.psi.gradient[:] = warping_grads[:, :-1] + self.warping_function.d.gradient[:] = warping_grads[0, -1] + + + def transform_data(self): + Y = self.warping_function.f(self.Y_untransformed.copy()).copy() + return Y + + def log_likelihood(self): + ll = GP.log_likelihood(self) + jacobian = self.warping_function.fgrad_y(self.Y_untransformed) + return ll + np.log(jacobian).sum() def plot_warping(self): - self.warping_function.plot(self.warping_params, self.Y_untransformed.min(), self.Y_untransformed.max()) + self.warping_function.plot(self.Y_untransformed.min(), self.Y_untransformed.max()) - def predict(self, Xnew, which_parts='all', full_cov=False, pred_init=None): + def predict(self, Xnew, which_parts='all', pred_init=None): # normalize X values - Xnew = (Xnew.copy() - self._Xoffset) / self._Xscale - mu, var = GP._raw_predict(self, Xnew, full_cov=full_cov, which_parts=which_parts) + # Xnew = (Xnew.copy() - self._Xoffset) / self._Xscale + mu, var = GP._raw_predict(self, Xnew) # now push through likelihood - mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov) + mean, var = self.likelihood.predictive_values(mu, var) if self.predict_in_warped_space: - mean = self.warping_function.f_inv(mean, self.warping_params, y=pred_init) - var = self.warping_function.f_inv(var, self.warping_params) + mean = self.warping_function.f_inv(mean, y=pred_init) + var = self.warping_function.f_inv(var) if self.scale_data: mean = self._unscale_data(mean) - - return mean, var, _025pm, _975pm + + return mean, var + +if __name__ == '__main__': + X = np.random.randn(100, 1) + Y = np.sin(X) + np.random.randn(100, 1)*0.05 + + m = WarpedGP(X, Y) diff --git a/GPy/plotting/matplot_dep/maps.py b/GPy/plotting/matplot_dep/maps.py index fcb03b38..a651f34d 100644 --- a/GPy/plotting/matplot_dep/maps.py +++ b/GPy/plotting/matplot_dep/maps.py @@ -6,7 +6,11 @@ try: from matplotlib.patches import Polygon from matplotlib.collections import PatchCollection #from matplotlib import cm - pb.ion() + try: + __IPYTHON__ + pb.ion() + except NameError: + pass except: pass import re diff --git a/GPy/testing/inference_tests.py b/GPy/testing/inference_tests.py index ac92c519..d5039049 100644 --- a/GPy/testing/inference_tests.py +++ b/GPy/testing/inference_tests.py @@ -11,39 +11,38 @@ import GPy class InferenceXTestCase(unittest.TestCase): - + def genData(self): D1,D2,N = 12,12,50 - np.random.seed(1234) - + x = np.linspace(0, 4 * np.pi, N)[:, None] s1 = np.vectorize(lambda x: np.sin(x)) s2 = np.vectorize(lambda x: np.cos(x)**2) s3 = 
np.vectorize(lambda x:-np.exp(-np.cos(2 * x))) sS = np.vectorize(lambda x: np.cos(x)) - + s1 = s1(x) s2 = s2(x) s3 = s3(x) sS = sS(x) - + s1 -= s1.mean(); s1 /= s1.std(0) s2 -= s2.mean(); s2 /= s2.std(0) s3 -= s3.mean(); s3 /= s3.std(0) sS -= sS.mean(); sS /= sS.std(0) - + S1 = np.hstack([s1, sS]) S2 = np.hstack([s3, sS]) - + P1 = np.random.randn(S1.shape[1], D1) P2 = np.random.randn(S2.shape[1], D2) - + Y1 = S1.dot(P1) Y2 = S2.dot(P2) - + Y1 += .01 * np.random.randn(*Y1.shape) Y2 += .01 * np.random.randn(*Y2.shape) - + Y1 -= Y1.mean(0) Y2 -= Y2.mean(0) Y1 /= Y1.std(0) @@ -52,33 +51,34 @@ class InferenceXTestCase(unittest.TestCase): slist = [s1, s2, s3, sS] slist_names = ["s1", "s2", "s3", "sS"] Ylist = [Y1, Y2] - + return Ylist - + def test_inferenceX_BGPLVM(self): Ys = self.genData() m = GPy.models.BayesianGPLVM(Ys[0],5,kernel=GPy.kern.Linear(5,ARD=True)) - + x,mi = m.infer_newX(m.Y, optimize=False) self.assertTrue(mi.checkgrad()) - - m.optimize(max_iters=10000) - x,mi = m.infer_newX(m.Y) - self.assertTrue(np.allclose(m.X.mean, mi.X.mean)) - self.assertTrue(np.allclose(m.X.variance, mi.X.variance)) + m.optimize(max_iters=10000) + x, mi = m.infer_newX(m.Y) + + print m.X.mean - mi.X.mean + self.assertTrue(np.allclose(m.X.mean, mi.X.mean, rtol=1e-4, atol=1e-4)) + self.assertTrue(np.allclose(m.X.variance, mi.X.variance, rtol=1e-4, atol=1e-4)) def test_inferenceX_GPLVM(self): Ys = self.genData() m = GPy.models.GPLVM(Ys[0],3,kernel=GPy.kern.RBF(3,ARD=True)) - + x,mi = m.infer_newX(m.Y, optimize=False) self.assertTrue(mi.checkgrad()) - + # m.optimize(max_iters=10000) # x,mi = m.infer_newX(m.Y) # self.assertTrue(np.allclose(m.X, x)) - + if __name__ == "__main__": unittest.main() diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index c1bb9265..458f5cd8 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -256,13 +256,23 @@ class KernelGradientTestsContinuous(unittest.TestCase): k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + def test_Prod1(self): + k = GPy.kern.RBF(self.D) * GPy.kern.Linear(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + def test_Prod2(self): - k = (GPy.kern.RBF(2, active_dims=[0,4]) * GPy.kern.Linear(self.D)) + k = GPy.kern.RBF(2, active_dims=[0,4]) * GPy.kern.Linear(self.D) k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) def test_Prod3(self): - k = (GPy.kern.RBF(2, active_dims=[0,4]) * GPy.kern.Linear(self.D)) + k = GPy.kern.RBF(self.D) * GPy.kern.Linear(self.D) * GPy.kern.Bias(self.D) + k.randomize() + self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) + + def test_Prod4(self): + k = GPy.kern.RBF(2, active_dims=[0,4]) * GPy.kern.Linear(self.D) * GPy.kern.Matern32(2, active_dims=[0,1]) k.randomize() self.assertTrue(check_kernel_gradient_functions(k, X=self.X, X2=self.X2, verbose=verbose)) @@ -401,11 +411,27 @@ class Coregionalize_weave_test(unittest.TestCase): GPy.util.config.config.set('weave', 'working', 'False') +class KernelTestsProductWithZeroValues(unittest.TestCase): + + def setUp(self): + self.X = np.array([[0,1],[1,0]]) + self.k = GPy.kern.Linear(2) * GPy.kern.Bias(2) + + def test_zero_valued_kernel_full(self): + self.k.update_gradients_full(1, self.X) + self.assertFalse(np.isnan(self.k['linear.variances'].gradient), + "Gradient resulted in NaN") + + def test_zero_valued_kernel_gradients_X(self): + 
target = self.k.gradients_X(1, self.X) + self.assertFalse(np.any(np.isnan(target)), + "Gradient resulted in NaN") if __name__ == "__main__": print "Running unit tests, please be (very) patient..." unittest.main() + # np.random.seed(0) # N0 = 3 # N1 = 9 diff --git a/GPy/testing/likelihood_tests.py b/GPy/testing/likelihood_tests.py index 877d1aa0..7b6164c1 100644 --- a/GPy/testing/likelihood_tests.py +++ b/GPy/testing/likelihood_tests.py @@ -10,7 +10,7 @@ from GPy.likelihoods import link_functions from GPy.core.parameterization import Param from functools import partial #np.random.seed(300) -#np.random.seed(7) +#np.random.seed(4) #np.seterr(divide='raise') def dparam_partial(inst_func, *args): @@ -52,8 +52,17 @@ def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, zipped_params = zip(params, params_names) for param_ind, (param_val, param_name) in enumerate(zipped_params): #Check one parameter at a time, make sure it is 2d (as some gradients only return arrays) then strip out the parameter - fnum = np.atleast_2d(partial_f(param_val, param_name))[:, param_ind].shape[0] - dfnum = np.atleast_2d(partial_df(param_val, param_name))[:, param_ind].shape[0] + f_ = partial_f(param_val, param_name) + df_ = partial_df(param_val, param_name) + #Reshape it such that we have a 3d matrix incase, that is we want it (?, N, D) regardless of whether ? is num_params or not + f_ = f_.reshape(-1, f_.shape[0], f_.shape[1]) + df_ = df_.reshape(-1, f_.shape[0], f_.shape[1]) + + #Get the number of f and number of dimensions + fnum = f_.shape[-2] + fdim = f_.shape[-1] + dfnum = df_.shape[-2] + for fixed_val in range(dfnum): #dlik and dlik_dvar gives back 1 value for each f_ind = min(fnum, fixed_val+1) - 1 @@ -61,9 +70,13 @@ def dparam_checkgrad(func, dfunc, params, params_names, args, constraints=None, #Make grad checker with this param moving, note that set_params is NOT being called #The parameter is being set directly with __setattr__ #Check only the parameter and function value we wish to check at a time - grad = GradientChecker(lambda p_val: np.atleast_2d(partial_f(p_val, param_name))[f_ind, param_ind], - lambda p_val: np.atleast_2d(partial_df(p_val, param_name))[fixed_val, param_ind], - param_val, [param_name]) + #func = lambda p_val, fnum, fdim, param_ind, f_ind, param_ind: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :] + #dfunc_dparam = lambda d_val, fnum, fdim, param_ind, fixed_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :] + + #First we reshape the output such that it is (num_params, N, D) then we pull out the relavent parameter-findex and checkgrad just this index at a time + func = lambda p_val: partial_f(p_val, param_name).reshape(-1, fnum, fdim)[param_ind, f_ind, :] + dfunc_dparam = lambda d_val: partial_df(d_val, param_name).reshape(-1, fnum, fdim)[param_ind, fixed_val, :] + grad = GradientChecker(func, dfunc_dparam, param_val, [param_name]) if constraints is not None: for constrain_param, constraint in constraints: @@ -104,37 +117,9 @@ class TestNoiseModels(object): self.var = 0.2 - self.var = np.random.rand(1) - #Make a bigger step as lower bound can be quite curved self.step = 1e-4 - def tearDown(self): - self.Y = None - self.f = None - self.X = None - - def test_scale2_models(self): - self.setUp() - - #################################################### - # Constraint wrappers so we can just list them off # - #################################################### - def constrain_fixed(regex, model): - 
model[regex].constrain_fixed() - - def constrain_negative(regex, model): - model[regex].constrain_negative() - - def constrain_positive(regex, model): - model[regex].constrain_positive() - - def constrain_bounded(regex, model, lower, upper): - """ - Used like: partial(constrain_bounded, lower=0, upper=1) - """ - model[regex].constrain_bounded(lower, upper) - """ Dictionary where we nest models we would like to check Name: { @@ -149,136 +134,170 @@ class TestNoiseModels(object): "link_f_constraints": [constraint_wrappers, listed_here] } """ - noise_models = {"Student_t_default": { - "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), - "grad_params": { - "names": [".*t_scale2"], - "vals": [self.var], - "constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] - #"constraints": [("t_scale2", constrain_positive), ("deg_free", partial(constrain_fixed, value=5))] - }, - "laplace": True - }, - "Student_t_1_var": { - "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), - "grad_params": { - "names": [".*t_scale2"], - "vals": [1.0], - "constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] - }, - "laplace": True - }, - "Student_t_small_deg_free": { - "model": GPy.likelihoods.StudentT(deg_free=1.5, sigma2=self.var), - "grad_params": { - "names": [".*t_scale2"], - "vals": [self.var], - "constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] - }, - "laplace": True - }, - "Student_t_small_var": { - "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), - "grad_params": { - "names": [".*t_scale2"], - "vals": [0.001], - "constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] - }, - "laplace": True - }, - "Student_t_large_var": { - "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), - "grad_params": { - "names": [".*t_scale2"], - "vals": [10.0], - "constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] - }, - "laplace": True - }, - "Student_t_approx_gauss": { - "model": GPy.likelihoods.StudentT(deg_free=1000, sigma2=self.var), - "grad_params": { - "names": [".*t_scale2"], - "vals": [self.var], - "constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] - }, - "laplace": True - }, - "Student_t_log": { - "model": GPy.likelihoods.StudentT(gp_link=link_functions.Log(), deg_free=5, sigma2=self.var), - "grad_params": { - "names": [".*t_scale2"], - "vals": [self.var], - "constraints": [(".*t_scale2", constrain_positive), (".*deg_free", constrain_fixed)] - }, - "laplace": True - }, - "Gaussian_default": { - "model": GPy.likelihoods.Gaussian(variance=self.var), - "grad_params": { - "names": [".*variance"], - "vals": [self.var], - "constraints": [(".*variance", constrain_positive)] - }, - "laplace": True, - "ep": False # FIXME: Should be True when we have it working again - }, - #"Gaussian_log": { - #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Log(), variance=self.var, D=self.D, N=self.N), - #"grad_params": { - #"names": ["noise_model_variance"], - #"vals": [self.var], - #"constraints": [constrain_positive] - #}, - #"laplace": True - #}, - #"Gaussian_probit": { - #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N), - #"grad_params": { - #"names": ["noise_model_variance"], - #"vals": [self.var], - #"constraints": [constrain_positive] - #}, - #"laplace": True - #}, - #"Gaussian_log_ex": { - #"model": 
GPy.likelihoods.gaussian(gp_link=link_functions.Log_ex_1(), variance=self.var, D=self.D, N=self.N), - #"grad_params": { - #"names": ["noise_model_variance"], - #"vals": [self.var], - #"constraints": [constrain_positive] - #}, - #"laplace": True - #}, - "Bernoulli_default": { - "model": GPy.likelihoods.Bernoulli(), - "link_f_constraints": [partial(constrain_bounded, lower=0, upper=1)], - "laplace": True, - "Y": self.binary_Y, - "ep": False # FIXME: Should be True when we have it working again - }, - "Exponential_default": { - "model": GPy.likelihoods.Exponential(), - "link_f_constraints": [constrain_positive], - "Y": self.positive_Y, - "laplace": True, - }, - "Poisson_default": { - "model": GPy.likelihoods.Poisson(), - "link_f_constraints": [constrain_positive], - "Y": self.integer_Y, - "laplace": True, - "ep": False #Should work though... - }#, - #GAMMA needs some work!"Gamma_default": { - #"model": GPy.likelihoods.Gamma(), - #"link_f_constraints": [constrain_positive], - #"Y": self.positive_Y, - #"laplace": True - #} - } + self.noise_models = {"Student_t_default": { + "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), + "grad_params": { + "names": [".*t_scale2"], + "vals": [self.var], + "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)] + }, + "laplace": True + }, + "Student_t_1_var": { + "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), + "grad_params": { + "names": [".*t_scale2"], + "vals": [1.0], + "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)] + }, + "laplace": True + }, + "Student_t_small_deg_free": { + "model": GPy.likelihoods.StudentT(deg_free=1.5, sigma2=self.var), + "grad_params": { + "names": [".*t_scale2"], + "vals": [self.var], + "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)] + }, + "laplace": True + }, + "Student_t_small_var": { + "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), + "grad_params": { + "names": [".*t_scale2"], + "vals": [0.001], + "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)] + }, + "laplace": True + }, + "Student_t_large_var": { + "model": GPy.likelihoods.StudentT(deg_free=5, sigma2=self.var), + "grad_params": { + "names": [".*t_scale2"], + "vals": [10.0], + "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)] + }, + "laplace": True + }, + "Student_t_approx_gauss": { + "model": GPy.likelihoods.StudentT(deg_free=1000, sigma2=self.var), + "grad_params": { + "names": [".*t_scale2"], + "vals": [self.var], + "constraints": [(".*t_scale2", self.constrain_positive), (".*deg_free", self.constrain_fixed)] + }, + "laplace": True + }, + #"Student_t_log": { + #"model": GPy.likelihoods.StudentT(gp_link=link_functions.Log(), deg_free=5, sigma2=self.var), + #"grad_params": { + #"names": [".*t_noise"], + #"vals": [self.var], + #"constraints": [(".*t_noise", self.constrain_positive), (".*deg_free", self.constrain_fixed)] + #}, + #"laplace": True + #}, + "Gaussian_default": { + "model": GPy.likelihoods.Gaussian(variance=self.var), + "grad_params": { + "names": [".*variance"], + "vals": [self.var], + "constraints": [(".*variance", self.constrain_positive)] + }, + "laplace": True, + "ep": False # FIXME: Should be True when we have it working again + }, + "Gaussian_log": { + "model": GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var), + "grad_params": { + "names": [".*variance"], + "vals": 
[self.var], + "constraints": [(".*variance", self.constrain_positive)] + }, + "laplace": True + }, + #"Gaussian_probit": { + #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N), + #"grad_params": { + #"names": ["noise_model_variance"], + #"vals": [self.var], + #"constraints": [constrain_positive] + #}, + #"laplace": True + #}, + #"Gaussian_log_ex": { + #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Log_ex_1(), variance=self.var, D=self.D, N=self.N), + #"grad_params": { + #"names": ["noise_model_variance"], + #"vals": [self.var], + #"constraints": [constrain_positive] + #}, + #"laplace": True + #}, + "Bernoulli_default": { + "model": GPy.likelihoods.Bernoulli(), + "link_f_constraints": [partial(self.constrain_bounded, lower=0, upper=1)], + "laplace": True, + "Y": self.binary_Y, + "ep": False # FIXME: Should be True when we have it working again + }, + "Exponential_default": { + "model": GPy.likelihoods.Exponential(), + "link_f_constraints": [self.constrain_positive], + "Y": self.positive_Y, + "laplace": True, + }, + "Poisson_default": { + "model": GPy.likelihoods.Poisson(), + "link_f_constraints": [self.constrain_positive], + "Y": self.integer_Y, + "laplace": True, + "ep": False #Should work though... + }, + #, + #GAMMA needs some work!"Gamma_default": { + #"model": GPy.likelihoods.Gamma(), + #"link_f_constraints": [constrain_positive], + #"Y": self.positive_Y, + #"laplace": True + #} + } - for name, attributes in noise_models.iteritems(): + + #################################################### + # Constraint wrappers so we can just list them off # + #################################################### + def constrain_fixed(self, regex, model): + model[regex].constrain_fixed() + + def constrain_negative(self, regex, model): + model[regex].constrain_negative() + + def constrain_positive(self, regex, model): + model[regex].constrain_positive() + + def constrain_fixed_below(self, regex, model, up_to): + model[regex][0:up_to].constrain_fixed() + + def constrain_fixed_above(self, regex, model, above): + model[regex][above:].constrain_fixed() + + def constrain_bounded(self, regex, model, lower, upper): + """ + Used like: partial(constrain_bounded, lower=0, upper=1) + """ + model[regex].constrain_bounded(lower, upper) + + + def tearDown(self): + self.Y = None + self.f = None + self.X = None + + def test_scale2_models(self): + self.setUp() + + for name, attributes in self.noise_models.iteritems(): model = attributes["model"] if "grad_params" in attributes: params = attributes["grad_params"] @@ -290,7 +309,7 @@ class TestNoiseModels(object): param_vals = [] param_names = [] constrain_positive = [] - param_constraints = [] # ??? TODO: Saul to Fix. 
+ param_constraints = [] if "link_f_constraints" in attributes: link_f_constraints = attributes["link_f_constraints"] else: @@ -303,6 +322,10 @@ class TestNoiseModels(object): f = attributes["f"].copy() else: f = self.f.copy() + if "Y_metadata" in attributes: + Y_metadata = attributes["Y_metadata"].copy() + else: + Y_metadata = None if "laplace" in attributes: laplace = attributes["laplace"] else: @@ -317,30 +340,30 @@ class TestNoiseModels(object): #Required by all #Normal derivatives - yield self.t_logpdf, model, Y, f - yield self.t_dlogpdf_df, model, Y, f - yield self.t_d2logpdf_df2, model, Y, f + yield self.t_logpdf, model, Y, f, Y_metadata + yield self.t_dlogpdf_df, model, Y, f, Y_metadata + yield self.t_d2logpdf_df2, model, Y, f, Y_metadata #Link derivatives - yield self.t_dlogpdf_dlink, model, Y, f, link_f_constraints - yield self.t_d2logpdf_dlink2, model, Y, f, link_f_constraints + yield self.t_dlogpdf_dlink, model, Y, f, Y_metadata, link_f_constraints + yield self.t_d2logpdf_dlink2, model, Y, f, Y_metadata, link_f_constraints if laplace: #Laplace only derivatives - yield self.t_d3logpdf_df3, model, Y, f - yield self.t_d3logpdf_dlink3, model, Y, f, link_f_constraints + yield self.t_d3logpdf_df3, model, Y, f, Y_metadata + yield self.t_d3logpdf_dlink3, model, Y, f, Y_metadata, link_f_constraints #Params - yield self.t_dlogpdf_dparams, model, Y, f, param_vals, param_names, param_constraints - yield self.t_dlogpdf_df_dparams, model, Y, f, param_vals, param_names, param_constraints - yield self.t_d2logpdf2_df2_dparams, model, Y, f, param_vals, param_names, param_constraints + yield self.t_dlogpdf_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints + yield self.t_dlogpdf_df_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints + yield self.t_d2logpdf2_df2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints #Link params - yield self.t_dlogpdf_link_dparams, model, Y, f, param_vals, param_names, param_constraints - yield self.t_dlogpdf_dlink_dparams, model, Y, f, param_vals, param_names, param_constraints - yield self.t_d2logpdf2_dlink2_dparams, model, Y, f, param_vals, param_names, param_constraints + yield self.t_dlogpdf_link_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints + yield self.t_dlogpdf_dlink_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints + yield self.t_d2logpdf2_dlink2_dparams, model, Y, f, Y_metadata, param_vals, param_names, param_constraints #laplace likelihood gradcheck - yield self.t_laplace_fit_rbf_white, model, self.X, Y, f, self.step, param_vals, param_names, param_constraints + yield self.t_laplace_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints if ep: #ep likelihood gradcheck - yield self.t_ep_fit_rbf_white, model, self.X, Y, f, self.step, param_vals, param_names, param_constraints + yield self.t_ep_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints self.tearDown() @@ -349,41 +372,41 @@ class TestNoiseModels(object): # dpdf_df's # ############# @with_setup(setUp, tearDown) - def t_logpdf(self, model, Y, f): + def t_logpdf(self, model, Y, f, Y_metadata): print "\n{}".format(inspect.stack()[0][3]) print model #print model._get_params() np.testing.assert_almost_equal( - model.pdf(f.copy(), Y.copy()).prod(), - np.exp(model.logpdf(f.copy(), Y.copy()).sum()) + model.pdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).prod(), + 
np.exp(model.logpdf(f.copy(), Y.copy(), Y_metadata=Y_metadata).sum()) ) @with_setup(setUp, tearDown) - def t_dlogpdf_df(self, model, Y, f): + def t_dlogpdf_df(self, model, Y, f, Y_metadata): print "\n{}".format(inspect.stack()[0][3]) self.description = "\n{}".format(inspect.stack()[0][3]) - logpdf = functools.partial(np.sum(model.logpdf), y=Y) - dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y) + logpdf = functools.partial(np.sum(model.logpdf), y=Y, Y_metadata=Y_metadata) + dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata) grad = GradientChecker(logpdf, dlogpdf_df, f.copy(), 'g') grad.randomize() print model assert grad.checkgrad(verbose=1) @with_setup(setUp, tearDown) - def t_d2logpdf_df2(self, model, Y, f): + def t_d2logpdf_df2(self, model, Y, f, Y_metadata): print "\n{}".format(inspect.stack()[0][3]) - dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y) - d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y) + dlogpdf_df = functools.partial(model.dlogpdf_df, y=Y, Y_metadata=Y_metadata) + d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata) grad = GradientChecker(dlogpdf_df, d2logpdf_df2, f.copy(), 'g') grad.randomize() print model assert grad.checkgrad(verbose=1) @with_setup(setUp, tearDown) - def t_d3logpdf_df3(self, model, Y, f): + def t_d3logpdf_df3(self, model, Y, f, Y_metadata): print "\n{}".format(inspect.stack()[0][3]) - d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y) - d3logpdf_df3 = functools.partial(model.d3logpdf_df3, y=Y) + d2logpdf_df2 = functools.partial(model.d2logpdf_df2, y=Y, Y_metadata=Y_metadata) + d3logpdf_df3 = functools.partial(model.d3logpdf_df3, y=Y, Y_metadata=Y_metadata) grad = GradientChecker(d2logpdf_df2, d3logpdf_df3, f.copy(), 'g') grad.randomize() print model @@ -393,32 +416,32 @@ class TestNoiseModels(object): # df_dparams # ############## @with_setup(setUp, tearDown) - def t_dlogpdf_dparams(self, model, Y, f, params, params_names, param_constraints): + def t_dlogpdf_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints): print "\n{}".format(inspect.stack()[0][3]) print model assert ( dparam_checkgrad(model.logpdf, model.dlogpdf_dtheta, - params, params_names, args=(f, Y), constraints=param_constraints, + params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints, randomize=False, verbose=True) ) @with_setup(setUp, tearDown) - def t_dlogpdf_df_dparams(self, model, Y, f, params, params_names, param_constraints): + def t_dlogpdf_df_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints): print "\n{}".format(inspect.stack()[0][3]) print model assert ( dparam_checkgrad(model.dlogpdf_df, model.dlogpdf_df_dtheta, - params, params_names, args=(f, Y), constraints=param_constraints, + params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints, randomize=False, verbose=True) ) @with_setup(setUp, tearDown) - def t_d2logpdf2_df2_dparams(self, model, Y, f, params, params_names, param_constraints): + def t_d2logpdf2_df2_dparams(self, model, Y, f, Y_metadata, params, params_names, param_constraints): print "\n{}".format(inspect.stack()[0][3]) print model assert ( dparam_checkgrad(model.d2logpdf_df2, model.d2logpdf_df2_dtheta, - params, params_names, args=(f, Y), constraints=param_constraints, + params, params_names, args=(f, Y, Y_metadata), constraints=param_constraints, randomize=False, verbose=True) ) @@ -426,10 +449,10 @@ class TestNoiseModels(object): # dpdf_dlink's # ################ @with_setup(setUp, 
tearDown) - def t_dlogpdf_dlink(self, model, Y, f, link_f_constraints): + def t_dlogpdf_dlink(self, model, Y, f, Y_metadata, link_f_constraints): print "\n{}".format(inspect.stack()[0][3]) - logpdf = functools.partial(model.logpdf_link, y=Y) - dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y) + logpdf = functools.partial(model.logpdf_link, y=Y, Y_metadata=Y_metadata) + dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata) grad = GradientChecker(logpdf, dlogpdf_dlink, f.copy(), 'g') #Apply constraints to link_f values @@ -442,10 +465,10 @@ class TestNoiseModels(object): assert grad.checkgrad(verbose=1) @with_setup(setUp, tearDown) - def t_d2logpdf_dlink2(self, model, Y, f, link_f_constraints): + def t_d2logpdf_dlink2(self, model, Y, f, Y_metadata, link_f_constraints): print "\n{}".format(inspect.stack()[0][3]) - dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y) - d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y) + dlogpdf_dlink = functools.partial(model.dlogpdf_dlink, y=Y, Y_metadata=Y_metadata) + d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata) grad = GradientChecker(dlogpdf_dlink, d2logpdf_dlink2, f.copy(), 'g') #Apply constraints to link_f values @@ -458,10 +481,10 @@ class TestNoiseModels(object): assert grad.checkgrad(verbose=1) @with_setup(setUp, tearDown) - def t_d3logpdf_dlink3(self, model, Y, f, link_f_constraints): + def t_d3logpdf_dlink3(self, model, Y, f, Y_metadata, link_f_constraints): print "\n{}".format(inspect.stack()[0][3]) - d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y) - d3logpdf_dlink3 = functools.partial(model.d3logpdf_dlink3, y=Y) + d2logpdf_dlink2 = functools.partial(model.d2logpdf_dlink2, y=Y, Y_metadata=Y_metadata) + d3logpdf_dlink3 = functools.partial(model.d3logpdf_dlink3, y=Y, Y_metadata=Y_metadata) grad = GradientChecker(d2logpdf_dlink2, d3logpdf_dlink3, f.copy(), 'g') #Apply constraints to link_f values @@ -477,32 +500,32 @@ class TestNoiseModels(object): # dlink_dparams # ################# @with_setup(setUp, tearDown) - def t_dlogpdf_link_dparams(self, model, Y, f, params, param_names, param_constraints): + def t_dlogpdf_link_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints): print "\n{}".format(inspect.stack()[0][3]) print model assert ( dparam_checkgrad(model.logpdf_link, model.dlogpdf_link_dtheta, - params, param_names, args=(f, Y), constraints=param_constraints, + params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints, randomize=False, verbose=True) ) @with_setup(setUp, tearDown) - def t_dlogpdf_dlink_dparams(self, model, Y, f, params, param_names, param_constraints): + def t_dlogpdf_dlink_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints): print "\n{}".format(inspect.stack()[0][3]) print model assert ( dparam_checkgrad(model.dlogpdf_dlink, model.dlogpdf_dlink_dtheta, - params, param_names, args=(f, Y), constraints=param_constraints, + params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints, randomize=False, verbose=True) ) @with_setup(setUp, tearDown) - def t_d2logpdf2_dlink2_dparams(self, model, Y, f, params, param_names, param_constraints): + def t_d2logpdf2_dlink2_dparams(self, model, Y, f, Y_metadata, params, param_names, param_constraints): print "\n{}".format(inspect.stack()[0][3]) print model assert ( dparam_checkgrad(model.d2logpdf_dlink2, model.d2logpdf_dlink2_dtheta, - params, param_names, args=(f, Y), 
constraints=param_constraints, + params, param_names, args=(f, Y, Y_metadata), constraints=param_constraints, randomize=False, verbose=True) ) @@ -510,14 +533,15 @@ class TestNoiseModels(object): # laplace test # ################ @with_setup(setUp, tearDown) - def t_laplace_fit_rbf_white(self, model, X, Y, f, step, param_vals, param_names, constraints): + def t_laplace_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints): print "\n{}".format(inspect.stack()[0][3]) #Normalize Y = Y/Y.max() - white_var = 1e-6 + white_var = 1e-5 kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1]) laplace_likelihood = GPy.inference.latent_function_inference.Laplace() - m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, inference_method=laplace_likelihood) + + m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=laplace_likelihood) m['.*white'].constrain_fixed(white_var) #Set constraints @@ -526,6 +550,7 @@ class TestNoiseModels(object): print m m.randomize() + m.randomize() #Set params for param_num in range(len(param_names)): @@ -545,14 +570,15 @@ class TestNoiseModels(object): # EP test # ########### @with_setup(setUp, tearDown) - def t_ep_fit_rbf_white(self, model, X, Y, f, step, param_vals, param_names, constraints): + def t_ep_fit_rbf_white(self, model, X, Y, f, Y_metadata, step, param_vals, param_names, constraints): print "\n{}".format(inspect.stack()[0][3]) #Normalize Y = Y/Y.max() white_var = 1e-6 kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1]) ep_inf = GPy.inference.latent_function_inference.EP() - m = GPy.core.GP(X.copy(), Y.copy(), kernel=kernel, likelihood=model, inference_method=ep_inf) + + m = GPy.core.GP(X.copy(), Y.copy(), kernel=kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=ep_inf) m['.*white'].constrain_fixed(white_var) for param_num in range(len(param_names)): @@ -571,8 +597,8 @@ class LaplaceTests(unittest.TestCase): """ def setUp(self): - self.N = 5 - self.D = 3 + self.N = 15 + self.D = 1 self.X = np.random.rand(self.N, self.D)*10 self.real_std = 0.1 @@ -636,20 +662,20 @@ class LaplaceTests(unittest.TestCase): exact_inf = GPy.inference.latent_function_inference.ExactGaussianInference() m1 = GPy.core.GP(X, Y.copy(), kernel=kernel1, likelihood=gauss_distr1, inference_method=exact_inf) m1['.*white'].constrain_fixed(1e-6) - m1['.*rbf.variance'] = initial_var_guess - m1['.*rbf.variance'].constrain_bounded(1e-4, 10) + m1['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10) m1.randomize() gauss_distr2 = GPy.likelihoods.Gaussian(variance=initial_var_guess) laplace_inf = GPy.inference.latent_function_inference.Laplace() m2 = GPy.core.GP(X, Y.copy(), kernel=kernel2, likelihood=gauss_distr2, inference_method=laplace_inf) m2['.*white'].constrain_fixed(1e-6) - m2['.*rbf.variance'].constrain_bounded(1e-4, 10) + m2['.*Gaussian_noise.variance'].constrain_bounded(1e-4, 10) m2.randomize() if debug: print m1 print m2 + optimizer = 'scg' print "Gaussian" m1.optimize(optimizer, messages=debug, ipython_notebook=False) @@ -687,8 +713,6 @@ class LaplaceTests(unittest.TestCase): pb.scatter(X, m1.likelihood.Y, c='g') pb.scatter(X, m2.likelihood.Y, c='r', marker='x') - - #Check Y's are the same np.testing.assert_almost_equal(m1.Y, m2.Y, decimal=5) #Check marginals are the same diff --git a/GPy/testing/mapping_tests.py b/GPy/testing/mapping_tests.py new file mode 100644 index 00000000..2e32dad3 --- /dev/null +++ b/GPy/testing/mapping_tests.py @@ -0,0 +1,72 @@ +# 
Copyright (c) 2012, 2013 GPy authors (see AUTHORS.txt). +# Licensed under the BSD 3-clause license (see LICENSE.txt) + +import unittest +import numpy as np +import GPy + +class MappingGradChecker(GPy.core.Model): + """ + This class has everything we need to check the gradient of a mapping. It + implements a simple likelihood which is a weighted sum of the outputs of the + mapping. The gradients are checked against the parameters of the mapping + and the input. + """ + def __init__(self, mapping, X, name='map_grad_check'): + super(MappingGradChecker, self).__init__(name) + self.mapping = mapping + self.link_parameter(self.mapping) + self.X = GPy.core.Param('X',X) + self.link_parameter(self.X) + self.dL_dY = np.random.randn(self.X.shape[0], self.mapping.output_dim) + def log_likelihood(self): + return np.sum(self.mapping.f(self.X) * self.dL_dY) + def parameters_changed(self): + self.X.gradient = self.mapping.gradients_X(self.dL_dY, self.X) + self.mapping.update_gradients(self.dL_dY, self.X) + + + + + + + +class MappingTests(unittest.TestCase): + + def test_kernelmapping(self): + X = np.random.randn(100,3) + Z = np.random.randn(10,3) + mapping = GPy.mappings.Kernel(3, 2, Z, GPy.kern.RBF(3)) + self.assertTrue(MappingGradChecker(mapping, X).checkgrad()) + + def test_linearmapping(self): + mapping = GPy.mappings.Linear(3, 2) + X = np.random.randn(100,3) + self.assertTrue(MappingGradChecker(mapping, X).checkgrad()) + + def test_mlpmapping(self): + mapping = GPy.mappings.MLP(input_dim=3, hidden_dim=5, output_dim=2) + X = np.random.randn(100,3) + self.assertTrue(MappingGradChecker(mapping, X).checkgrad()) + + def test_addmapping(self): + m1 = GPy.mappings.MLP(input_dim=3, hidden_dim=5, output_dim=2) + m2 = GPy.mappings.Linear(input_dim=3, output_dim=2) + mapping = GPy.mappings.Additive(m1, m2) + X = np.random.randn(100,3) + self.assertTrue(MappingGradChecker(mapping, X).checkgrad()) + + def test_compoundmapping(self): + m1 = GPy.mappings.MLP(input_dim=3, hidden_dim=5, output_dim=2) + Z = np.random.randn(10,2) + m2 = GPy.mappings.Kernel(2, 4, Z, GPy.kern.RBF(2)) + mapping = GPy.mappings.Compound(m1, m2) + X = np.random.randn(100,3) + self.assertTrue(MappingGradChecker(mapping, X).checkgrad()) + + + + +if __name__ == "__main__": + print "Running unit tests, please be (very) patient..." + unittest.main() diff --git a/GPy/testing/meanfunc_tests.py b/GPy/testing/meanfunc_tests.py new file mode 100644 index 00000000..1d875377 --- /dev/null +++ b/GPy/testing/meanfunc_tests.py @@ -0,0 +1,56 @@ +# Copyright (c) 2015, James Hensman +# Licensed under the BSD 3-clause license (see LICENSE.txt) + +import unittest +import numpy as np +import GPy + +class MFtests(unittest.TestCase): + def test_simple_mean_function(self): + """ + The simplest possible mean function. No parameters, just a simple Sinusoid.
+ """ + #create simple mean function + mf = GPy.core.Mapping(1,1) + mf.f = np.sin + mf.update_gradients = lambda a,b: None + + X = np.linspace(0,10,50).reshape(-1,1) + Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + + k =GPy.kern.RBF(1) + lik = GPy.likelihoods.Gaussian() + m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf) + self.assertTrue(m.checkgrad()) + + def test_parametric_mean_function(self): + """ + A linear mean function with parameters that we'll learn alongside the kernel + """ + + X = np.linspace(0,10,50).reshape(-1,1) + Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X + + mf = GPy.mappings.Linear(1,1) + + k =GPy.kern.RBF(1) + lik = GPy.likelihoods.Gaussian() + m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf) + self.assertTrue(m.checkgrad()) + + def test_svgp_mean_function(self): + + # an instance of the SVIGOP with a men function + X = np.linspace(0,10,500).reshape(-1,1) + Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + Y = np.where(Y>0, 1,0) # make aclassificatino problem + + mf = GPy.mappings.Linear(1,1) + Z = np.linspace(0,10,50).reshape(-1,1) + lik = GPy.likelihoods.Bernoulli() + k =GPy.kern.RBF(1) + GPy.kern.White(1, 1e-4) + m = GPy.core.SVGP(X, Y,Z=Z, kernel=k, likelihood=lik, mean_function=mf) + self.assertTrue(m.checkgrad()) + + + diff --git a/GPy/testing/misc_tests.py b/GPy/testing/misc_tests.py new file mode 100644 index 00000000..e620fa7e --- /dev/null +++ b/GPy/testing/misc_tests.py @@ -0,0 +1,18 @@ +import numpy as np +import scipy as sp +import GPy + +class MiscTests(np.testing.TestCase): + """ + Testing some utilities of misc + """ + def setUp(self): + self._lim_val = np.finfo(np.float64).max + self._lim_val_exp = np.log(self._lim_val) + + def test_safe_exp_upper(self): + assert np.exp(self._lim_val_exp + 1) == np.inf + assert GPy.util.misc.safe_exp(self._lim_val_exp + 1) < np.inf + + def test_safe_exp_lower(self): + assert GPy.util.misc.safe_exp(1e-10) < np.inf diff --git a/GPy/testing/svgp_tests.py b/GPy/testing/svgp_tests.py new file mode 100644 index 00000000..beb9c00d --- /dev/null +++ b/GPy/testing/svgp_tests.py @@ -0,0 +1,54 @@ +import numpy as np +import scipy as sp +import GPy + +class SVGP_nonconvex(np.testing.TestCase): + """ + Inference in the SVGP with a student-T likelihood + """ + def setUp(self): + X = np.linspace(0,10,100).reshape(-1,1) + Z = np.linspace(0,10,10).reshape(-1,1) + Y = np.sin(X) + np.random.randn(*X.shape)*0.1 + Y[50] += 3 + + lik = GPy.likelihoods.StudentT(deg_free=2) + k = GPy.kern.RBF(1, lengthscale=5.) + GPy.kern.White(1, 1e-6) + self.m = GPy.core.SVGP(X, Y, Z=Z, likelihood=lik, kernel=k) + def test_grad(self): + assert self.m.checkgrad(step=1e-4) + +class SVGP_classification(np.testing.TestCase): + """ + Inference in the SVGP with a Bernoulli likelihood + """ + def setUp(self): + X = np.linspace(0,10,100).reshape(-1,1) + Z = np.linspace(0,10,10).reshape(-1,1) + Y = np.where((np.sin(X) + np.random.randn(*X.shape)*0.1)>0, 1,0) + + lik = GPy.likelihoods.Bernoulli() + k = GPy.kern.RBF(1, lengthscale=5.) 
+ GPy.kern.White(1, 1e-6) + self.m = GPy.core.SVGP(X, Y, Z=Z, likelihood=lik, kernel=k) + def test_grad(self): + assert self.m.checkgrad(step=1e-4) + +class SVGP_Poisson_with_meanfunction(np.testing.TestCase): + """ + Inference in the SVGP with a Poisson likelihood and a mean function + """ + def setUp(self): + X = np.linspace(0,10,100).reshape(-1,1) + Z = np.linspace(0,10,10).reshape(-1,1) + latent_f = np.exp(0.1*X * 0.05*X**2) + Y = np.array([np.random.poisson(f) for f in latent_f.flatten()]).reshape(-1,1) + + mf = GPy.mappings.Linear(1,1) + + lik = GPy.likelihoods.Poisson() + k = GPy.kern.RBF(1, lengthscale=5.) + GPy.kern.White(1, 1e-6) + self.m = GPy.core.SVGP(X, Y, Z=Z, likelihood=lik, kernel=k, mean_function=mf) + def test_grad(self): + assert self.m.checkgrad(step=1e-4) + + diff --git a/GPy/util/block_matrices.py b/GPy/util/block_matrices.py index 95920868..464e3ba1 100644 --- a/GPy/util/block_matrices.py +++ b/GPy/util/block_matrices.py @@ -17,6 +17,54 @@ def get_blocks(A, blocksizes): count_i += i return B +def get_block_shapes(B): + assert B.dtype is np.dtype('object'), "Must be a block matrix" + return [B[b,b].shape[0] for b in range(0, B.shape[0])] + +def unblock(B): + assert B.dtype is np.dtype('object'), "Must be a block matrix" + block_shapes = get_block_shapes(B) + num_elements = np.sum(block_shapes) + A = np.empty(shape=(num_elements, num_elements)) + count_i = 0 + for Bi, i in enumerate(block_shapes): + count_j = 0 + for Bj, j in enumerate(block_shapes): + A[count_i:count_i + i, count_j:count_j + j] = B[Bi, Bj] + count_j += j + count_i += i + return A + +def block_dot(A, B): + """ + Element-wise dot product on block matrices + + +------+------+ +------+------+ +-------+-------+ + | | | | | | |A11.B11|A12.B12| + | A11 | A12 | | B11 | B12 | | | | + +------+------+ o +------+------+ = +-------+-------+ + | | | | | | |A21.B21|A22.B22| + | A21 | A22 | | B21 | B22 | | | | + +------+------+ +------+------+ +-------+-------+ + + .. note:: + If the diagonal blocks of either A or B are stored as vectors then a more + efficient dot product using numpy broadcasting will be used, i.e. A11*B11 + """ + #Must have same number of blocks and be a block matrix + assert A.dtype is np.dtype('object'), "Must be a block matrix" + assert B.dtype is np.dtype('object'), "Must be a block matrix" + Ashape = A.shape + Bshape = B.shape + assert Ashape == Bshape + def f(A,B): + if Ashape[0] == Ashape[1] or Bshape[0] == Bshape[1]: + #FIXME: Careful if one is transpose of other, would make a matrix + return A*B + else: + return np.dot(A,B) + dot = np.vectorize(f, otypes = [np.object]) + return dot(A,B) if __name__=='__main__': @@ -24,3 +72,5 @@ if __name__=='__main__': B = get_blocks(A,[2,3]) B[0,0] += 7 print B + + assert np.all(unblock(B) == A) diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py index b148f2f4..1089b557 100644 --- a/GPy/util/linalg.py +++ b/GPy/util/linalg.py @@ -96,16 +96,21 @@ def jitchol(A, maxtries=5): num_tries = 1 while num_tries <= maxtries and np.isfinite(jitter): try: + print jitter L = linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True) - logging.warning('Added {} rounds of jitter, jitter of {:.10e}\n'.format(num_tries, jitter)) return L except: jitter *= 10 + finally: num_tries += 1 + raise linalg.LinAlgError, "not positive definite, even with jitter."
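# --- Aside (not part of the patch): a minimal, self-contained sketch of the retry-with-jitter
# idea that jitchol implements above. The helper name chol_with_jitter and the starting jitter
# value are illustrative assumptions, not GPy's exact behaviour.
import numpy as np
from scipy import linalg

def chol_with_jitter(A, maxtries=5):
    # start from a small multiple of the mean diagonal and grow it tenfold
    # until the Cholesky factorisation succeeds
    jitter = np.diag(A).mean() * 1e-6
    for _ in range(maxtries):
        try:
            return linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
        except linalg.LinAlgError:
            jitter *= 10
    raise linalg.LinAlgError("not positive definite, even with jitter")

# a rank-deficient matrix that plain Cholesky rejects; one round of jitter fixes it
K = np.ones((3, 3))
L_jittered = chol_with_jitter(K)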
import traceback - logging.warning('\n'.join(['Added {} rounds of jitter, jitter of {:.10e}'.format(num_tries-1, jitter), - ' in '+traceback.format_list(traceback.extract_stack(limit=2)[-2:-1])[0][2:]])) - raise linalg.LinAlgError, "not positive definite, even with jitter." + try: raise + except: + logging.warning('\n'.join(['Added jitter of {:.10e}'.format(jitter), + ' in '+traceback.format_list(traceback.extract_stack(limit=2)[-2:-1])[0][2:]])) + import ipdb;ipdb.set_trace() + return L # def dtrtri(L, lower=1): # """ diff --git a/GPy/util/misc.py b/GPy/util/misc.py index bf37159d..99bd62b3 100644 --- a/GPy/util/misc.py +++ b/GPy/util/misc.py @@ -4,6 +4,16 @@ import numpy as np from config import * +_lim_val = np.finfo(np.float64).max + +_lim_val_exp = np.log(_lim_val) +_lim_val_square = np.sqrt(_lim_val) +_lim_val_cube = np.power(_lim_val, -3) + +def safe_exp(f): + clip_f = np.clip(f, -np.inf, _lim_val_exp) + return np.exp(clip_f) + def chain_1(df_dg, dg_dx): """ Generic chaining function for first derivative @@ -11,6 +21,11 @@ def chain_1(df_dg, dg_dx): .. math:: \\frac{d(f . g)}{dx} = \\frac{df}{dg} \\frac{dg}{dx} """ + if np.all(dg_dx==1.): + return df_dg + if len(df_dg) > 1 and df_dg.shape[-1] > 1: + import ipdb; ipdb.set_trace() # XXX BREAKPOINT + raise NotImplementedError('Not implemented for matricies yet') return df_dg * dg_dx def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2): @@ -20,7 +35,13 @@ def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2): .. math:: \\frac{d^{2}(f . g)}{dx^{2}} = \\frac{d^{2}f}{dg^{2}}(\\frac{dg}{dx})^{2} + \\frac{df}{dg}\\frac{d^{2}g}{dx^{2}} """ - return d2f_dg2*(dg_dx**2) + df_dg*d2g_dx2 + if np.all(dg_dx==1.) and np.all(d2g_dx2 == 0): + return d2f_dg2 + if len(d2f_dg2) > 1 and d2f_dg2.shape[-1] > 1: + raise NotImplementedError('Not implemented for matricies yet') + #dg_dx_2 = np.clip(dg_dx, 1e-12, _lim_val_square)**2 + dg_dx_2 = dg_dx**2 + return d2f_dg2*(dg_dx_2) + df_dg*d2g_dx2 def chain_3(d3f_dg3, dg_dx, d2f_dg2, d2g_dx2, df_dg, d3g_dx3): """ @@ -29,11 +50,18 @@ def chain_3(d3f_dg3, dg_dx, d2f_dg2, d2g_dx2, df_dg, d3g_dx3): .. math:: \\frac{d^{3}(f . g)}{dx^{3}} = \\frac{d^{3}f}{dg^{3}}(\\frac{dg}{dx})^{3} + 3\\frac{d^{2}f}{dg^{2}}\\frac{dg}{dx}\\frac{d^{2}g}{dx^{2}} + \\frac{df}{dg}\\frac{d^{3}g}{dx^{3}} """ - return d3f_dg3*(dg_dx**3) + 3*d2f_dg2*dg_dx*d2g_dx2 + df_dg*d3g_dx3 + if np.all(dg_dx==1.) and np.all(d2g_dx2==0) and np.all(d3g_dx3==0): + return d3f_dg3 + if ( (len(d2f_dg2) > 1 and d2f_dg2.shape[-1] > 1) + or (len(d3f_dg3) > 1 and d3f_dg3.shape[-1] > 1)): + raise NotImplementedError('Not implemented for matricies yet') + #dg_dx_3 = np.clip(dg_dx, 1e-12, _lim_val_cube)**3 + dg_dx_3 = dg_dx**3 + return d3f_dg3*(dg_dx_3) + 3*d2f_dg2*dg_dx*d2g_dx2 + df_dg*d3g_dx3 def opt_wrapper(m, **kwargs): """ - This function just wraps the optimization procedure of a GPy + Thit function just wraps the optimization procedure of a GPy object so that optimize() pickleable (necessary for multiprocessing). 
""" m.optimize(**kwargs) @@ -96,3 +124,47 @@ from :class:ndarray)""" if len(param) == 1: return param[0].view(np.ndarray) return [x.view(np.ndarray) for x in param] + +def blockify_hessian(func): + def wrapper_func(self, *args, **kwargs): + # Invoke the wrapped function first + retval = func(self, *args, **kwargs) + # Now do something here with retval and/or action + if self.not_block_really and (retval.shape[0] != retval.shape[1]): + return np.diagflat(retval) + else: + return retval + return wrapper_func + +def blockify_third(func): + def wrapper_func(self, *args, **kwargs): + # Invoke the wrapped function first + retval = func(self, *args, **kwargs) + # Now do something here with retval and/or action + if self.not_block_really and (len(retval.shape) < 3): + num_data = retval.shape[0] + d3_block_cache = np.zeros((num_data, num_data, num_data)) + diag_slice = range(num_data) + d3_block_cache[diag_slice, diag_slice, diag_slice] = np.squeeze(retval) + return d3_block_cache + else: + return retval + return wrapper_func + +def blockify_dhess_dtheta(func): + def wrapper_func(self, *args, **kwargs): + # Invoke the wrapped function first + retval = func(self, *args, **kwargs) + # Now do something here with retval and/or action + if self.not_block_really and (len(retval.shape) < 3): + num_data = retval.shape[0] + num_params = retval.shape[-1] + dhess_dtheta = np.zeros((num_data, num_data, num_params)) + diag_slice = range(num_data) + for param_ind in range(num_params): + dhess_dtheta[diag_slice, diag_slice, param_ind] = np.squeeze(retval[:,param_ind]) + return dhess_dtheta + else: + return retval + return wrapper_func + diff --git a/GPy/util/warping_functions.py b/GPy/util/warping_functions.py index a0a385e0..a7547be6 100644 --- a/GPy/util/warping_functions.py +++ b/GPy/util/warping_functions.py @@ -1,17 +1,18 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) - import numpy as np +from GPy.core.parameterization import Parameterized, Param +from ..core.parameterization.transformations import Logexp -class WarpingFunction(object): +class WarpingFunction(Parameterized): """ abstract function for warping z = f(y) """ - def __init__(self): - raise NotImplementedError + def __init__(self, name): + super(WarpingFunction, self).__init__(name=name) def f(self,y,psi): """function transformation @@ -34,9 +35,10 @@ class WarpingFunction(object): def _get_param_names(self): raise NotImplementedError - def plot(self, psi, xmin, xmax): + def plot(self, xmin, xmax): + psi = self.psi y = np.arange(xmin, xmax, 0.01) - f_y = self.f(y, psi) + f_y = self.f(y) from matplotlib import pyplot as plt plt.figure() plt.plot(y, f_y) @@ -50,6 +52,7 @@ class TanhWarpingFunction(WarpingFunction): """n_terms specifies the number of tanh terms to be used""" self.n_terms = n_terms self.num_parameters = 3 * self.n_terms + super(TanhWarpingFunction, self).__init__(name='warp_tanh') def f(self,y,psi): """ @@ -163,8 +166,18 @@ class TanhWarpingFunction_d(WarpingFunction): """n_terms specifies the number of tanh terms to be used""" self.n_terms = n_terms self.num_parameters = 3 * self.n_terms + 1 + self.psi = np.ones((self.n_terms, 3)) - def f(self,y,psi): + super(TanhWarpingFunction_d, self).__init__(name='warp_tanh') + self.psi = Param('psi', self.psi) + self.psi[:, :2].constrain_positive() + + self.d = Param('%s' % ('d'), 1.0, Logexp()) + self.link_parameter(self.psi) + self.link_parameter(self.d) + + + def f(self,y): """ Transform y with f using parameter vector psi psi = [[a,b,c]] @@ -175,9 +188,9 @@ class TanhWarpingFunction_d(WarpingFunction): #1. check that number of params is consistent # assert psi.shape[0] == self.n_terms, 'inconsistent parameter dimensions' # assert psi.shape[1] == 4, 'inconsistent parameter dimensions' - mpsi = psi.copy() - d = psi[-1] - mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3) + + d = self.d + mpsi = self.psi #3. 
transform data z = d*y.copy() @@ -187,7 +200,7 @@ class TanhWarpingFunction_d(WarpingFunction): return z - def f_inv(self, z, psi, max_iterations=1000, y=None): + def f_inv(self, z, max_iterations=1000, y=None): """ calculate the numerical inverse of f @@ -198,12 +211,12 @@ class TanhWarpingFunction_d(WarpingFunction): z = z.copy() if y is None: y = np.ones_like(z) - + it = 0 update = np.inf while it == 0 or (np.abs(update).sum() > 1e-10 and it < max_iterations): - update = (self.f(y, psi) - z)/self.fgrad_y(y, psi) + update = (self.f(y) - z)/self.fgrad_y(y) y -= update it += 1 if it == max_iterations: @@ -212,7 +225,7 @@ class TanhWarpingFunction_d(WarpingFunction): return y - def fgrad_y(self, y, psi, return_precalc = False): + def fgrad_y(self, y,return_precalc = False): """ gradient of f w.r.t to y ([N x 1]) @@ -221,9 +234,8 @@ class TanhWarpingFunction_d(WarpingFunction): """ - mpsi = psi.copy() - d = psi[-1] - mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3) + d = self.d + mpsi = self.psi # vectorized version @@ -240,7 +252,7 @@ class TanhWarpingFunction_d(WarpingFunction): return GRAD - def fgrad_y_psi(self, y, psi, return_covar_chain = False): + def fgrad_y_psi(self, y, return_covar_chain = False): """ gradient of f w.r.t to y and psi @@ -248,10 +260,10 @@ class TanhWarpingFunction_d(WarpingFunction): """ - mpsi = psi.copy() - mpsi = mpsi[:self.num_parameters-1].reshape(self.n_terms, 3) - w, s, r, d = self.fgrad_y(y, psi, return_precalc = True) + mpsi = self.psi + + w, s, r, d = self.fgrad_y(y, return_precalc = True) gradients = np.zeros((y.shape[0], y.shape[1], len(mpsi), 4)) for i in range(len(mpsi)): diff --git a/benchmarks/boston_housing.py b/benchmarks/boston_housing.py new file mode 100644 index 00000000..0dcff082 --- /dev/null +++ b/benchmarks/boston_housing.py @@ -0,0 +1,44 @@ +import numpy as np +import GPy + +def load_housing_data(): + X = np.loadtxt('housing.data') + X, Y = X[:,:-1], X[:,-1:] + + #scale the X data + xmax, xmin = X.max(0), X.min(0) + X = (X-xmin)/(xmax-xmin) + + #loy the response + Y = np.log(Y) + return X, Y + +def fit_full_GP(): + X, Y = load_housing_data() + k = GPy.kern.RBF(X.shape[1], ARD=True) + GPy.kern.Linear(X.shape[1]) + m = GPy.models.GPRegression(X, Y, kernel=k) + m.optimize('bfgs', max_iters=400, gtol=0) + return m + +def fit_svgp_st(): + np.random.seed(0) + X, Y = load_housing_data() + + Z = X[np.random.permutation(X.shape[0])[:100]] + k = GPy.kern.RBF(X.shape[1], ARD=True) + GPy.kern.Linear(X.shape[1], ARD=True) + GPy.kern.White(1,0.01) + GPy.kern.Bias(1) + + lik = GPy.likelihoods.StudentT(deg_free=3.) 
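# --- Aside (not part of the patch): the benchmark's __main__ block below only imports timeit;
# one plausible way to use it is sketched here. The module name boston_housing and number=1
# are assumptions for illustration, not part of the benchmark itself.
import timeit
full_gp_time = timeit.timeit("fit_full_GP()", setup="from boston_housing import fit_full_GP", number=1)
svgp_time = timeit.timeit("fit_svgp_st()", setup="from boston_housing import fit_svgp_st", number=1)
print("full GP: %.1fs, SVGP: %.1fs" % (full_gp_time, svgp_time))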
+ m = GPy.core.SVGP(X, Y, Z=Z, kernel=k, likelihood=lik) + [m.optimize('scg', max_iters=40, gtol=0, messages=1, xtol=0, ftol=0) for i in range(10)] + m.optimize('bfgs', max_iters=4000, gtol=0, messages=1, xtol=0, ftol=0) + return m + + + + + + +if __name__=="__main__": + import timeit + + diff --git a/benchmarks/housing.data b/benchmarks/housing.data new file mode 100644 index 00000000..f83ac564 --- /dev/null +++ b/benchmarks/housing.data @@ -0,0 +1,506 @@ + 0.00632 18.00 2.310 0 0.5380 6.5750 65.20 4.0900 1 296.0 15.30 396.90 4.98 24.00 + 0.02731 0.00 7.070 0 0.4690 6.4210 78.90 4.9671 2 242.0 17.80 396.90 9.14 21.60 + 0.02729 0.00 7.070 0 0.4690 7.1850 61.10 4.9671 2 242.0 17.80 392.83 4.03 34.70 + 0.03237 0.00 2.180 0 0.4580 6.9980 45.80 6.0622 3 222.0 18.70 394.63 2.94 33.40 + 0.06905 0.00 2.180 0 0.4580 7.1470 54.20 6.0622 3 222.0 18.70 396.90 5.33 36.20 + 0.02985 0.00 2.180 0 0.4580 6.4300 58.70 6.0622 3 222.0 18.70 394.12 5.21 28.70 + 0.08829 12.50 7.870 0 0.5240 6.0120 66.60 5.5605 5 311.0 15.20 395.60 12.43 22.90 + 0.14455 12.50 7.870 0 0.5240 6.1720 96.10 5.9505 5 311.0 15.20 396.90 19.15 27.10 + 0.21124 12.50 7.870 0 0.5240 5.6310 100.00 6.0821 5 311.0 15.20 386.63 29.93 16.50 + 0.17004 12.50 7.870 0 0.5240 6.0040 85.90 6.5921 5 311.0 15.20 386.71 17.10 18.90 + 0.22489 12.50 7.870 0 0.5240 6.3770 94.30 6.3467 5 311.0 15.20 392.52 20.45 15.00 + 0.11747 12.50 7.870 0 0.5240 6.0090 82.90 6.2267 5 311.0 15.20 396.90 13.27 18.90 + 0.09378 12.50 7.870 0 0.5240 5.8890 39.00 5.4509 5 311.0 15.20 390.50 15.71 21.70 + 0.62976 0.00 8.140 0 0.5380 5.9490 61.80 4.7075 4 307.0 21.00 396.90 8.26 20.40 + 0.63796 0.00 8.140 0 0.5380 6.0960 84.50 4.4619 4 307.0 21.00 380.02 10.26 18.20 + 0.62739 0.00 8.140 0 0.5380 5.8340 56.50 4.4986 4 307.0 21.00 395.62 8.47 19.90 + 1.05393 0.00 8.140 0 0.5380 5.9350 29.30 4.4986 4 307.0 21.00 386.85 6.58 23.10 + 0.78420 0.00 8.140 0 0.5380 5.9900 81.70 4.2579 4 307.0 21.00 386.75 14.67 17.50 + 0.80271 0.00 8.140 0 0.5380 5.4560 36.60 3.7965 4 307.0 21.00 288.99 11.69 20.20 + 0.72580 0.00 8.140 0 0.5380 5.7270 69.50 3.7965 4 307.0 21.00 390.95 11.28 18.20 + 1.25179 0.00 8.140 0 0.5380 5.5700 98.10 3.7979 4 307.0 21.00 376.57 21.02 13.60 + 0.85204 0.00 8.140 0 0.5380 5.9650 89.20 4.0123 4 307.0 21.00 392.53 13.83 19.60 + 1.23247 0.00 8.140 0 0.5380 6.1420 91.70 3.9769 4 307.0 21.00 396.90 18.72 15.20 + 0.98843 0.00 8.140 0 0.5380 5.8130 100.00 4.0952 4 307.0 21.00 394.54 19.88 14.50 + 0.75026 0.00 8.140 0 0.5380 5.9240 94.10 4.3996 4 307.0 21.00 394.33 16.30 15.60 + 0.84054 0.00 8.140 0 0.5380 5.5990 85.70 4.4546 4 307.0 21.00 303.42 16.51 13.90 + 0.67191 0.00 8.140 0 0.5380 5.8130 90.30 4.6820 4 307.0 21.00 376.88 14.81 16.60 + 0.95577 0.00 8.140 0 0.5380 6.0470 88.80 4.4534 4 307.0 21.00 306.38 17.28 14.80 + 0.77299 0.00 8.140 0 0.5380 6.4950 94.40 4.4547 4 307.0 21.00 387.94 12.80 18.40 + 1.00245 0.00 8.140 0 0.5380 6.6740 87.30 4.2390 4 307.0 21.00 380.23 11.98 21.00 + 1.13081 0.00 8.140 0 0.5380 5.7130 94.10 4.2330 4 307.0 21.00 360.17 22.60 12.70 + 1.35472 0.00 8.140 0 0.5380 6.0720 100.00 4.1750 4 307.0 21.00 376.73 13.04 14.50 + 1.38799 0.00 8.140 0 0.5380 5.9500 82.00 3.9900 4 307.0 21.00 232.60 27.71 13.20 + 1.15172 0.00 8.140 0 0.5380 5.7010 95.00 3.7872 4 307.0 21.00 358.77 18.35 13.10 + 1.61282 0.00 8.140 0 0.5380 6.0960 96.90 3.7598 4 307.0 21.00 248.31 20.34 13.50 + 0.06417 0.00 5.960 0 0.4990 5.9330 68.20 3.3603 5 279.0 19.20 396.90 9.68 18.90 + 0.09744 0.00 5.960 0 0.4990 5.8410 61.40 3.3779 5 279.0 19.20 377.56 11.41 20.00 + 0.08014 0.00 5.960 
0 0.4990 5.8500 41.50 3.9342 5 279.0 19.20 396.90 8.77 21.00 + 0.17505 0.00 5.960 0 0.4990 5.9660 30.20 3.8473 5 279.0 19.20 393.43 10.13 24.70 + 0.02763 75.00 2.950 0 0.4280 6.5950 21.80 5.4011 3 252.0 18.30 395.63 4.32 30.80 + 0.03359 75.00 2.950 0 0.4280 7.0240 15.80 5.4011 3 252.0 18.30 395.62 1.98 34.90 + 0.12744 0.00 6.910 0 0.4480 6.7700 2.90 5.7209 3 233.0 17.90 385.41 4.84 26.60 + 0.14150 0.00 6.910 0 0.4480 6.1690 6.60 5.7209 3 233.0 17.90 383.37 5.81 25.30 + 0.15936 0.00 6.910 0 0.4480 6.2110 6.50 5.7209 3 233.0 17.90 394.46 7.44 24.70 + 0.12269 0.00 6.910 0 0.4480 6.0690 40.00 5.7209 3 233.0 17.90 389.39 9.55 21.20 + 0.17142 0.00 6.910 0 0.4480 5.6820 33.80 5.1004 3 233.0 17.90 396.90 10.21 19.30 + 0.18836 0.00 6.910 0 0.4480 5.7860 33.30 5.1004 3 233.0 17.90 396.90 14.15 20.00 + 0.22927 0.00 6.910 0 0.4480 6.0300 85.50 5.6894 3 233.0 17.90 392.74 18.80 16.60 + 0.25387 0.00 6.910 0 0.4480 5.3990 95.30 5.8700 3 233.0 17.90 396.90 30.81 14.40 + 0.21977 0.00 6.910 0 0.4480 5.6020 62.00 6.0877 3 233.0 17.90 396.90 16.20 19.40 + 0.08873 21.00 5.640 0 0.4390 5.9630 45.70 6.8147 4 243.0 16.80 395.56 13.45 19.70 + 0.04337 21.00 5.640 0 0.4390 6.1150 63.00 6.8147 4 243.0 16.80 393.97 9.43 20.50 + 0.05360 21.00 5.640 0 0.4390 6.5110 21.10 6.8147 4 243.0 16.80 396.90 5.28 25.00 + 0.04981 21.00 5.640 0 0.4390 5.9980 21.40 6.8147 4 243.0 16.80 396.90 8.43 23.40 + 0.01360 75.00 4.000 0 0.4100 5.8880 47.60 7.3197 3 469.0 21.10 396.90 14.80 18.90 + 0.01311 90.00 1.220 0 0.4030 7.2490 21.90 8.6966 5 226.0 17.90 395.93 4.81 35.40 + 0.02055 85.00 0.740 0 0.4100 6.3830 35.70 9.1876 2 313.0 17.30 396.90 5.77 24.70 + 0.01432 100.00 1.320 0 0.4110 6.8160 40.50 8.3248 5 256.0 15.10 392.90 3.95 31.60 + 0.15445 25.00 5.130 0 0.4530 6.1450 29.20 7.8148 8 284.0 19.70 390.68 6.86 23.30 + 0.10328 25.00 5.130 0 0.4530 5.9270 47.20 6.9320 8 284.0 19.70 396.90 9.22 19.60 + 0.14932 25.00 5.130 0 0.4530 5.7410 66.20 7.2254 8 284.0 19.70 395.11 13.15 18.70 + 0.17171 25.00 5.130 0 0.4530 5.9660 93.40 6.8185 8 284.0 19.70 378.08 14.44 16.00 + 0.11027 25.00 5.130 0 0.4530 6.4560 67.80 7.2255 8 284.0 19.70 396.90 6.73 22.20 + 0.12650 25.00 5.130 0 0.4530 6.7620 43.40 7.9809 8 284.0 19.70 395.58 9.50 25.00 + 0.01951 17.50 1.380 0 0.4161 7.1040 59.50 9.2229 3 216.0 18.60 393.24 8.05 33.00 + 0.03584 80.00 3.370 0 0.3980 6.2900 17.80 6.6115 4 337.0 16.10 396.90 4.67 23.50 + 0.04379 80.00 3.370 0 0.3980 5.7870 31.10 6.6115 4 337.0 16.10 396.90 10.24 19.40 + 0.05789 12.50 6.070 0 0.4090 5.8780 21.40 6.4980 4 345.0 18.90 396.21 8.10 22.00 + 0.13554 12.50 6.070 0 0.4090 5.5940 36.80 6.4980 4 345.0 18.90 396.90 13.09 17.40 + 0.12816 12.50 6.070 0 0.4090 5.8850 33.00 6.4980 4 345.0 18.90 396.90 8.79 20.90 + 0.08826 0.00 10.810 0 0.4130 6.4170 6.60 5.2873 4 305.0 19.20 383.73 6.72 24.20 + 0.15876 0.00 10.810 0 0.4130 5.9610 17.50 5.2873 4 305.0 19.20 376.94 9.88 21.70 + 0.09164 0.00 10.810 0 0.4130 6.0650 7.80 5.2873 4 305.0 19.20 390.91 5.52 22.80 + 0.19539 0.00 10.810 0 0.4130 6.2450 6.20 5.2873 4 305.0 19.20 377.17 7.54 23.40 + 0.07896 0.00 12.830 0 0.4370 6.2730 6.00 4.2515 5 398.0 18.70 394.92 6.78 24.10 + 0.09512 0.00 12.830 0 0.4370 6.2860 45.00 4.5026 5 398.0 18.70 383.23 8.94 21.40 + 0.10153 0.00 12.830 0 0.4370 6.2790 74.50 4.0522 5 398.0 18.70 373.66 11.97 20.00 + 0.08707 0.00 12.830 0 0.4370 6.1400 45.80 4.0905 5 398.0 18.70 386.96 10.27 20.80 + 0.05646 0.00 12.830 0 0.4370 6.2320 53.70 5.0141 5 398.0 18.70 386.40 12.34 21.20 + 0.08387 0.00 12.830 0 0.4370 5.8740 36.60 4.5026 5 398.0 18.70 396.06 9.10 20.30 + 
0.04113 25.00 4.860 0 0.4260 6.7270 33.50 5.4007 4 281.0 19.00 396.90 5.29 28.00 + 0.04462 25.00 4.860 0 0.4260 6.6190 70.40 5.4007 4 281.0 19.00 395.63 7.22 23.90 + 0.03659 25.00 4.860 0 0.4260 6.3020 32.20 5.4007 4 281.0 19.00 396.90 6.72 24.80 + 0.03551 25.00 4.860 0 0.4260 6.1670 46.70 5.4007 4 281.0 19.00 390.64 7.51 22.90 + 0.05059 0.00 4.490 0 0.4490 6.3890 48.00 4.7794 3 247.0 18.50 396.90 9.62 23.90 + 0.05735 0.00 4.490 0 0.4490 6.6300 56.10 4.4377 3 247.0 18.50 392.30 6.53 26.60 + 0.05188 0.00 4.490 0 0.4490 6.0150 45.10 4.4272 3 247.0 18.50 395.99 12.86 22.50 + 0.07151 0.00 4.490 0 0.4490 6.1210 56.80 3.7476 3 247.0 18.50 395.15 8.44 22.20 + 0.05660 0.00 3.410 0 0.4890 7.0070 86.30 3.4217 2 270.0 17.80 396.90 5.50 23.60 + 0.05302 0.00 3.410 0 0.4890 7.0790 63.10 3.4145 2 270.0 17.80 396.06 5.70 28.70 + 0.04684 0.00 3.410 0 0.4890 6.4170 66.10 3.0923 2 270.0 17.80 392.18 8.81 22.60 + 0.03932 0.00 3.410 0 0.4890 6.4050 73.90 3.0921 2 270.0 17.80 393.55 8.20 22.00 + 0.04203 28.00 15.040 0 0.4640 6.4420 53.60 3.6659 4 270.0 18.20 395.01 8.16 22.90 + 0.02875 28.00 15.040 0 0.4640 6.2110 28.90 3.6659 4 270.0 18.20 396.33 6.21 25.00 + 0.04294 28.00 15.040 0 0.4640 6.2490 77.30 3.6150 4 270.0 18.20 396.90 10.59 20.60 + 0.12204 0.00 2.890 0 0.4450 6.6250 57.80 3.4952 2 276.0 18.00 357.98 6.65 28.40 + 0.11504 0.00 2.890 0 0.4450 6.1630 69.60 3.4952 2 276.0 18.00 391.83 11.34 21.40 + 0.12083 0.00 2.890 0 0.4450 8.0690 76.00 3.4952 2 276.0 18.00 396.90 4.21 38.70 + 0.08187 0.00 2.890 0 0.4450 7.8200 36.90 3.4952 2 276.0 18.00 393.53 3.57 43.80 + 0.06860 0.00 2.890 0 0.4450 7.4160 62.50 3.4952 2 276.0 18.00 396.90 6.19 33.20 + 0.14866 0.00 8.560 0 0.5200 6.7270 79.90 2.7778 5 384.0 20.90 394.76 9.42 27.50 + 0.11432 0.00 8.560 0 0.5200 6.7810 71.30 2.8561 5 384.0 20.90 395.58 7.67 26.50 + 0.22876 0.00 8.560 0 0.5200 6.4050 85.40 2.7147 5 384.0 20.90 70.80 10.63 18.60 + 0.21161 0.00 8.560 0 0.5200 6.1370 87.40 2.7147 5 384.0 20.90 394.47 13.44 19.30 + 0.13960 0.00 8.560 0 0.5200 6.1670 90.00 2.4210 5 384.0 20.90 392.69 12.33 20.10 + 0.13262 0.00 8.560 0 0.5200 5.8510 96.70 2.1069 5 384.0 20.90 394.05 16.47 19.50 + 0.17120 0.00 8.560 0 0.5200 5.8360 91.90 2.2110 5 384.0 20.90 395.67 18.66 19.50 + 0.13117 0.00 8.560 0 0.5200 6.1270 85.20 2.1224 5 384.0 20.90 387.69 14.09 20.40 + 0.12802 0.00 8.560 0 0.5200 6.4740 97.10 2.4329 5 384.0 20.90 395.24 12.27 19.80 + 0.26363 0.00 8.560 0 0.5200 6.2290 91.20 2.5451 5 384.0 20.90 391.23 15.55 19.40 + 0.10793 0.00 8.560 0 0.5200 6.1950 54.40 2.7778 5 384.0 20.90 393.49 13.00 21.70 + 0.10084 0.00 10.010 0 0.5470 6.7150 81.60 2.6775 6 432.0 17.80 395.59 10.16 22.80 + 0.12329 0.00 10.010 0 0.5470 5.9130 92.90 2.3534 6 432.0 17.80 394.95 16.21 18.80 + 0.22212 0.00 10.010 0 0.5470 6.0920 95.40 2.5480 6 432.0 17.80 396.90 17.09 18.70 + 0.14231 0.00 10.010 0 0.5470 6.2540 84.20 2.2565 6 432.0 17.80 388.74 10.45 18.50 + 0.17134 0.00 10.010 0 0.5470 5.9280 88.20 2.4631 6 432.0 17.80 344.91 15.76 18.30 + 0.13158 0.00 10.010 0 0.5470 6.1760 72.50 2.7301 6 432.0 17.80 393.30 12.04 21.20 + 0.15098 0.00 10.010 0 0.5470 6.0210 82.60 2.7474 6 432.0 17.80 394.51 10.30 19.20 + 0.13058 0.00 10.010 0 0.5470 5.8720 73.10 2.4775 6 432.0 17.80 338.63 15.37 20.40 + 0.14476 0.00 10.010 0 0.5470 5.7310 65.20 2.7592 6 432.0 17.80 391.50 13.61 19.30 + 0.06899 0.00 25.650 0 0.5810 5.8700 69.70 2.2577 2 188.0 19.10 389.15 14.37 22.00 + 0.07165 0.00 25.650 0 0.5810 6.0040 84.10 2.1974 2 188.0 19.10 377.67 14.27 20.30 + 0.09299 0.00 25.650 0 0.5810 5.9610 92.90 2.0869 2 188.0 19.10 
378.09 17.93 20.50 + 0.15038 0.00 25.650 0 0.5810 5.8560 97.00 1.9444 2 188.0 19.10 370.31 25.41 17.30 + 0.09849 0.00 25.650 0 0.5810 5.8790 95.80 2.0063 2 188.0 19.10 379.38 17.58 18.80 + 0.16902 0.00 25.650 0 0.5810 5.9860 88.40 1.9929 2 188.0 19.10 385.02 14.81 21.40 + 0.38735 0.00 25.650 0 0.5810 5.6130 95.60 1.7572 2 188.0 19.10 359.29 27.26 15.70 + 0.25915 0.00 21.890 0 0.6240 5.6930 96.00 1.7883 4 437.0 21.20 392.11 17.19 16.20 + 0.32543 0.00 21.890 0 0.6240 6.4310 98.80 1.8125 4 437.0 21.20 396.90 15.39 18.00 + 0.88125 0.00 21.890 0 0.6240 5.6370 94.70 1.9799 4 437.0 21.20 396.90 18.34 14.30 + 0.34006 0.00 21.890 0 0.6240 6.4580 98.90 2.1185 4 437.0 21.20 395.04 12.60 19.20 + 1.19294 0.00 21.890 0 0.6240 6.3260 97.70 2.2710 4 437.0 21.20 396.90 12.26 19.60 + 0.59005 0.00 21.890 0 0.6240 6.3720 97.90 2.3274 4 437.0 21.20 385.76 11.12 23.00 + 0.32982 0.00 21.890 0 0.6240 5.8220 95.40 2.4699 4 437.0 21.20 388.69 15.03 18.40 + 0.97617 0.00 21.890 0 0.6240 5.7570 98.40 2.3460 4 437.0 21.20 262.76 17.31 15.60 + 0.55778 0.00 21.890 0 0.6240 6.3350 98.20 2.1107 4 437.0 21.20 394.67 16.96 18.10 + 0.32264 0.00 21.890 0 0.6240 5.9420 93.50 1.9669 4 437.0 21.20 378.25 16.90 17.40 + 0.35233 0.00 21.890 0 0.6240 6.4540 98.40 1.8498 4 437.0 21.20 394.08 14.59 17.10 + 0.24980 0.00 21.890 0 0.6240 5.8570 98.20 1.6686 4 437.0 21.20 392.04 21.32 13.30 + 0.54452 0.00 21.890 0 0.6240 6.1510 97.90 1.6687 4 437.0 21.20 396.90 18.46 17.80 + 0.29090 0.00 21.890 0 0.6240 6.1740 93.60 1.6119 4 437.0 21.20 388.08 24.16 14.00 + 1.62864 0.00 21.890 0 0.6240 5.0190 100.00 1.4394 4 437.0 21.20 396.90 34.41 14.40 + 3.32105 0.00 19.580 1 0.8710 5.4030 100.00 1.3216 5 403.0 14.70 396.90 26.82 13.40 + 4.09740 0.00 19.580 0 0.8710 5.4680 100.00 1.4118 5 403.0 14.70 396.90 26.42 15.60 + 2.77974 0.00 19.580 0 0.8710 4.9030 97.80 1.3459 5 403.0 14.70 396.90 29.29 11.80 + 2.37934 0.00 19.580 0 0.8710 6.1300 100.00 1.4191 5 403.0 14.70 172.91 27.80 13.80 + 2.15505 0.00 19.580 0 0.8710 5.6280 100.00 1.5166 5 403.0 14.70 169.27 16.65 15.60 + 2.36862 0.00 19.580 0 0.8710 4.9260 95.70 1.4608 5 403.0 14.70 391.71 29.53 14.60 + 2.33099 0.00 19.580 0 0.8710 5.1860 93.80 1.5296 5 403.0 14.70 356.99 28.32 17.80 + 2.73397 0.00 19.580 0 0.8710 5.5970 94.90 1.5257 5 403.0 14.70 351.85 21.45 15.40 + 1.65660 0.00 19.580 0 0.8710 6.1220 97.30 1.6180 5 403.0 14.70 372.80 14.10 21.50 + 1.49632 0.00 19.580 0 0.8710 5.4040 100.00 1.5916 5 403.0 14.70 341.60 13.28 19.60 + 1.12658 0.00 19.580 1 0.8710 5.0120 88.00 1.6102 5 403.0 14.70 343.28 12.12 15.30 + 2.14918 0.00 19.580 0 0.8710 5.7090 98.50 1.6232 5 403.0 14.70 261.95 15.79 19.40 + 1.41385 0.00 19.580 1 0.8710 6.1290 96.00 1.7494 5 403.0 14.70 321.02 15.12 17.00 + 3.53501 0.00 19.580 1 0.8710 6.1520 82.60 1.7455 5 403.0 14.70 88.01 15.02 15.60 + 2.44668 0.00 19.580 0 0.8710 5.2720 94.00 1.7364 5 403.0 14.70 88.63 16.14 13.10 + 1.22358 0.00 19.580 0 0.6050 6.9430 97.40 1.8773 5 403.0 14.70 363.43 4.59 41.30 + 1.34284 0.00 19.580 0 0.6050 6.0660 100.00 1.7573 5 403.0 14.70 353.89 6.43 24.30 + 1.42502 0.00 19.580 0 0.8710 6.5100 100.00 1.7659 5 403.0 14.70 364.31 7.39 23.30 + 1.27346 0.00 19.580 1 0.6050 6.2500 92.60 1.7984 5 403.0 14.70 338.92 5.50 27.00 + 1.46336 0.00 19.580 0 0.6050 7.4890 90.80 1.9709 5 403.0 14.70 374.43 1.73 50.00 + 1.83377 0.00 19.580 1 0.6050 7.8020 98.20 2.0407 5 403.0 14.70 389.61 1.92 50.00 + 1.51902 0.00 19.580 1 0.6050 8.3750 93.90 2.1620 5 403.0 14.70 388.45 3.32 50.00 + 2.24236 0.00 19.580 0 0.6050 5.8540 91.80 2.4220 5 403.0 14.70 395.11 11.64 22.70 + 
2.92400 0.00 19.580 0 0.6050 6.1010 93.00 2.2834 5 403.0 14.70 240.16 9.81 25.00 + 2.01019 0.00 19.580 0 0.6050 7.9290 96.20 2.0459 5 403.0 14.70 369.30 3.70 50.00 + 1.80028 0.00 19.580 0 0.6050 5.8770 79.20 2.4259 5 403.0 14.70 227.61 12.14 23.80 + 2.30040 0.00 19.580 0 0.6050 6.3190 96.10 2.1000 5 403.0 14.70 297.09 11.10 23.80 + 2.44953 0.00 19.580 0 0.6050 6.4020 95.20 2.2625 5 403.0 14.70 330.04 11.32 22.30 + 1.20742 0.00 19.580 0 0.6050 5.8750 94.60 2.4259 5 403.0 14.70 292.29 14.43 17.40 + 2.31390 0.00 19.580 0 0.6050 5.8800 97.30 2.3887 5 403.0 14.70 348.13 12.03 19.10 + 0.13914 0.00 4.050 0 0.5100 5.5720 88.50 2.5961 5 296.0 16.60 396.90 14.69 23.10 + 0.09178 0.00 4.050 0 0.5100 6.4160 84.10 2.6463 5 296.0 16.60 395.50 9.04 23.60 + 0.08447 0.00 4.050 0 0.5100 5.8590 68.70 2.7019 5 296.0 16.60 393.23 9.64 22.60 + 0.06664 0.00 4.050 0 0.5100 6.5460 33.10 3.1323 5 296.0 16.60 390.96 5.33 29.40 + 0.07022 0.00 4.050 0 0.5100 6.0200 47.20 3.5549 5 296.0 16.60 393.23 10.11 23.20 + 0.05425 0.00 4.050 0 0.5100 6.3150 73.40 3.3175 5 296.0 16.60 395.60 6.29 24.60 + 0.06642 0.00 4.050 0 0.5100 6.8600 74.40 2.9153 5 296.0 16.60 391.27 6.92 29.90 + 0.05780 0.00 2.460 0 0.4880 6.9800 58.40 2.8290 3 193.0 17.80 396.90 5.04 37.20 + 0.06588 0.00 2.460 0 0.4880 7.7650 83.30 2.7410 3 193.0 17.80 395.56 7.56 39.80 + 0.06888 0.00 2.460 0 0.4880 6.1440 62.20 2.5979 3 193.0 17.80 396.90 9.45 36.20 + 0.09103 0.00 2.460 0 0.4880 7.1550 92.20 2.7006 3 193.0 17.80 394.12 4.82 37.90 + 0.10008 0.00 2.460 0 0.4880 6.5630 95.60 2.8470 3 193.0 17.80 396.90 5.68 32.50 + 0.08308 0.00 2.460 0 0.4880 5.6040 89.80 2.9879 3 193.0 17.80 391.00 13.98 26.40 + 0.06047 0.00 2.460 0 0.4880 6.1530 68.80 3.2797 3 193.0 17.80 387.11 13.15 29.60 + 0.05602 0.00 2.460 0 0.4880 7.8310 53.60 3.1992 3 193.0 17.80 392.63 4.45 50.00 + 0.07875 45.00 3.440 0 0.4370 6.7820 41.10 3.7886 5 398.0 15.20 393.87 6.68 32.00 + 0.12579 45.00 3.440 0 0.4370 6.5560 29.10 4.5667 5 398.0 15.20 382.84 4.56 29.80 + 0.08370 45.00 3.440 0 0.4370 7.1850 38.90 4.5667 5 398.0 15.20 396.90 5.39 34.90 + 0.09068 45.00 3.440 0 0.4370 6.9510 21.50 6.4798 5 398.0 15.20 377.68 5.10 37.00 + 0.06911 45.00 3.440 0 0.4370 6.7390 30.80 6.4798 5 398.0 15.20 389.71 4.69 30.50 + 0.08664 45.00 3.440 0 0.4370 7.1780 26.30 6.4798 5 398.0 15.20 390.49 2.87 36.40 + 0.02187 60.00 2.930 0 0.4010 6.8000 9.90 6.2196 1 265.0 15.60 393.37 5.03 31.10 + 0.01439 60.00 2.930 0 0.4010 6.6040 18.80 6.2196 1 265.0 15.60 376.70 4.38 29.10 + 0.01381 80.00 0.460 0 0.4220 7.8750 32.00 5.6484 4 255.0 14.40 394.23 2.97 50.00 + 0.04011 80.00 1.520 0 0.4040 7.2870 34.10 7.3090 2 329.0 12.60 396.90 4.08 33.30 + 0.04666 80.00 1.520 0 0.4040 7.1070 36.60 7.3090 2 329.0 12.60 354.31 8.61 30.30 + 0.03768 80.00 1.520 0 0.4040 7.2740 38.30 7.3090 2 329.0 12.60 392.20 6.62 34.60 + 0.03150 95.00 1.470 0 0.4030 6.9750 15.30 7.6534 3 402.0 17.00 396.90 4.56 34.90 + 0.01778 95.00 1.470 0 0.4030 7.1350 13.90 7.6534 3 402.0 17.00 384.30 4.45 32.90 + 0.03445 82.50 2.030 0 0.4150 6.1620 38.40 6.2700 2 348.0 14.70 393.77 7.43 24.10 + 0.02177 82.50 2.030 0 0.4150 7.6100 15.70 6.2700 2 348.0 14.70 395.38 3.11 42.30 + 0.03510 95.00 2.680 0 0.4161 7.8530 33.20 5.1180 4 224.0 14.70 392.78 3.81 48.50 + 0.02009 95.00 2.680 0 0.4161 8.0340 31.90 5.1180 4 224.0 14.70 390.55 2.88 50.00 + 0.13642 0.00 10.590 0 0.4890 5.8910 22.30 3.9454 4 277.0 18.60 396.90 10.87 22.60 + 0.22969 0.00 10.590 0 0.4890 6.3260 52.50 4.3549 4 277.0 18.60 394.87 10.97 24.40 + 0.25199 0.00 10.590 0 0.4890 5.7830 72.70 4.3549 4 277.0 18.60 389.43 
[Added data file contents: several hundred whitespace-separated records, one per '+' line in the patch, each with 14 columns consistent with the Boston housing dataset layout (CRIM, ZN, INDUS, CHAS, NOX, RM, AGE, DIS, RAD, TAX, PTRATIO, B, LSTAT, MEDV).]
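For orientation, a minimal sketch of how such a file could be read into the 2-D arrays that GPy models expect. This is illustrative only: it assumes the added file is plain whitespace-separated text with one 14-column record per line, and 'housing.data' is a hypothetical filename, since the actual path added by this patch is not shown here.

import numpy as np

# Hypothetical filename -- the real path of the data file added by this patch
# is not visible in the excerpt above.
data = np.loadtxt('housing.data')

# Each record has 14 whitespace-separated columns; the last (MEDV, the median
# house value) is the usual regression target.
X = data[:, :13]     # (N, 13) input features
Y = data[:, 13:14]   # (N, 1) column vector, since GPy expects 2-D targets

print(X.shape, Y.shape)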