Allowed gpyified var gauss

Alan Saul 2015-09-07 14:00:04 +01:00
commit a156cc5dc9
68 changed files with 2201 additions and 1294 deletions

View file

@@ -17,7 +17,7 @@ before_install:
   - sudo ln -s /run/shm /dev/shm
 install:
-  - conda install --yes python=$TRAVIS_PYTHON_VERSION atlas numpy=1.7 scipy=0.12 matplotlib nose sphinx pip nose
+  - conda install --yes python=$TRAVIS_PYTHON_VERSION atlas numpy=1.9 scipy=0.16 matplotlib nose sphinx pip nose
   #- pip install .
   - python setup.py build_ext --inplace
   #--use-mirrors

View file

@@ -5,3 +5,4 @@ Nicolas Durrande
 Alan Saul
 Max Zwiessele
 Neil D. Lawrence
+Zhenwen Dai

View file

@@ -106,8 +106,15 @@ class GP(Model):
         self.link_parameter(self.likelihood)

         self.posterior = None
+        # The predictive variable to be used with the posterior object's
+        # woodbury_vector and woodbury_inv is stored as _predictive_variable.
+        # This is usually just a link to self.X (full GP) or self.Z (sparse GP).
+        # Make sure to set this variable, and the predict functions will "just work"
+        # as long as the posterior has the right woodbury entries.
+        self._predictive_variable = self.X
-    def set_XY(self, X=None, Y=None, trigger_update=True):
+    def set_XY(self, X=None, Y=None):
         """
         Set the input / output data of the model
         This is useful if we wish to change our existing data but maintain the same model

@@ -117,7 +124,7 @@ class GP(Model):
         :param Y: output observations
         :type Y: np.ndarray
         """
-        if trigger_update: self.update_model(False)
+        self.update_model(False)
         if Y is not None:
             if self.normalizer is not None:
                 self.normalizer.scale_by(Y)
@@ -133,34 +140,33 @@ class GP(Model):
                 assert isinstance(X, type(self.X)), "The given X must have the same type as the X in the model!"
                 self.unlink_parameter(self.X)
                 self.X = X
-                self.link_parameters(self.X)
+                self.link_parameter(self.X)
             else:
                 self.unlink_parameter(self.X)
                 from ..core import Param
                 self.X = Param('latent mean', X)
-                self.link_parameters(self.X)
+                self.link_parameter(self.X)
         else:
             self.X = ObsAr(X)
-        if trigger_update: self.update_model(True)
-        if trigger_update: self._trigger_params_changed()
+        self.update_model(True)

-    def set_X(self, X, trigger_update=True):
+    def set_X(self, X):
         """
         Set the input data of the model

         :param X: input observations
         :type X: np.ndarray
         """
-        self.set_XY(X=X, trigger_update=trigger_update)
+        self.set_XY(X=X)

-    def set_Y(self, Y, trigger_update=True):
+    def set_Y(self, Y):
         """
         Set the output data of the model

         :param Y: output observations
         :type Y: np.ndarray
         """
-        self.set_XY(Y=Y, trigger_update=trigger_update)
+        self.set_XY(Y=Y)

     def parameters_changed(self):
         """
@@ -183,7 +189,7 @@ class GP(Model):
         """
         return self._log_marginal_likelihood

-    def _raw_predict(self, _Xnew, full_cov=False, kern=None):
+    def _raw_predict(self, Xnew, full_cov=False, kern=None):
         """
         For making predictions, does not account for normalization or likelihood
@@ -199,24 +205,33 @@ class GP(Model):
         if kern is None:
             kern = self.kern
-        Kx = kern.K(_Xnew, self.X).T
-        WiKx = np.dot(self.posterior.woodbury_inv, Kx)
+        Kx = kern.K(self.X, Xnew)
         mu = np.dot(Kx.T, self.posterior.woodbury_vector)
+        if len(mu.shape) == 1:
+            mu = mu.reshape(-1, 1)
         if full_cov:
-            Kxx = kern.K(_Xnew)
-            var = Kxx - np.dot(Kx.T, WiKx)
+            Kxx = kern.K(Xnew)
+            if self.posterior.woodbury_inv.ndim == 2:
+                var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
+            elif self.posterior.woodbury_inv.ndim == 3:
+                var = np.empty((Kxx.shape[0], Kxx.shape[1], self.posterior.woodbury_inv.shape[2]))
+                from ..util.linalg import mdot
+                for i in range(var.shape[2]):
+                    var[:, :, i] = Kxx - mdot(Kx.T, self.posterior.woodbury_inv[:, :, i], Kx)
         else:
-            Kxx = kern.Kdiag(_Xnew)
-            var = Kxx - np.sum(WiKx*Kx, 0)
-            var = var.reshape(-1, 1)
-        var[var<0.] = 0.
-        #force mu to be a column vector
-        if len(mu.shape)==1: mu = mu[:,None]
-        #add the mean function in
-        if not self.mean_function is None:
-            mu += self.mean_function.f(_Xnew)
+            Kxx = kern.Kdiag(Xnew)
+            if self.posterior.woodbury_inv.ndim == 2:
+                var = (Kxx - np.sum(np.dot(self.posterior.woodbury_inv.T, Kx) * Kx, 0))[:, None]
+            elif self.posterior.woodbury_inv.ndim == 3:
+                var = np.empty((Kxx.shape[0], self.posterior.woodbury_inv.shape[2]))
+                for i in range(var.shape[1]):
+                    var[:, i] = Kxx - np.sum(np.dot(self.posterior.woodbury_inv[:, :, i].T, Kx) * Kx, 0)
+        #add in the mean function
+        if self.mean_function is not None:
+            mu += self.mean_function.f(Xnew)
         return mu, var
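The reworked branches above still reduce, for a 2-d woodbury_inv, to the two textbook identities mu = Kx^T wv and var = Kxx - Kx^T Wi Kx promised by the _predictive_variable comment. A minimal standalone sketch of those identities in plain NumPy (toy RBF kernel and exact-GP woodbury quantities; an illustration, not GPy's actual code path):

    import numpy as np

    def rbf(A, B, lengthscale=1.0):
        # k(a, b) = exp(-0.5 * |a - b|^2 / lengthscale^2)
        sqdist = ((A[:, None, :] - B[None, :, :]) ** 2).sum(-1)
        return np.exp(-0.5 * sqdist / lengthscale ** 2)

    X = np.linspace(0, 1, 5)[:, None]
    Y = np.sin(6 * X)
    noise = 0.1

    # For exact GP regression the woodbury quantities are
    #   woodbury_inv    = (K + noise*I)^-1
    #   woodbury_vector = (K + noise*I)^-1 Y
    K = rbf(X, X)
    woodbury_inv = np.linalg.inv(K + noise * np.eye(X.shape[0]))
    woodbury_vector = woodbury_inv.dot(Y)

    # Prediction mirrors _raw_predict: mu = Kx^T wv, var = Kxx - Kx^T Wi Kx
    Xnew = np.linspace(0, 1, 50)[:, None]
    Kx = rbf(X, Xnew)                                             # (N, N*)
    mu = Kx.T.dot(woodbury_vector)                                # posterior mean, (N*, 1)
    var_full = rbf(Xnew, Xnew) - Kx.T.dot(woodbury_inv).dot(Kx)   # full_cov=True
    var_diag = np.diag(var_full)[:, None]                         # full_cov=False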
     def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None):
@@ -249,7 +264,7 @@ class GP(Model):
         mean, var = self.likelihood.predictive_values(mu, var, full_cov, Y_metadata=Y_metadata)
         return mean, var

-    def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None):
+    def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, kern=None):
         """
         Get the predictive quantiles around the prediction at X
@@ -257,10 +272,12 @@ class GP(Model):
         :type X: np.ndarray (Xnew x self.input_dim)
         :param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval
         :type quantiles: tuple
+        :param kern: optional kernel to use for prediction
+        :type kern: GPy.kern.Kern
         :returns: list of quantiles for each X and predictive quantiles for interval combination
         :rtype: [np.ndarray (Xnew x self.output_dim), np.ndarray (Xnew x self.output_dim)]
         """
-        m, v = self._raw_predict(X, full_cov=False)
+        m, v = self._raw_predict(X, full_cov=False, kern=kern)
         if self.normalizer is not None:
             m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)
         return self.likelihood.predictive_quantiles(m, v, quantiles, Y_metadata=Y_metadata)
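With kern now threaded through to _raw_predict, quantiles can be computed under a user-chosen kernel. A hedged usage sketch (GPRegression and RBF are existing GPy classes; the kern= keyword is the one added in this hunk):

    import numpy as np
    import GPy

    X = np.random.rand(50, 1)
    Y = np.sin(6 * X) + 0.05 * np.random.randn(50, 1)
    m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))
    m.optimize()

    Xnew = np.linspace(0, 1, 100)[:, None]
    # default: the 95% interval from the (2.5, 97.5) quantiles
    lower, upper = m.predict_quantiles(Xnew)
    # new in this commit: quantiles under a user-supplied kernel
    lower_k, upper_k = m.predict_quantiles(Xnew, kern=m.kern.copy())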
@@ -294,6 +311,110 @@ class GP(Model):
         return dmu_dX, dv_dX
+    def predict_jacobian(self, Xnew, kern=None, full_cov=True):
+        """
+        Compute the derivatives of the posterior of the GP.
+
+        Given a set of points at which to predict X* (size [N*,Q]), compute the
+        mean and variance of the derivative. Resulting arrays are sized:
+            dmu_dX* -- [N*, Q, D], where D is the number of outputs in this GP (usually one).
+        Note that this is the mean and variance of the derivative,
+        not the derivative of the mean and variance! (See predictive_gradients for that.)
+            dv_dX* -- [N*, Q], (since all outputs have the same variance)
+        If there is missing data this is not implemented for now, but
+        there would be one output variance per output dimension.
+
+        :param X: The points at which to get the predictive gradients.
+        :type X: np.ndarray (Xnew x self.input_dim)
+        :param kern: The kernel to compute the jacobian for.
+        :param boolean full_cov: whether to return the full covariance of the jacobian.
+        :returns: dmu_dX, dv_dX
+        :rtype: [np.ndarray (N*, Q, D), np.ndarray (N*, Q, (D))]
+
+        Note: we always return the jacobian summed over input_dim, as the off-diagonals
+        in input_dim are not needed for further calculations.
+        This is a compromise made for speed; mathematically the jacobian would
+        have another dimension in Q.
+        """
+        if kern is None:
+            kern = self.kern
+        mean_jac = np.empty((Xnew.shape[0], Xnew.shape[1], self.output_dim))
+        for i in range(self.output_dim):
+            mean_jac[:, :, i] = kern.gradients_X(self.posterior.woodbury_vector[:, i:i+1].T, Xnew, self._predictive_variable)
+        dK_dXnew_full = np.empty((self._predictive_variable.shape[0], Xnew.shape[0], Xnew.shape[1]))
+        for i in range(self._predictive_variable.shape[0]):
+            dK_dXnew_full[i] = kern.gradients_X([[1.]], Xnew, self._predictive_variable[[i]])
+        if full_cov:
+            dK2_dXdX = kern.gradients_XX([[1.]], Xnew)
+        else:
+            dK2_dXdX = kern.gradients_XX_diag([[1.]], Xnew)
+        def compute_cov_inner(wi):
+            if full_cov:
+                # full covariance gradients:
+                var_jac = dK2_dXdX - np.einsum('qnm,miq->niq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
+            else:
+                var_jac = dK2_dXdX - np.einsum('qim,miq->iq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
+            return var_jac
+        if self.posterior.woodbury_inv.ndim == 3:
+            var_jac = []
+            for d in range(self.posterior.woodbury_inv.shape[2]):
+                var_jac.append(compute_cov_inner(self.posterior.woodbury_inv[:, :, d]))
+            var_jac = np.concatenate(var_jac)
+        else:
+            var_jac = compute_cov_inner(self.posterior.woodbury_inv)
+        return mean_jac, var_jac
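For reference, the arrays filled in above implement the following identities (wv = woodbury_vector, Wi = woodbury_inv; a sketch in the docstring's notation, not a restatement of the exact einsum bookkeeping):

    \frac{\partial \mu_d(x_*)}{\partial x_*} = \frac{\partial k(x_*, X)}{\partial x_*}\, wv_{:,d},
    \qquad
    \mathrm{var}\!\left[\frac{\partial f}{\partial x_*}\right]
      = \frac{\partial^2 k(x_*, x_*')}{\partial x_*\,\partial x_*'}\Big|_{x_*'=x_*}
        - \frac{\partial k(x_*, X)}{\partial x_*}\, Wi\, \frac{\partial k(X, x_*)}{\partial x_*'}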
+    def predict_wishard_embedding(self, Xnew, kern=None, mean=True, covariance=True):
+        """
+        Predict the Wishart embedding G of the GP. This is the density of the
+        input of the GP defined by the probabilistic function mapping f.
+
+            G = J_mean.T * J_mean + output_dim * J_cov
+
+        :param array-like Xnew: The points at which to evaluate the magnification.
+        :param :py:class:`~GPy.kern.Kern` kern: The kernel to use for the magnification.
+            Supplying only a part of the learned kernel gives insight into the density
+            of that specific kernel part of the input function. E.g. one can see how dense the
+            linear part of a kernel is compared to the non-linear part etc.
+        """
+        if kern is None:
+            kern = self.kern
+        mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False)
+        mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)
+        if var_jac.ndim == 3:
+            Sigma = np.einsum('iqd,ipd->iqp', var_jac, var_jac)
+        else:
+            Sigma = self.output_dim*np.einsum('iq,ip->iqp', var_jac, var_jac)
+        G = 0.
+        if mean:
+            G += mumuT
+        if covariance:
+            G += Sigma
+        return G
+
+    def predict_magnification(self, Xnew, kern=None, mean=True, covariance=True):
+        """
+        Predict the magnification factor as
+
+            sqrt(det(G))
+
+        for each point N in Xnew.
+        """
+        G = self.predict_wishard_embedding(Xnew, kern, mean, covariance)
+        from ..util.linalg import jitchol
+        return np.array([np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :])))))) for n in range(Xnew.shape[0])])
+        #return np.array([np.sqrt(np.linalg.det(G[n, :, :])) for n in range(Xnew.shape[0])])
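The jitchol expression is a numerically stable determinant: with G = L L^\top from the (jitter-stabilised) Cholesky factorisation, \det(G) = \prod_i L_{ii}^2, so

    \sqrt{\det(G)} = \exp\Big(\textstyle\sum_i \log L_{ii}\Big),

which is exactly what the list comprehension computes; the commented-out np.linalg.det line is the same quantity without the jitter safeguard.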
     def posterior_samples_f(self, X, size=10, full_cov=True):
         """
         Samples the posterior GP at the points X.
@@ -397,8 +518,8 @@ class GP(Model):
     def plot(self, plot_limits=None, which_data_rows='all',
              which_data_ycols='all', fixed_inputs=[],
              levels=20, samples=0, fignum=None, ax=None, resolution=None,
-             plot_raw=False,
-             linecol=None, fillcol=None, Y_metadata=None, data_symbol='kx', predict_kw=None):
+             plot_raw=False, linecol=None, fillcol=None, Y_metadata=None,
+             data_symbol='kx', predict_kw=None, plot_training_data=True):
         """
         Plot the posterior of the GP.
           - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
@@ -435,6 +556,8 @@ class GP(Model):
         :type Y_metadata: dict
         :param data_symbol: symbol as used by matplotlib, by default this is a black cross ('kx')
         :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib
+        :param plot_training_data: whether or not to plot the training points
+        :type plot_training_data: boolean
         """
         assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
         from ..plotting.matplot_dep import models_plots
@@ -447,7 +570,103 @@ class GP(Model):
                                       which_data_ycols, fixed_inputs,
                                       levels, samples, fignum, ax, resolution,
                                       plot_raw=plot_raw, Y_metadata=Y_metadata,
-                                      data_symbol=data_symbol, predict_kw=predict_kw, **kw)
+                                      data_symbol=data_symbol, predict_kw=predict_kw,
+                                      plot_training_data=plot_training_data, **kw)
+    def plot_data(self, which_data_rows='all',
+                  which_data_ycols='all', visible_dims=None,
+                  fignum=None, ax=None, data_symbol='kx'):
+        """
+        Plot the training data.
+          - For higher dimensions than two, use visible_dims to select which input dimensions to plot.
+
+        Can plot only part of the data
+        using which_data_rows and which_data_ycols.
+
+        :param which_data_rows: which of the training data to plot (default all)
+        :type which_data_rows: 'all' or a slice object to slice model.X, model.Y
+        :param which_data_ycols: when the data has several columns (independent outputs), only plot these
+        :type which_data_ycols: 'all' or a list of integers
+        :param visible_dims: an array specifying the input dimensions to plot (maximum two)
+        :type visible_dims: a numpy array
+        :param fignum: figure to plot on.
+        :type fignum: figure number
+        :param ax: axes to plot on.
+        :type ax: axes handle
+        :param data_symbol: symbol as used by matplotlib, by default this is a black cross ('kx')
+        :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib
+        """
+        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
+        from ..plotting.matplot_dep import models_plots
+        kw = {}
+        return models_plots.plot_data(self, which_data_rows,
+                                      which_data_ycols, visible_dims,
+                                      fignum, ax, data_symbol, **kw)
+    def errorbars_trainset(self, which_data_rows='all',
+                           which_data_ycols='all', fixed_inputs=[], fignum=None, ax=None,
+                           linecol=None, data_symbol='kx', predict_kw=None, plot_training_data=True, lw=None):
+        """
+        Plot the posterior error bars corresponding to the training data.
+          - For higher dimensions than two, use fixed_inputs to plot the data points with some of the inputs fixed.
+
+        Can plot only part of the data
+        using which_data_rows and which_data_ycols.
+
+        :param which_data_rows: which of the training data to plot (default all)
+        :type which_data_rows: 'all' or a slice object to slice model.X, model.Y
+        :param which_data_ycols: when the data has several columns (independent outputs), only plot these
+        :type which_data_ycols: 'all' or a list of integers
+        :param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying that input index i should be set to value v.
+        :type fixed_inputs: a list of tuples
+        :param fignum: figure to plot on.
+        :type fignum: figure number
+        :param ax: axes to plot on.
+        :type ax: axes handle
+        :param plot_training_data: whether or not to plot the training points
+        :type plot_training_data: boolean
+        """
+        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
+        from ..plotting.matplot_dep import models_plots
+        kw = {}
+        if lw is not None:
+            kw['lw'] = lw
+        return models_plots.errorbars_trainset(self, which_data_rows, which_data_ycols, fixed_inputs,
+                                               fignum, ax, linecol, data_symbol,
+                                               predict_kw, plot_training_data, **kw)
+    def plot_magnification(self, labels=None, which_indices=None,
+                           resolution=50, ax=None, marker='o', s=40,
+                           fignum=None, legend=True,
+                           plot_limits=None,
+                           aspect='auto', updates=False, plot_inducing=True, kern=None, **kwargs):
+        import sys
+        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
+        from ..plotting.matplot_dep import dim_reduction_plots
+        return dim_reduction_plots.plot_magnification(self, labels, which_indices,
+                                                      resolution, ax, marker, s,
+                                                      fignum, plot_inducing, legend,
+                                                      plot_limits, aspect, updates, **kwargs)

     def input_sensitivity(self, summarize=True):
         """

View file

@@ -32,7 +32,7 @@ class Bijective_mapping(Mapping):
     also back from f to X. The inverse mapping is called g().
     """
     def __init__(self, input_dim, output_dim, name='bijective_mapping'):
-        super(Bijective_apping, self).__init__(name=name)
+        super(Bijective_mapping, self).__init__(name=name)

     def g(self, f):
         """Inverse mapping from output domain of the function to the inputs."""

View file

@@ -180,6 +180,7 @@ class Param(Parameterizable, ObsAr):
         import copy
         Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
         return s

     def _setup_observers(self):
         """
         Setup the default observers

View file

@@ -59,6 +59,8 @@ class SparseGP(GP):
             logger.info("Adding Z as parameter")
             self.link_parameter(self.Z, index=0)
         self.posterior = None
+        self._predictive_variable = self.Z

     def has_uncertain_inputs(self):
         return isinstance(self.X, VariationalPosterior)
@@ -117,15 +119,16 @@ class SparseGP(GP):
         if there is missing data, each dimension has its own full_cov of shape NxNxD, and if full_cov is off,
         we take only the diagonal elements across N.

-        For uncertain inputs, the SparseGP bound produces a full covariance structure across D, so for full_cov we
-        return a NxDxD matrix and in the not full_cov case, we return the diagonal elements across D (NxD).
-        This is for both with and without missing data. See for missing data the SparseGP implementation py:class:'~GPy.models.sparse_gp_minibatch.SparseGPMiniBatch'.
+        For uncertain inputs, the SparseGP bound cannot predict the full covariance matrix full_cov for now;
+        the implementation of that will follow. However, for each dimension the
+        covariance changes, so if full_cov is False (the standard case), we return the variance
+        for each dimension [NxD].
         """
         if kern is None: kern = self.kern
         if not isinstance(Xnew, VariationalPosterior):
-            Kx = kern.K(self.Z, Xnew)
+            Kx = kern.K(self._predictive_variable, Xnew)
             mu = np.dot(Kx.T, self.posterior.woodbury_vector)
             if full_cov:
                 Kxx = kern.K(Xnew)
@@ -149,13 +152,14 @@ class SparseGP(GP):
             if self.mean_function is not None:
                 mu += self.mean_function.f(Xnew)
         else:
-            psi0_star = kern.psi0(self.Z, Xnew)
-            psi1_star = kern.psi1(self.Z, Xnew)
+            psi0_star = kern.psi0(self._predictive_variable, Xnew)
+            psi1_star = kern.psi1(self._predictive_variable, Xnew)
             #psi2_star = kern.psi2(self.Z, Xnew) # Only possible if we get NxMxM psi2 out of the code.
             la = self.posterior.woodbury_vector
             mu = np.dot(psi1_star, la) # TODO: dimensions?
             if full_cov:
+                raise NotImplementedError, "Full covariance for Sparse GP predicted with uncertain inputs not implemented yet."
                 var = np.empty((Xnew.shape[0], la.shape[1], la.shape[1]))
                 di = np.diag_indices(la.shape[1])
             else:
@@ -163,7 +167,7 @@ class SparseGP(GP):
                 for i in range(Xnew.shape[0]):
                     _mu, _var = Xnew.mean.values[[i]], Xnew.variance.values[[i]]
-                    psi2_star = kern.psi2(self.Z, NormalPosterior(_mu, _var))
+                    psi2_star = kern.psi2(self._predictive_variable, NormalPosterior(_mu, _var))
                     tmp = (psi2_star[:, :] - psi1_star[[i]].T.dot(psi1_star[[i]]))
                     var_ = mdot(la.T, tmp, la)

View file

@@ -24,7 +24,6 @@ class VerboseOptimization(object):
             self.model.add_observer(self, self.print_status)
             self.status = 'running'
             self.clear = clear_after_finish
-            self.deltat = .2

             self.update()
@@ -80,6 +79,7 @@ class VerboseOptimization(object):
     def __enter__(self):
         self.start = time.time()
+        self._time = self.start
         return self

     def print_out(self, seconds):
@@ -143,12 +143,12 @@ class VerboseOptimization(object):
     def print_status(self, me, which=None):
         self.update()

-        seconds = time.time()-self.start
+        t = time.time()
+        seconds = t-self.start
         #sys.stdout.write(" "*len(self.message))
-        self.deltat += seconds
-        if self.deltat > .2:
+        if t-self._time > .3 or seconds < .3:
             self.print_out(seconds)
-            self.deltat = 0
+            self._time = t

         self.iteration += 1

View file

@@ -69,7 +69,7 @@ from .expectation_propagation_dtc import EPDTC
 from .dtc import DTC
 from .fitc import FITC
 from .var_dtc_parallel import VarDTC_minibatch
-#from .svgp import SVGP
+from .var_gauss import VarGauss

 # class FullLatentFunctionData(object):
 #

View file

@@ -4,6 +4,8 @@
 import numpy as np
 from ...core import Model
 from ...core.parameterization import variational
+from ...util.linalg import tdot
+from GPy.core.parameterization.variational import VariationalPosterior

 def infer_newX(model, Y_new, optimize=True, init='L2'):
     """
@@ -60,7 +62,8 @@ class InferenceX(Model):
         # self.kern.GPU(True)
         from copy import deepcopy
         self.posterior = deepcopy(model.posterior)
-        if hasattr(model, 'variational_prior'):
+        from ...core.parameterization.variational import VariationalPosterior
+        if isinstance(model.X, VariationalPosterior):
             self.uncertain_input = True
             from ...models.ss_gplvm import IBPPrior
             from ...models.ss_mrd import IBPPrior_SSMRD
@@ -71,7 +74,7 @@ class InferenceX(Model):
             self.variational_prior = model.variational_prior.copy()
         else:
             self.uncertain_input = False
-        if hasattr(model, 'inducing_inputs'):
+        if hasattr(model, 'Z'):
             self.sparse_gp = True
             self.Z = model.Z.copy()
         else:
@@ -125,13 +128,13 @@ class InferenceX(Model):
             wv = wv[:,self.valid_dim]
             output_dim = self.valid_dim.sum()
             if self.ninan is not None:
-                self.dL_dpsi2 = beta/2.*(self.posterior.woodbury_inv[:,:,self.valid_dim] - np.einsum('md,od->mo',wv, wv)[:, :, None]).sum(-1)
+                self.dL_dpsi2 = beta/2.*(self.posterior.woodbury_inv[:,:,self.valid_dim] - tdot(wv)[:, :, None]).sum(-1)
             else:
-                self.dL_dpsi2 = beta/2.*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))
+                self.dL_dpsi2 = beta/2.*(output_dim*self.posterior.woodbury_inv - tdot(wv))
             self.dL_dpsi1 = beta*np.dot(self.Y[:,self.valid_dim], wv.T)
             self.dL_dpsi0 = - beta/2.* np.ones(self.Y.shape[0])
         else:
-            self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - np.einsum('md,od->mo',wv, wv))/2.
+            self.dL_dpsi2 = beta*(output_dim*self.posterior.woodbury_inv - tdot(wv))/2. #np.einsum('md,od->mo',wv, wv)
             self.dL_dpsi1 = beta*np.dot(self.Y, wv.T)
             self.dL_dpsi0 = -beta/2.*output_dim* np.ones(self.Y.shape[0])

View file

@@ -171,7 +171,9 @@ class Laplace(LatentFunctionInference):
         #define the objective function (to be maximised)
         def obj(Ki_f, f):
             ll = -0.5*np.sum(np.dot(Ki_f.T, f)) + np.sum(likelihood.logpdf(f, Y, Y_metadata=Y_metadata))
+            print ll
             if np.isnan(ll):
+                import ipdb; ipdb.set_trace() # XXX BREAKPOINT
                 return -np.inf
             else:
                 return ll

View file

@@ -64,9 +64,7 @@ class VarDTC(LatentFunctionInference):
     def get_VVTfactor(self, Y, prec):
         return Y * prec # TODO cache this, and make it effective

-    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None, dL_dKmm=None):
+    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):
         _, output_dim = Y.shape
         uncertain_inputs = isinstance(X, VariationalPosterior)
@@ -95,17 +93,28 @@ class VarDTC(LatentFunctionInference):
         # The rather complex computations of A, and the psi stats
         if uncertain_inputs:
-            psi0 = kern.psi0(Z, X)
-            psi1 = kern.psi1(Z, X)
+            if psi0 is None:
+                psi0 = kern.psi0(Z, X)
+            if psi1 is None:
+                psi1 = kern.psi1(Z, X)
             if het_noise:
-                psi2_beta = np.sum([kern.psi2(Z,X[i:i+1,:]) * beta_i for i,beta_i in enumerate(beta)],0)
+                if psi2 is not None:
+                    assert len(psi2.shape) == 3 # Need to have not summed out N
+                    #FIXME: Need testing
+                    psi2_beta = np.sum([psi2[X[i:i+1,:], :, :] * beta_i for i,beta_i in enumerate(beta)],0)
+                else:
+                    psi2_beta = np.sum([kern.psi2(Z,X[i:i+1,:]) * beta_i for i,beta_i in enumerate(beta)],0)
             else:
-                psi2_beta = kern.psi2(Z,X) * beta
+                if psi2 is None:
+                    psi2 = kern.psi2(Z,X)
+                psi2_beta = psi2 * beta
             LmInv = dtrtri(Lm)
             A = LmInv.dot(psi2_beta.dot(LmInv.T))
         else:
-            psi0 = kern.Kdiag(X)
-            psi1 = kern.K(X, Z)
+            if psi0 is None:
+                psi0 = kern.Kdiag(X)
+            if psi1 is None:
+                psi1 = kern.K(X, Z)
             if het_noise:
                 tmp = psi1 * (np.sqrt(beta))
             else:

View file

@@ -172,17 +172,22 @@ class VarDTC_minibatch(LatentFunctionInference):
         if not np.isfinite(Kmm).all():
             print(Kmm)
         Lm = jitchol(Kmm)
+        LmInv = dtrtri(Lm)

-        LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right')
+        LmInvPsi2LmInvT = LmInv.dot(psi2_full.dot(LmInv.T))
         Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT
         LL = jitchol(Lambda)
+        LLInv = dtrtri(LL)
         logdet_L = 2.*np.sum(np.log(np.diag(LL)))
+        LmLLInv = LLInv.dot(LmInv)

-        b = dtrtrs(LL,dtrtrs(Lm,psi1Y_full.T)[0])[0]
-        bbt = np.square(b).sum()
-        v = dtrtrs(Lm,dtrtrs(LL,b,trans=1)[0],trans=1)[0]
-        tmp = -backsub_both_sides(LL, tdot(b)+output_dim*np.eye(input_dim), transpose='left')
-        dL_dpsi2R = backsub_both_sides(Lm, tmp+output_dim*np.eye(input_dim), transpose='left')/2.
+        b = psi1Y_full.dot(LmLLInv.T)
+        bbt = np.square(b).sum()
+        v = b.dot(LmLLInv).T
+        LLinvPsi1TYYTPsi1LLinvT = tdot(b.T)
+        tmp = -LLInv.T.dot(LLinvPsi1TYYTPsi1LLinvT+output_dim*np.eye(input_dim)).dot(LLInv)
+        dL_dpsi2R = LmInv.T.dot(tmp+output_dim*np.eye(input_dim)).dot(LmInv)/2.

         # Cache intermediate results
         self.midRes['dL_dpsi2R'] = dL_dpsi2R
@@ -201,7 +206,7 @@ class VarDTC_minibatch(LatentFunctionInference):
         # Compute dL_dKmm
         #======================================================================

-        dL_dKmm = dL_dpsi2R - output_dim*backsub_both_sides(Lm, LmInvPsi2LmInvT, transpose='left')/2.
+        dL_dKmm = dL_dpsi2R - output_dim*LmInv.T.dot(LmInvPsi2LmInvT).dot(LmInv)/2.

         #======================================================================
         # Compute the Posterior distribution of inducing points p(u|Y)
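Both forms of these expressions compute the same symmetric sandwich: assuming backsub_both_sides(L, A, transpose='right') solves to L^{-1} A L^{-\top} (and 'left' to L^{-\top} A L^{-1}), the rewrite merely materialises the triangular inverses once via dtrtri,

    L_m^{-1}\,\Psi_2\,L_m^{-\top} = \texttt{LmInv.dot(psi2\_full.dot(LmInv.T))}, \qquad \texttt{LmInv} = \texttt{dtrtri(Lm)},

trading repeated triangular solves for two explicit inversions reused across the later gradient terms.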

View file

@@ -0,0 +1,69 @@
+# Copyright (c) 2015, James Hensman
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+import numpy as np
+from ...util.linalg import pdinv
+from .posterior import Posterior
+from . import LatentFunctionInference
+log_2_pi = np.log(2*np.pi)
+
+class VarGauss(LatentFunctionInference):
+    """
+    The Variational Gaussian Approximation revisited
+
+    @article{Opper:2009,
+        title = {The Variational Gaussian Approximation Revisited},
+        author = {Opper, Manfred and Archambeau, C{\'e}dric},
+        journal = {Neural Comput.},
+        year = {2009},
+        pages = {786--792},
+    }
+    """
+    def __init__(self, alpha, beta):
+        """
+        :param alpha: GPy.core.Param variational parameter
+        :param beta: GPy.core.Param variational parameter
+        """
+        self.alpha, self.beta = alpha, beta
+
+    def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, Z=None):
+        if mean_function is not None:
+            raise NotImplementedError
+        num_data, output_dim = Y.shape
+        assert output_dim == 1, "Only one output supported"
+        K = kern.K(X)
+        m = K.dot(self.alpha)
+        KB = K*self.beta[:, None]
+        BKB = KB*self.beta[None, :]
+        A = np.eye(num_data) + BKB
+        Ai, LA, _, Alogdet = pdinv(A)
+        Sigma = np.diag(self.beta**-2) - Ai/self.beta[:, None]/self.beta[None, :] # posterior covariance: need full matrix for gradients
+        var = np.diag(Sigma).reshape(-1,1)
+
+        F, dF_dm, dF_dv, dF_dthetaL = likelihood.variational_expectations(Y, m, var, Y_metadata=Y_metadata)
+        if dF_dthetaL is not None:
+            dL_dthetaL = dF_dthetaL.sum(1).sum(1)
+        else:
+            dL_dthetaL = np.array([])
+
+        dF_da = np.dot(K, dF_dm)
+        SigmaB = Sigma*self.beta
+        #dF_db_ = -np.diag(Sigma.dot(np.diag(dF_dv.flatten())).dot(SigmaB))*2
+        dF_db = -2*np.sum(Sigma**2 * (dF_dv * self.beta), 0)
+        #assert np.allclose(dF_db, dF_db_)
+
+        KL = 0.5*(Alogdet + np.trace(Ai) - num_data + np.sum(m*self.alpha))
+        dKL_da = m
+        A_A2 = Ai - Ai.dot(Ai)
+        dKL_db = np.diag(np.dot(KB.T, A_A2))
+
+        log_marginal = F.sum() - KL
+        self.alpha.gradient = dF_da - dKL_da
+        self.beta.gradient = dF_db - dKL_db
+
+        # K-gradients
+        dKL_dK = 0.5*(self.alpha*self.alpha.T + self.beta[:, None]*self.beta[None, :]*A_A2)
+        tmp = Ai*self.beta[:, None]/self.beta[None, :]
+        dF_dK = self.alpha*dF_dm.T + np.dot(tmp*dF_dv, tmp.T)
+        return Posterior(mean=m, cov=Sigma, K=K),\
+               log_marginal,\
+               {'dL_dK': dF_dK-dKL_dK, 'dL_dthetaL': dL_dthetaL}
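A hedged sketch of driving this class directly (the Param wiring and the assumption that the Gaussian likelihood implements variational_expectations are inferences about the surrounding API, not part of this diff):

    import numpy as np
    import GPy
    from GPy.core.parameterization import Param
    from GPy.inference.latent_function_inference import VarGauss

    N = 30
    X = np.random.rand(N, 1)
    Y = np.sin(6 * X) + 0.1 * np.random.randn(N, 1)

    alpha = Param('alpha', np.zeros((N, 1)))  # one variational parameter per datum
    beta = Param('beta', np.ones(N))          # likewise
    inf = VarGauss(alpha, beta)

    kern = GPy.kern.RBF(1)
    lik = GPy.likelihoods.Gaussian()
    post, log_marginal, grads = inf.inference(kern, X, lik, Y)
    # grads['dL_dK'] plus alpha.gradient / beta.gradient are now populated,
    # ready for an optimizer that also links alpha and beta as model parameters.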

View file

@@ -1,14 +1,10 @@
 # ## Copyright (c) 2014, Zhenwen Dai
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

+from __future__ import print_function
 import numpy as np
-from scipy import linalg, optimize
-import Tango
 import sys
-import re
-import numdifftools as ndt
-import pdb

 try:
     #In Python 2, cPickle is faster. It does not exist in Python 3 but the underlying code is always used

View file

@@ -38,16 +38,17 @@ class SparseGPMissing(StochasticStorage):
         import numpy as np
         self.Y = model.Y_normalized
         bdict = {}
+        #For N > 1000 array2string default crops
+        opt = np.get_printoptions()
+        np.set_printoptions(threshold='nan')
         for d in range(self.Y.shape[1]):
-            inan = np.isnan(self.Y[:, d])
-            arr_str = np.array2string(inan,
-                                      np.inf, 0,
-                                      True, '',
-                                      formatter={'bool':lambda x: '1' if x else '0'})
+            inan = np.isnan(self.Y)[:, d]
+            arr_str = np.array2string(inan, np.inf, 0, True, '', formatter={'bool':lambda x: '1' if x else '0'})
             try:
                 bdict[arr_str][0].append(d)
             except:
                 bdict[arr_str] = [[d], ~inan]
+        np.set_printoptions(**opt)
         self.d = bdict.values()

 class SparseGPStochastics(StochasticStorage):
@@ -55,32 +56,36 @@ class SparseGPStochastics(StochasticStorage):
     For the sparse gp we need to store the dimension we are in,
     and the indices corresponding to those
     """
-    def __init__(self, model, batchsize=1):
+    def __init__(self, model, batchsize=1, missing_data=True):
         self.batchsize = batchsize
         self.output_dim = model.Y.shape[1]
         self.Y = model.Y_normalized
+        self.missing_data = missing_data
         self.reset()
         self.do_stochastics()

     def do_stochastics(self):
+        import numpy as np
         if self.batchsize == 1:
             self.current_dim = (self.current_dim+1)%self.output_dim
-            self.d = [[[self.current_dim], np.isnan(self.Y[:, self.d])]]
+            self.d = [[[self.current_dim], np.isnan(self.Y[:, self.current_dim]) if self.missing_data else None]]
         else:
-            import numpy as np
             self.d = np.random.choice(self.output_dim, size=self.batchsize, replace=False)
             bdict = {}
-            for d in self.d:
-                inan = np.isnan(self.Y[:, d])
-                arr_str = int(np.array2string(inan,
-                                              np.inf, 0,
-                                              True, '',
-                                              formatter={'bool':lambda x: '1' if x else '0'}), 2)
-                try:
-                    bdict[arr_str][0].append(d)
-                except:
-                    bdict[arr_str] = [[d], ~inan]
-            self.d = bdict.values()
+            if self.missing_data:
+                opt = np.get_printoptions()
+                np.set_printoptions(threshold='nan')
+                for d in self.d:
+                    inan = np.isnan(self.Y[:, d])
+                    arr_str = np.array2string(inan, np.inf, 0, True, '', formatter={'bool':lambda x: '1' if x else '0'})
+                    try:
+                        bdict[arr_str][0].append(d)
+                    except:
+                        bdict[arr_str] = [[d], ~inan]
+                np.set_printoptions(**opt)
+                self.d = bdict.values()
+            else:
+                self.d = [[self.d, None]]

     def reset(self):
         self.current_dim = -1

View file

@@ -14,7 +14,7 @@ class Add(CombinationKernel):
     This kernel will take over the active dims of its subkernels passed in.
     """
-    def __init__(self, subkerns, name='add'):
+    def __init__(self, subkerns, name='sum'):
         for i, kern in enumerate(subkerns[:]):
             if isinstance(kern, Add):
                 del subkerns[i]
@@ -72,15 +72,28 @@ class Add(CombinationKernel):
         [target.__iadd__(p.gradients_X_diag(dL_dKdiag, X)) for p in self.parts]
         return target

-    @Cache_this(limit=2, force_kwargs=['which_parts'])
+    def gradients_XX(self, dL_dK, X, X2):
+        if X2 is None:
+            target = np.zeros((X.shape[0], X.shape[0], X.shape[1]))
+        else:
+            target = np.zeros((X.shape[0], X2.shape[0], X.shape[1]))
+        [target.__iadd__(p.gradients_XX(dL_dK, X, X2)) for p in self.parts]
+        return target
+
+    def gradients_XX_diag(self, dL_dKdiag, X):
+        target = np.zeros(X.shape)
+        [target.__iadd__(p.gradients_XX_diag(dL_dKdiag, X)) for p in self.parts]
+        return target
+
+    @Cache_this(limit=1, force_kwargs=['which_parts'])
     def psi0(self, Z, variational_posterior):
         return reduce(np.add, (p.psi0(Z, variational_posterior) for p in self.parts))

-    @Cache_this(limit=2, force_kwargs=['which_parts'])
+    @Cache_this(limit=1, force_kwargs=['which_parts'])
     def psi1(self, Z, variational_posterior):
         return reduce(np.add, (p.psi1(Z, variational_posterior) for p in self.parts))

-    @Cache_this(limit=2, force_kwargs=['which_parts'])
+    @Cache_this(limit=1, force_kwargs=['which_parts'])
     def psi2(self, Z, variational_posterior):
         psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
         #return psi2
@@ -115,6 +128,41 @@ class Add(CombinationKernel):
                     raise NotImplementedError("psi2 cannot be computed for this kernel")
         return psi2

+    @Cache_this(limit=1, force_kwargs=['which_parts'])
+    def psi2n(self, Z, variational_posterior):
+        psi2 = reduce(np.add, (p.psi2n(Z, variational_posterior) for p in self.parts))
+        #return psi2
+        # compute the "cross" terms
+        from .static import White, Bias
+        from .rbf import RBF
+        #from rbf_inv import RBFInv
+        from .linear import Linear
+        #from fixed import Fixed
+        for p1, p2 in itertools.combinations(self.parts, 2):
+            # i1, i2 = p1.active_dims, p2.active_dims
+            # white doesn't combine with anything
+            if isinstance(p1, White) or isinstance(p2, White):
+                pass
+            # rbf X bias
+            #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)):
+            elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
+                tmp = p2.psi1(Z, variational_posterior)
+                psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
+            #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)):
+            elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
+                tmp = p1.psi1(Z, variational_posterior)
+                psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
+            elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
+                assert np.intersect1d(p1.active_dims, p2.active_dims).size == 0, "only non-overlapping kernel dimensions allowed so far"
+                tmp1 = p1.psi1(Z, variational_posterior)
+                tmp2 = p2.psi1(Z, variational_posterior)
+                psi2 += np.einsum('nm,no->nmo',tmp1,tmp2)+np.einsum('nm,no->nmo',tmp2,tmp1)
+                #(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
+            else:
+                raise NotImplementedError("psi2 cannot be computed for this kernel")
+        return psi2
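The loop above codes the cross terms in the expansion of psi2 for a sum kernel; a sketch of the identity (angle brackets denoting expectation over q(X)):

    \psi_2^{k_1+k_2}(z,z') = \big\langle (k_1+k_2)(x,z)\,(k_1+k_2)(x,z') \big\rangle
      = \psi_2^{k_1} + \psi_2^{k_2} + \langle k_1(x,z)\,k_2(x,z') \rangle + \langle k_2(x,z)\,k_1(x,z') \rangle

For a Bias part, k_2 \equiv \sigma^2, so the cross term collapses to \sigma^2(\psi_1^{k_1}(z) + \psi_1^{k_1}(z')); for parts on non-overlapping active dimensions the expectation factorises into \psi_1^{k_1}\psi_1^{k_2}, which is the einsum branch.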
     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         from .static import White, Bias
         for p1 in self.parts:

@@ -126,9 +174,9 @@ class Add(CombinationKernel):
                 if isinstance(p2, White):
                     continue
                 elif isinstance(p2, Bias):
-                    eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.variance * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
                 else:# np.setdiff1d(p1.active_dims, ar2, assume_unique): # TODO: Careful, not correct for overlapping active_dims
-                    eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.psi1(Z, variational_posterior) * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2.
             p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)

     def gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
@@ -143,9 +191,9 @@ class Add(CombinationKernel):
                 if isinstance(p2, White):
                     continue
                 elif isinstance(p2, Bias):
-                    eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.variance * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
                 else:
-                    eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.psi1(Z, variational_posterior) * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2.
             target += p1.gradients_Z_expectations(dL_psi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
         return target
@@ -161,9 +209,9 @@ class Add(CombinationKernel):
                 if isinstance(p2, White):
                     continue
                 elif isinstance(p2, Bias):
-                    eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.variance * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
                 else:
-                    eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.psi1(Z, variational_posterior) * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z, variational_posterior) * 2.
             grads = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
             [np.add(target_grads[i], grads[i], target_grads[i]) for i in range(len(grads))]
         return target_grads

View file

@@ -6,7 +6,11 @@ import numpy as np
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.config import config # for assessing whether to use cython
-from . import coregionalize_cython
+try:
+    from . import coregionalize_cython
+    config.set('cython', 'working', 'True')
+except ImportError:
+    config.set('cython', 'working', 'False')

 class Coregionalize(Kern):
     """
@@ -94,7 +98,7 @@ class Coregionalize(Kern):
         dL_dK_small = self._gradient_reduce_numpy(dL_dK, index, index2)
-        dkappa = np.diag(dL_dK_small)
+        dkappa = np.diag(dL_dK_small).copy()
         dL_dK_small += dL_dK_small.T
         dW = (self.W[:, None, :]*dL_dK_small[:, :, None]).sum(0)
@@ -126,4 +130,3 @@ class Coregionalize(Kern):
     def gradients_X_diag(self, dL_dKdiag, X):
         return np.zeros(X.shape)

View file

@@ -58,20 +58,9 @@ class Kern(Parameterized):
         self._sliced_X = 0
         self.useGPU = self._support_GPU and useGPU
-        self._return_psi2_n_flag = ObsAr(np.zeros(1)).astype(bool)

-    @property
-    def return_psi2_n(self):
-        """
-        Flag whether to pass back psi2 as NxMxM or MxM, by summing out N.
-        """
-        return self._return_psi2_n_flag[0]
-
-    @return_psi2_n.setter
-    def return_psi2_n(self, val):
-        def visit(self):
-            if isinstance(self, Kern):
-                self._return_psi2_n_flag[0]=val
-        self.traverse(visit)
+        from .psi_comp import PSICOMP_GH
+        self.psicomp = PSICOMP_GH()

     @Cache_this(limit=20)
     def _slice_X(self, X):
@@ -90,13 +79,19 @@ class Kern(Parameterized):
     def Kdiag(self, X):
         raise NotImplementedError
     def psi0(self, Z, variational_posterior):
-        raise NotImplementedError
+        return self.psicomp.psicomputations(self, Z, variational_posterior)[0]
     def psi1(self, Z, variational_posterior):
-        raise NotImplementedError
+        return self.psicomp.psicomputations(self, Z, variational_posterior)[1]
     def psi2(self, Z, variational_posterior):
-        raise NotImplementedError
+        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=False)[2]
+    def psi2n(self, Z, variational_posterior):
+        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]
     def gradients_X(self, dL_dK, X, X2):
         raise NotImplementedError
+    def gradients_XX(self, dL_dK, X, X2):
+        raise NotImplementedError("This is the second derivative of K wrt X and X2, and not implemented for this kernel")
+    def gradients_XX_diag(self, dL_dKdiag, X):
+        raise NotImplementedError("This is the diagonal of the second derivative of K wrt X and X2, and not implemented for this kernel")
     def gradients_X_diag(self, dL_dKdiag, X):
         raise NotImplementedError
@@ -119,21 +114,23 @@ class Kern(Parameterized):
             dL_dpsi1 * dpsi1_d{theta_i} +
             dL_dpsi2 * dpsi2_d{theta_i}
         """
-        raise NotImplementedError
+        dtheta = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[0]
+        self.gradient[:] = dtheta

-    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+                                 psi0=None, psi1=None, psi2=None):
         """
         Returns the derivative of the objective wrt Z, using the chain rule
         through the expectation variables.
         """
-        raise NotImplementedError
+        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[1]

     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         """
         Compute the gradients wrt the parameters of the variational
         distribution q(X), chain-ruling via the expectations of the kernel
         """
-        raise NotImplementedError
+        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2:]

     def plot(self, x=None, fignum=None, ax=None, title=None, plot_limits=None, resolution=None, **mpl_kwargs):
         """
def __iadd__(self, other): def __iadd__(self, other):
return self.add(other) return self.add(other)
def add(self, other, name='add'): def add(self, other, name='sum'):
""" """
Add another kernel to this one. Add another kernel to this one.

View file

@@ -19,20 +19,26 @@ class KernCallsViaSlicerMeta(ParametersChangedMeta):
         put_clean(dct, 'update_gradients_full', _slice_update_gradients_full)
         put_clean(dct, 'update_gradients_diag', _slice_update_gradients_diag)
         put_clean(dct, 'gradients_X', _slice_gradients_X)
+        put_clean(dct, 'gradients_XX', _slice_gradients_XX)
+        put_clean(dct, 'gradients_XX_diag', _slice_gradients_X_diag)
         put_clean(dct, 'gradients_X_diag', _slice_gradients_X_diag)
         put_clean(dct, 'psi0', _slice_psi)
         put_clean(dct, 'psi1', _slice_psi)
         put_clean(dct, 'psi2', _slice_psi)
+        put_clean(dct, 'psi2n', _slice_psi)
         put_clean(dct, 'update_gradients_expectations', _slice_update_gradients_expectations)
         put_clean(dct, 'gradients_Z_expectations', _slice_gradients_Z_expectations)
         put_clean(dct, 'gradients_qX_expectations', _slice_gradients_qX_expectations)
         return super(KernCallsViaSlicerMeta, cls).__new__(cls, name, bases, dct)

 class _Slice_wrap(object):
-    def __init__(self, k, X, X2=None):
+    def __init__(self, k, X, X2=None, ret_shape=None):
         self.k = k
-        self.shape = X.shape
+        if ret_shape is None:
+            self.shape = X.shape
+        else:
+            self.shape = ret_shape
         assert X.ndim == 2, "only matrices are allowed as inputs to kernels for now, given X.shape={!s}".format(X.shape)
         if X2 is not None:
             assert X2.ndim == 2, "only matrices are allowed as inputs to kernels for now, given X2.shape={!s}".format(X2.shape)
@@ -54,7 +60,10 @@ class _Slice_wrap(object):
     def handle_return_array(self, return_val):
         if self.ret:
             ret = np.zeros(self.shape)
-            ret[:, self.k.active_dims] = return_val
+            if len(self.shape) == 2:
+                ret[:, self.k.active_dims] = return_val
+            elif len(self.shape) == 3:
+                ret[:, :, self.k.active_dims] = return_val
             return ret
         return return_val
@@ -98,6 +107,19 @@ def _slice_gradients_X(f):
         return ret
     return wrap

+def _slice_gradients_XX(f):
+    @wraps(f)
+    def wrap(self, dL_dK, X, X2=None):
+        if X2 is None:
+            N, M = X.shape[0], X.shape[0]
+        else:
+            N, M = X.shape[0], X2.shape[0]
+        with _Slice_wrap(self, X, X2, ret_shape=(N, M, X.shape[1])) as s:
+            #with _Slice_wrap(self, X, X2, ret_shape=None) as s:
+            ret = s.handle_return_array(f(self, dL_dK, s.X, s.X2))
+        return ret
+    return wrap
+
 def _slice_gradients_X_diag(f):
     @wraps(f)
     def wrap(self, dL_dKdiag, X):
@@ -124,7 +146,8 @@ def _slice_update_gradients_expectations(f):

 def _slice_gradients_Z_expectations(f):
     @wraps(f)
-    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+             psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
         with _Slice_wrap(self, Z, variational_posterior) as s:
             ret = s.handle_return_array(f(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, s.X, s.X2))
         return ret
@@ -132,7 +155,8 @@ def _slice_gradients_Z_expectations(f):

 def _slice_gradients_qX_expectations(f):
     @wraps(f)
-    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
+    def wrap(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior,
+             psi0=None, psi1=None, psi2=None, Lpsi0=None, Lpsi1=None, Lpsi2=None):
         with _Slice_wrap(self, variational_posterior, Z) as s:
             ret = list(f(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, s.X2, s.X))
             r2 = ret[:2]

View file

@@ -100,6 +100,12 @@ class Linear(Kern):
         #return (((X2[None,:, :] * self.variances)) * dL_dK[:, :, None]).sum(1)
         return np.einsum('jq,q,ij->iq', X2, self.variances, dL_dK)

+    def gradients_XX(self, dL_dK, X, X2=None):
+        if X2 is None:
+            return 2*np.ones(X.shape)*self.variances
+        else:
+            return np.ones(X.shape)*self.variances
+
     def gradients_X_diag(self, dL_dKdiag, X):
         return 2.*self.variances*dL_dKdiag[:,None]*X
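A sketch of why this second derivative is input-independent: for the linear kernel k(x,x') = \sum_q v_q x_q x'_q,

    \partial^2 k / \partial x_q \partial x'_q = v_q \quad (\text{X2 given}), \qquad
    \partial^2 (\textstyle\sum_q v_q x_q^2) / \partial x_q^2 = 2 v_q \quad (\text{X2 is None, so } x' = x),

hence the factor of two in the first branch.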
@@ -111,26 +117,29 @@ class Linear(Kern):
     #---------------------------------------#

     def psi0(self, Z, variational_posterior):
-        return self.psicomp.psicomputations(self.variances, Z, variational_posterior)[0]
+        return self.psicomp.psicomputations(self, Z, variational_posterior)[0]

     def psi1(self, Z, variational_posterior):
-        return self.psicomp.psicomputations(self.variances, Z, variational_posterior)[1]
+        return self.psicomp.psicomputations(self, Z, variational_posterior)[1]

     def psi2(self, Z, variational_posterior):
-        return self.psicomp.psicomputations(self.variances, Z, variational_posterior)[2]
+        return self.psicomp.psicomputations(self, Z, variational_posterior)[2]
+
+    def psi2n(self, Z, variational_posterior):
+        return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]

     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        dL_dvar = self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variances, Z, variational_posterior)[0]
+        dL_dvar = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[0]
         if self.ARD:
             self.variances.gradient = dL_dvar
         else:
             self.variances.gradient = dL_dvar.sum()

     def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variances, Z, variational_posterior)[1]
+        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[1]

     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variances, Z, variational_posterior)[2:]
+        return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2:]

 class LinearFull(Kern):
     def __init__(self, input_dim, rank, W=None, kappa=None, active_dims=None, name='linear_full'):

View file

@@ -5,6 +5,7 @@ from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
+from ...util.caching import Cache_this

 four_over_tau = 2./np.pi

 class MLP(Kern):
@ -31,7 +32,7 @@ class MLP(Kern):
""" """
def __init__(self, input_dim, variance=1., weight_variance=1., bias_variance=100., active_dims=None, name='mlp'): def __init__(self, input_dim, variance=1., weight_variance=1., bias_variance=1., active_dims=None, name='mlp'):
super(MLP, self).__init__(input_dim, active_dims, name) super(MLP, self).__init__(input_dim, active_dims, name)
self.variance = Param('variance', variance, Logexp()) self.variance = Param('variance', variance, Logexp())
self.weight_variance = Param('weight_variance', weight_variance, Logexp()) self.weight_variance = Param('weight_variance', weight_variance, Logexp())
@ -40,96 +41,77 @@ class MLP(Kern):
def K(self, X, X2=None): def K(self, X, X2=None):
self._K_computations(X, X2) if X2 is None:
return self.variance*self._K_dvar X_denom = np.sqrt(self._comp_prod(X)+1.)
X2_denom = X_denom
X2 = X
else:
X_denom = np.sqrt(self._comp_prod(X)+1.)
X2_denom = np.sqrt(self._comp_prod(X2)+1.)
XTX = self._comp_prod(X,X2)/X_denom[:,None]/X2_denom[None,:]
return self.variance*four_over_tau*np.arcsin(XTX)
def Kdiag(self, X): def Kdiag(self, X):
"""Compute the diagonal of the covariance matrix for X.""" """Compute the diagonal of the covariance matrix for X."""
self._K_diag_computations(X) X_prod = self._comp_prod(X)
return self.variance*self._K_diag_dvar return self.variance*four_over_tau*np.arcsin(X_prod/(X_prod+1.))
def update_gradients_full(self, dL_dK, X, X2=None): def update_gradients_full(self, dL_dK, X, X2=None):
"""Derivative of the covariance with respect to the parameters.""" """Derivative of the covariance with respect to the parameters."""
self._K_computations(X, X2) dvar, dw, db = self._comp_grads(dL_dK, X, X2)[:3]
self.variance.gradient = np.sum(self._K_dvar*dL_dK) self.variance.gradient = dvar
self.weight_variance.gradient = dw
denom3 = self._K_denom**3 self.bias_variance.gradient = db
base = four_over_tau*self.variance/np.sqrt(1-self._K_asin_arg*self._K_asin_arg)
base_cov_grad = base*dL_dK
if X2 is None:
vec = np.diag(self._K_inner_prod)
self.weight_variance.gradient = ((self._K_inner_prod/self._K_denom
-.5*self._K_numer/denom3
*(np.outer((self.weight_variance*vec+self.bias_variance+1.), vec)
+np.outer(vec,(self.weight_variance*vec+self.bias_variance+1.))))*base_cov_grad).sum()
self.bias_variance.gradient = ((1./self._K_denom
-.5*self._K_numer/denom3
*((vec[None, :]+vec[:, None])*self.weight_variance
+2.*self.bias_variance + 2.))*base_cov_grad).sum()
else:
vec1 = (X*X).sum(1)
vec2 = (X2*X2).sum(1)
self.weight_variance.gradient = ((self._K_inner_prod/self._K_denom
-.5*self._K_numer/denom3
*(np.outer((self.weight_variance*vec1+self.bias_variance+1.), vec2) + np.outer(vec1, self.weight_variance*vec2 + self.bias_variance+1.)))*base_cov_grad).sum()
self.bias_variance.gradient = ((1./self._K_denom
-.5*self._K_numer/denom3
*((vec1[:, None]+vec2[None, :])*self.weight_variance
+ 2*self.bias_variance + 2.))*base_cov_grad).sum()
def update_gradients_diag(self, dL_dKdiag, X): def update_gradients_diag(self, dL_dKdiag, X):
self._K_diag_computations(X) dvar, dw, db = self._comp_grads_diag(dL_dKdiag, X)[:3]
self.variance.gradient = np.sum(self._K_diag_dvar*dL_dKdiag) self.variance.gradient = dvar
self.weight_variance.gradient = dw
base = four_over_tau*self.variance/np.sqrt(1-self._K_diag_asin_arg*self._K_diag_asin_arg) self.bias_variance.gradient = db
base_cov_grad = base*dL_dKdiag/np.square(self._K_diag_denom)
self.weight_variance.gradient = (base_cov_grad*np.square(X).sum(axis=1)).sum()
self.bias_variance.gradient = base_cov_grad.sum()
def gradients_X(self, dL_dK, X, X2): def gradients_X(self, dL_dK, X, X2):
"""Derivative of the covariance matrix with respect to X""" """Derivative of the covariance matrix with respect to X"""
self._K_computations(X, X2) return self._comp_grads(dL_dK, X, X2)[3]
arg = self._K_asin_arg
numer = self._K_numer
denom = self._K_denom
denom3 = denom*denom*denom
if X2 is not None:
vec2 = (X2*X2).sum(1)*self.weight_variance+self.bias_variance + 1.
return four_over_tau*self.weight_variance*self.variance*((X2[None, :, :]/denom[:, :, None] - vec2[None, :, None]*X[:, None, :]*(numer/denom3)[:, :, None])*(dL_dK/np.sqrt(1-arg*arg))[:, :, None]).sum(1)
else:
vec = (X*X).sum(1)*self.weight_variance+self.bias_variance + 1.
return 2*four_over_tau*self.weight_variance*self.variance*((X[None, :, :]/denom[:, :, None] - vec[None, :, None]*X[:, None, :]*(numer/denom3)[:, :, None])*(dL_dK/np.sqrt(1-arg*arg))[:, :, None]).sum(1)
def gradients_X_diag(self, dL_dKdiag, X): def gradients_X_diag(self, dL_dKdiag, X):
"""Gradient of diagonal of covariance with respect to X""" """Gradient of diagonal of covariance with respect to X"""
self._K_diag_computations(X) return self._comp_grads_diag(dL_dKdiag, X)[3]
arg = self._K_diag_asin_arg
denom = self._K_diag_denom
#numer = self._K_diag_numer
return four_over_tau*2.*self.weight_variance*self.variance*X*(1./denom*(1. - arg)*dL_dKdiag/(np.sqrt(1-arg*arg)))[:, None]
@Cache_this(limit=50, ignore_args=())
def _K_computations(self, X, X2): def _comp_prod(self, X, X2=None):
"""Pre-computations for the covariance matrix (used for computing the covariance and its gradients."""
if X2 is None: if X2 is None:
self._K_inner_prod = np.dot(X,X.T) return (np.square(X)*self.weight_variance).sum(axis=1)+self.bias_variance
self._K_numer = self._K_inner_prod*self.weight_variance + self.bias_variance
vec = np.diag(self._K_numer) + 1.
self._K_denom = np.sqrt(np.outer(vec,vec))
else: else:
self._K_inner_prod = np.dot(X,X2.T) return (X*self.weight_variance).dot(X2.T)+self.bias_variance
self._K_numer = self._K_inner_prod*self.weight_variance + self.bias_variance
vec1 = (X*X).sum(1)*self.weight_variance + self.bias_variance + 1.
vec2 = (X2*X2).sum(1)*self.weight_variance + self.bias_variance + 1.
self._K_denom = np.sqrt(np.outer(vec1,vec2))
self._K_asin_arg = self._K_numer/self._K_denom
self._K_dvar = four_over_tau*np.arcsin(self._K_asin_arg)
def _K_diag_computations(self, X): @Cache_this(limit=20, ignore_args=(1,))
"""Pre-computations concerning the diagonal terms (used for computation of diagonal and its gradients).""" def _comp_grads(self, dL_dK, X, X2=None):
self._K_diag_numer = (X*X).sum(1)*self.weight_variance + self.bias_variance var,w,b = self.variance, self.weight_variance, self.bias_variance
self._K_diag_denom = self._K_diag_numer+1. K = self.K(X, X2)
self._K_diag_asin_arg = self._K_diag_numer/self._K_diag_denom dvar = (dL_dK*K).sum()/var
self._K_diag_dvar = four_over_tau*np.arcsin(self._K_diag_asin_arg) X_prod = self._comp_prod(X)
X2_prod = self._comp_prod(X2) if X2 is not None else X_prod
XTX = self._comp_prod(X,X2) if X2 is not None else self._comp_prod(X, X)
common = var*four_over_tau/np.sqrt((X_prod[:,None]+1.)*(X2_prod[None,:]+1.)-np.square(XTX))*dL_dK
dw = (common*((XTX-b)/w-XTX*(((X_prod-b)/(w*(X_prod+1.)))[:,None]+((X2_prod-b)/(w*(X2_prod+1.)))[None,:])/2.)).sum()
db = (common*(1.-XTX*(1./(X_prod[:,None]+1.)+1./(X2_prod[None,:]+1.))/2.)).sum()
if X2 is None:
common = common+common.T
dX = common.dot(X)*w-((common*XTX).sum(axis=1)/(X_prod+1.))[:,None]*X*w
dX2 = dX
else:
dX = common.dot(X2)*w-((common*XTX).sum(axis=1)/(X_prod+1.))[:,None]*X*w
dX2 = common.T.dot(X)*w-((common*XTX).sum(axis=0)/(X2_prod+1.))[:,None]*X2*w
return dvar, dw, db, dX, dX2
@Cache_this(limit=20, ignore_args=(1,))
def _comp_grads_diag(self, dL_dKdiag, X):
var,w,b = self.variance, self.weight_variance, self.bias_variance
K = self.Kdiag(X)
dvar = (dL_dKdiag*K).sum()/var
X_prod = self._comp_prod(X)
common = var*four_over_tau/(np.sqrt(1-np.square(X_prod/(X_prod+1)))*np.square(X_prod+1))*dL_dKdiag
dw = (common*(X_prod-b)).sum()/w
db = common.sum()
dX = common[:,None]*X*w*2
return dvar, dw, db, dX
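For orientation (not part of the commit): a standalone numpy sketch of what the refactored K/Kdiag above compute, K = variance * (2/pi) * arcsin of the normalised weighted inner product, with `w` and `b` standing in for weight_variance and bias_variance.

import numpy as np

four_over_tau = 2. / np.pi

def _comp_prod(X, X2=None, w=1., b=1.):
    # weighted inner products; with X2=None, return only the diagonal terms
    if X2 is None:
        return (np.square(X) * w).sum(axis=1) + b
    return (X * w).dot(X2.T) + b

def mlp_K(X, X2=None, variance=1., w=1., b=1.):
    X_denom = np.sqrt(_comp_prod(X, w=w, b=b) + 1.)
    X2_denom = X_denom if X2 is None else np.sqrt(_comp_prod(X2, w=w, b=b) + 1.)
    XTX = _comp_prod(X, X if X2 is None else X2, w, b) / X_denom[:, None] / X2_denom[None, :]
    return variance * four_over_tau * np.arcsin(XTX)

def mlp_Kdiag(X, variance=1., w=1., b=1.):
    X_prod = _comp_prod(X, w=w, b=b)
    return variance * four_over_tau * np.arcsin(X_prod / (X_prod + 1.))

X = np.random.randn(5, 3)
assert np.allclose(np.diag(mlp_K(X)), mlp_Kdiag(X))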

View file

@ -9,18 +9,34 @@ from . import ssrbf_psi_comp
from . import sslinear_psi_comp from . import sslinear_psi_comp
from . import linear_psi_comp from . import linear_psi_comp
class PSICOMP_RBF(Pickleable):
@Cache_this(limit=2, ignore_args=(0,)) class PSICOMP(Pickleable):
def psicomputations(self, variance, lengthscale, Z, variational_posterior):
def psicomputations(self, kern, Z, qX, return_psi2_n=False):
raise NotImplementedError("Abstract method!")
def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, qX):
raise NotImplementedError("Abstract method!")
def _setup_observers(self):
pass
from .gaussherm import PSICOMP_GH
class PSICOMP_RBF(PSICOMP):
@Cache_this(limit=5, ignore_args=(0,))
def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
variance, lengthscale = kern.variance, kern.lengthscale
if isinstance(variational_posterior, variational.NormalPosterior): if isinstance(variational_posterior, variational.NormalPosterior):
return rbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior) return rbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior, return_psi2_n=return_psi2_n)
elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior): elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
return ssrbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior) return ssrbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior)
else: else:
raise ValueError("unknown distriubtion received for psi-statistics") raise ValueError("unknown distriubtion received for psi-statistics")
@Cache_this(limit=2, ignore_args=(0,1,2,3)) @Cache_this(limit=5, ignore_args=(0,2,3,4))
def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior): def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
variance, lengthscale = kern.variance, kern.lengthscale
if isinstance(variational_posterior, variational.NormalPosterior): if isinstance(variational_posterior, variational.NormalPosterior):
return rbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior) return rbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior)
elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior): elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
@ -28,28 +44,26 @@ class PSICOMP_RBF(Pickleable):
else: else:
raise ValueError("unknown distriubtion received for psi-statistics") raise ValueError("unknown distriubtion received for psi-statistics")
def _setup_observers(self): class PSICOMP_Linear(PSICOMP):
pass
class PSICOMP_Linear(Pickleable): @Cache_this(limit=5, ignore_args=(0,))
def psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
@Cache_this(limit=2, ignore_args=(0,)) variances = kern.variances
def psicomputations(self, variance, Z, variational_posterior):
if isinstance(variational_posterior, variational.NormalPosterior): if isinstance(variational_posterior, variational.NormalPosterior):
return linear_psi_comp.psicomputations(variance, Z, variational_posterior) return linear_psi_comp.psicomputations(variances, Z, variational_posterior, return_psi2_n=return_psi2_n)
elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior): elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
return sslinear_psi_comp.psicomputations(variance, Z, variational_posterior) return sslinear_psi_comp.psicomputations(variances, Z, variational_posterior)
else: else:
raise ValueError("unknown distriubtion received for psi-statistics") raise ValueError("unknown distriubtion received for psi-statistics")
@Cache_this(limit=2, ignore_args=(0,1,2,3)) @Cache_this(limit=2, ignore_args=(0,2,3,4))
def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior): def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
variances = kern.variances
if isinstance(variational_posterior, variational.NormalPosterior): if isinstance(variational_posterior, variational.NormalPosterior):
return linear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior) return linear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variances, Z, variational_posterior)
elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior): elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
return sslinear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior) return sslinear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variances, Z, variational_posterior)
else: else:
raise ValueError("unknown distriubtion received for psi-statistics") raise ValueError("unknown distriubtion received for psi-statistics")
def _setup_observers(self):
pass

View file

@ -0,0 +1,92 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
An approximate psi-statistics implementation based on Gauss-Hermite quadrature
"""
import numpy as np
from GPy.util.caching import Cache_this
from ....util.linalg import tdot
from . import PSICOMP
class PSICOMP_GH(PSICOMP):
def __init__(self, degree=5, cache_K=True):
self.degree = degree
self.cache_K = cache_K
self.locs, self.weights = np.polynomial.hermite.hermgauss(degree)
self.locs *= np.sqrt(2.)
self.weights*= 1./np.sqrt(np.pi)
self.Xs = None
def _setup_observers(self):
pass
@Cache_this(limit=10, ignore_args=(0,))
def comp_K(self, Z, qX):
if self.Xs is None or self.Xs.shape[1:] != qX.mean.shape:
from ....core.parameterization import ObsAr
self.Xs = ObsAr(np.empty((self.degree,)+qX.mean.shape))
mu, S = qX.mean.values, qX.variance.values
S_sq = np.sqrt(S)
for i in xrange(self.degree):
self.Xs[i] = self.locs[i]*S_sq+mu
return self.Xs
@Cache_this(limit=10, ignore_args=(0,))
def psicomputations(self, kern, Z, qX, return_psi2_n=False):
mu, S = qX.mean.values, qX.variance.values
N,M,Q = mu.shape[0],Z.shape[0],mu.shape[1]
if self.cache_K: Xs = self.comp_K(Z, qX)
else: S_sq = np.sqrt(S)
psi0 = np.zeros((N,))
psi1 = np.zeros((N,M))
psi2 = np.zeros((M,M))
for i in xrange(self.degree):
if self.cache_K:
X = Xs[i]
else:
X = self.locs[i]*S_sq+mu
psi0 += self.weights[i]* kern.Kdiag(X)
Kfu = kern.K(X,Z)
psi1 += self.weights[i]* Kfu
psi2 += self.weights[i]* tdot(Kfu.T)
return psi0, psi1, psi2
@Cache_this(limit=10, ignore_args=(0, 2,3,4))
def psiDerivativecomputations(self, kern, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, qX):
mu, S = qX.mean.values, qX.variance.values
if self.cache_K: Xs = self.comp_K(Z, qX)
S_sq = np.sqrt(S)
dtheta_old = kern.gradient.copy()
dtheta = np.zeros_like(kern.gradient)
dZ = np.zeros_like(Z.values)
dmu = np.zeros_like(mu)
dS = np.zeros_like(S)
for i in xrange(self.degree):
if self.cache_K:
X = Xs[i]
else:
X = self.locs[i]*S_sq+mu
dL_dpsi0_i = dL_dpsi0*self.weights[i]
kern.update_gradients_diag(dL_dpsi0_i, X)
dtheta += kern.gradient
dX = kern.gradients_X_diag(dL_dpsi0_i, X)
Kfu = kern.K(X,Z)
dL_dkfu = (dL_dpsi1+ 2.*Kfu.dot(dL_dpsi2))*self.weights[i]
kern.update_gradients_full(dL_dkfu, X, Z)
dtheta += kern.gradient
dX += kern.gradients_X(dL_dkfu, X, Z)
dZ += kern.gradients_X(dL_dkfu.T, Z, X)
dmu += dX
dS += dX*self.locs[i]/(2.*S_sq)
kern.gradient[:] = dtheta_old
return dtheta, dZ, dmu, dS
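The rule set up in __init__ (nodes scaled by sqrt(2), weights by 1/sqrt(pi)) turns hermgauss into an expectation under N(mu, S). A 1-D sketch, not part of the commit, comparing the quadrature against the closed-form psi1 of an RBF kernel:

import numpy as np

degree = 10
locs, weights = np.polynomial.hermite.hermgauss(degree)
locs = locs * np.sqrt(2.)            # same rescaling as in __init__ above
weights = weights / np.sqrt(np.pi)

mu, S = 0.3, 0.1                     # q(x) mean and variance
z, var, ell = 1.2, 2.0, 1.0          # inducing input and RBF parameters

k = lambda x: var * np.exp(-0.5 * (x - z)**2 / ell**2)
psi1_gh = sum(w * k(mu + np.sqrt(S) * t) for w, t in zip(weights, locs))

# closed-form psi1 of the 1-D RBF kernel, for comparison
psi1_exact = var / np.sqrt(1. + S / ell**2) * np.exp(-0.5 * (mu - z)**2 / (ell**2 + S))
assert np.allclose(psi1_gh, psi1_exact)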

View file

@ -8,7 +8,7 @@ The package for the Psi statistics computation of the linear kernel for Bayesian
import numpy as np import numpy as np
from ....util.linalg import tdot from ....util.linalg import tdot
def psicomputations(variance, Z, variational_posterior): def psicomputations(variance, Z, variational_posterior, return_psi2_n=False):
""" """
Compute psi-statistics for ss-linear kernel Compute psi-statistics for ss-linear kernel
""" """
@ -21,8 +21,12 @@ def psicomputations(variance, Z, variational_posterior):
S = variational_posterior.variance S = variational_posterior.variance
psi0 = (variance*(np.square(mu)+S)).sum(axis=1) psi0 = (variance*(np.square(mu)+S)).sum(axis=1)
psi1 = np.dot(mu,(variance*Z).T) Zv = variance * Z
psi2 = np.dot(S.sum(axis=0)*np.square(variance)*Z,Z.T)+ tdot(psi1.T) psi1 = np.dot(mu,Zv.T)
if return_psi2_n:
psi2 = psi1[:,:,None] * psi1[:,None,:] + np.dot(S[:,None,:] * Zv[None,:,:], Zv.T)
else:
psi2 = np.dot(S.sum(axis=0) * Zv, Zv.T) + tdot(psi1.T)
return psi0, psi1, psi2 return psi0, psi1, psi2
@ -59,19 +63,39 @@ def _psi2computations(dL_dpsi2, variance, Z, mu, S):
variance2 = np.square(variance) variance2 = np.square(variance)
common_sum = np.dot(mu,(variance*Z).T) common_sum = np.dot(mu,(variance*Z).T)
Z_expect = (np.dot(dL_dpsi2,Z)*Z).sum(axis=0) if len(dL_dpsi2.shape)==2:
dL_dpsi2T = dL_dpsi2+dL_dpsi2.T Z_expect = (np.dot(dL_dpsi2,Z)*Z).sum(axis=0)
common_expect = np.dot(common_sum,np.dot(dL_dpsi2T,Z)) dL_dpsi2T = dL_dpsi2+dL_dpsi2.T
Z2_expect = np.inner(common_sum,dL_dpsi2T) common_expect = np.dot(common_sum,np.dot(dL_dpsi2T,Z))
Z1_expect = np.dot(dL_dpsi2T,Z) Z2_expect = np.inner(common_sum,dL_dpsi2T)
Z1_expect = np.dot(dL_dpsi2T,Z)
dL_dvar = 2.*S.sum(axis=0)*variance*Z_expect+(common_expect*mu).sum(axis=0) dL_dvar = 2.*S.sum(axis=0)*variance*Z_expect+(common_expect*mu).sum(axis=0)
dL_dmu = common_expect*variance dL_dmu = common_expect*variance
dL_dS = np.empty(S.shape) dL_dS = np.empty(S.shape)
dL_dS[:] = Z_expect*variance2 dL_dS[:] = Z_expect*variance2
dL_dZ = variance2*S.sum(axis=0)*Z1_expect+np.dot(Z2_expect.T,variance*mu) dL_dZ = variance2*S.sum(axis=0)*Z1_expect+np.dot(Z2_expect.T,variance*mu)
else:
N,M,Q = mu.shape[0],Z.shape[0],mu.shape[1]
dL_dpsi2_ = dL_dpsi2.sum(axis=0)
Z_expect = (np.dot(dL_dpsi2.reshape(N*M,M),Z).reshape(N,M,Q)*Z[None,:,:]).sum(axis=1)
dL_dpsi2T = dL_dpsi2_+dL_dpsi2_.T
dL_dpsi2T_ = dL_dpsi2+np.swapaxes(dL_dpsi2, 1, 2)
common_expect = np.dot(common_sum,np.dot(dL_dpsi2T,Z))
common_expect_ = (common_sum[:,:,None]*np.dot(dL_dpsi2T_.reshape(N*M,M),Z).reshape(N,M,Q)).sum(axis=1)
Z2_expect = (common_sum[:,:,None]*dL_dpsi2T_).sum(axis=1)
Z1_expect = np.dot(dL_dpsi2T_.reshape(N*M,M),Z).reshape(N,M,Q)
dL_dvar = 2.*variance*(S*Z_expect).sum(axis=0)+(common_expect_*mu).sum(axis=0)
dL_dmu = common_expect_*variance
dL_dS = np.empty(S.shape)
dL_dS[:] = variance2* Z_expect
dL_dZ = variance2*(S[:,None,:]*Z1_expect).sum(axis=0)+np.dot(Z2_expect.T,variance*mu)
return dL_dvar, dL_dmu, dL_dS, dL_dZ return dL_dvar, dL_dmu, dL_dS, dL_dZ
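A small consistency check (illustrative, not in the commit) that the per-point psi2 produced when return_psi2_n=True sums over N to the pooled psi2, using the linear-kernel formulas above:

import numpy as np

N, M, Q = 4, 3, 2
rng = np.random.RandomState(0)
mu, S = rng.randn(N, Q), rng.rand(N, Q)     # q(X) mean and variance
Z = rng.randn(M, Q)
variance = np.full(Q, 0.5)                  # ARD variances of the linear kernel

Zv = variance * Z
psi1 = mu.dot(Zv.T)                                              # N x M
psi2_n = psi1[:, :, None]*psi1[:, None, :] \
         + np.dot(S[:, None, :]*Zv[None, :, :], Zv.T)            # N x M x M
psi2 = np.dot(S.sum(axis=0)*Zv, Zv.T) + psi1.T.dot(psi1)         # M x M
assert np.allclose(psi2_n.sum(axis=0), psi2)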

View file

@ -5,7 +5,7 @@ The module for psi-statistics for RBF kernel
import numpy as np import numpy as np
from GPy.util.caching import Cacher from GPy.util.caching import Cacher
def psicomputations(variance, lengthscale, Z, variational_posterior): def psicomputations(variance, lengthscale, Z, variational_posterior, return_psi2_n=False):
""" """
Z - MxQ Z - MxQ
mu - NxQ mu - NxQ
@ -21,7 +21,8 @@ def psicomputations(variance, lengthscale, Z, variational_posterior):
psi0 = np.empty(mu.shape[0]) psi0 = np.empty(mu.shape[0])
psi0[:] = variance psi0[:] = variance
psi1 = _psi1computations(variance, lengthscale, Z, mu, S) psi1 = _psi1computations(variance, lengthscale, Z, mu, S)
psi2 = _psi2computations(variance, lengthscale, Z, mu, S).sum(axis=0) psi2 = _psi2computations(variance, lengthscale, Z, mu, S)
if not return_psi2_n: psi2 = psi2.sum(axis=0)
return psi0, psi1, psi2 return psi0, psi1, psi2
def __psi1computations(variance, lengthscale, Z, mu, S): def __psi1computations(variance, lengthscale, Z, mu, S):

View file

@ -241,7 +241,7 @@ gpu_code = """
class PSICOMP_RBF_GPU(PSICOMP_RBF): class PSICOMP_RBF_GPU(PSICOMP_RBF):
def __init__(self, threadnum=128, blocknum=15, GPU_direct=False): def __init__(self, threadnum=256, blocknum=30, GPU_direct=False):
self.GPU_direct = GPU_direct self.GPU_direct = GPU_direct
self.gpuCache = None self.gpuCache = None

View file

@ -31,6 +31,9 @@ class RBF(Stationary):
def dK_dr(self, r): def dK_dr(self, r):
return -r*self.K_of_r(r) return -r*self.K_of_r(r)
def dK2_drdr(self, r):
return (r**2-1)*self.K_of_r(r)
def __getstate__(self): def __getstate__(self):
dc = super(RBF, self).__getstate__() dc = super(RBF, self).__getstate__()
if self.useGPU: if self.useGPU:
@ -50,22 +53,25 @@ class RBF(Stationary):
#---------------------------------------# #---------------------------------------#
def psi0(self, Z, variational_posterior): def psi0(self, Z, variational_posterior):
return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior)[0] return self.psicomp.psicomputations(self, Z, variational_posterior)[0]
def psi1(self, Z, variational_posterior): def psi1(self, Z, variational_posterior):
return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior)[1] return self.psicomp.psicomputations(self, Z, variational_posterior)[1]
def psi2(self, Z, variational_posterior): def psi2(self, Z, variational_posterior):
return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior)[2] return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=False)[2]
def psi2n(self, Z, variational_posterior):
return self.psicomp.psicomputations(self, Z, variational_posterior, return_psi2_n=True)[2]
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[:2] dL_dvar, dL_dlengscale = self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[:2]
self.variance.gradient = dL_dvar self.variance.gradient = dL_dvar
self.lengthscale.gradient = dL_dlengscale self.lengthscale.gradient = dL_dlengscale
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[2] return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[2]
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return self.psicomp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, self.variance, self.lengthscale, Z, variational_posterior)[3:] return self.psicomp.psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior)[3:]
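Sanity check for the new dK2_drdr (not part of the commit): with K(r) = var * exp(-r^2/2), the second derivative in r is (r^2 - 1) * K(r), consistent with dK_dr = -r * K(r):

import numpy as np

var = 1.7
K = lambda r: var * np.exp(-0.5 * r**2)
r, eps = 0.8, 1e-4
num = (K(r + eps) - 2.*K(r) + K(r - eps)) / eps**2   # second central difference
assert np.allclose(num, (r**2 - 1.) * K(r), rtol=1e-5)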

View file

@ -24,6 +24,13 @@ class Static(Kern):
def gradients_X_diag(self, dL_dKdiag, X): def gradients_X_diag(self, dL_dKdiag, X):
return np.zeros(X.shape) return np.zeros(X.shape)
def gradients_XX(self, dL_dK, X, X2):
if X2 is None:
X2 = X
return np.zeros((X.shape[0], X2.shape[0], X.shape[1]), dtype=np.float64)
def gradients_XX_diag(self, dL_dKdiag, X):
return np.zeros(X.shape)
def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
return np.zeros(Z.shape) return np.zeros(Z.shape)
@ -59,6 +66,9 @@ class White(Static):
def psi2(self, Z, variational_posterior): def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64) return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_full(self, dL_dK, X, X2=None): def update_gradients_full(self, dL_dK, X, X2=None):
if X2 is None: if X2 is None:
self.variance.gradient = np.trace(dL_dK) self.variance.gradient = np.trace(dL_dK)
@ -92,6 +102,11 @@ class Bias(Static):
ret[:] = self.variance*self.variance*variational_posterior.shape[0] ret[:] = self.variance*self.variance*variational_posterior.shape[0]
return ret return ret
def psi2n(self, Z, variational_posterior):
ret = np.empty((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
ret[:] = self.variance*self.variance
return ret
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0] self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0]
@ -120,6 +135,9 @@ class Fixed(Static):
def psi2(self, Z, variational_posterior): def psi2(self, Z, variational_posterior):
return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64) return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
def psi2n(self, Z, variational_posterior):
return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior): def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
self.variance.gradient = dL_dpsi0.sum() self.variance.gradient = dL_dpsi0.sum()

View file

@ -15,7 +15,7 @@ from ...util.caching import Cache_this
try: try:
from . import stationary_cython from . import stationary_cython
except ImportError: except ImportError:
print('warning in sationary: failed to import cython module: falling back to numpy') print('warning in stationary: failed to import cython module: falling back to numpy')
config.set('cython', 'working', 'false') config.set('cython', 'working', 'false')
@ -77,6 +77,10 @@ class Stationary(Kern):
def dK_dr(self, r): def dK_dr(self, r):
raise NotImplementedError("implement derivative of the covariance function wrt r to use this class") raise NotImplementedError("implement derivative of the covariance function wrt r to use this class")
@Cache_this(limit=20, ignore_args=())
def dK2_drdr(self, r):
raise NotImplementedError("implement second derivative of covariance wrt r to use this method")
@Cache_this(limit=5, ignore_args=()) @Cache_this(limit=5, ignore_args=())
def K(self, X, X2=None): def K(self, X, X2=None):
""" """
@ -89,11 +93,16 @@ class Stationary(Kern):
r = self._scaled_dist(X, X2) r = self._scaled_dist(X, X2)
return self.K_of_r(r) return self.K_of_r(r)
@Cache_this(limit=3, ignore_args=()) @Cache_this(limit=20, ignore_args=())
def dK_dr_via_X(self, X, X2): def dK_dr_via_X(self, X, X2):
#a convenience function, so we can cache dK_dr #a convenience function, so we can cache dK_dr
return self.dK_dr(self._scaled_dist(X, X2)) return self.dK_dr(self._scaled_dist(X, X2))
@Cache_this(limit=3, ignore_args=())
def dK2_drdr_via_X(self, X, X2):
#a convenience function, so we can cache dK2_drdr
return self.dK2_drdr(self._scaled_dist(X, X2))
def _unscaled_dist(self, X, X2=None): def _unscaled_dist(self, X, X2=None):
""" """
Compute the Euclidean distance between each row of X and X2, or between Compute the Euclidean distance between each row of X and X2, or between
@ -114,7 +123,7 @@ class Stationary(Kern):
r2 = np.clip(r2, 0, np.inf) r2 = np.clip(r2, 0, np.inf)
return np.sqrt(r2) return np.sqrt(r2)
@Cache_this(limit=5, ignore_args=()) @Cache_this(limit=20, ignore_args=())
def _scaled_dist(self, X, X2=None): def _scaled_dist(self, X, X2=None):
""" """
Efficiently compute the scaled distance, r. Efficiently compute the scaled distance, r.
@ -201,6 +210,59 @@ class Stationary(Kern):
else: else:
return self._gradients_X_pure(dL_dK, X, X2) return self._gradients_X_pure(dL_dK, X, X2)
def gradients_XX(self, dL_dK, X, X2=None):
"""
Given the derivative of the objective K(dL_dK), compute the second derivative of K wrt X and X2:
.. math::
    \frac{\partial^2 K}{\partial X\partial X2}
:returns: dL2_dXdX2 of shape NxMxQ, for X [NxQ] and X2 [MxQ] (X2 is X if X2 is None).
    Thus, we return the second derivative in X2.
"""
# The off diagonals in Q are always zero, this should also be true for the Linear kernel...
# According to multivariable chain rule, we can chain the second derivative through r:
# d2K_dXdX2 = dK_dr*d2r_dXdX2 + d2K_drdr * dr_dX * dr_dX2:
invdist = self._inv_dist(X, X2)
invdist2 = invdist**2
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
tmp1 = dL_dr * invdist
dL_drdr = self.dK2_drdr_via_X(X, X2) * dL_dK
tmp2 = dL_drdr * invdist2
l2 = np.ones(X.shape[1]) * self.lengthscale**2
if X2 is None:
X2 = X
tmp1 -= np.eye(X.shape[0])*self.variance
else:
tmp1[X==X2.T] -= self.variance
grad = np.empty((X.shape[0], X2.shape[0], X.shape[1]), dtype=np.float64)
#grad = np.empty(X.shape, dtype=np.float64)
for q in range(self.input_dim):
tmpdist2 = (X[:,[q]]-X2[:,[q]].T) ** 2
grad[:, :, q] = ((tmp1*invdist2 - tmp2)*tmpdist2/l2[q] - tmp1)/l2[q]
#grad[:, :, q] = ((tmp1*(((tmpdist2)*invdist2/l2[q])-1)) - (tmp2*(tmpdist2))/l2[q])/l2[q]
#np.sum(((tmp1*(((tmpdist2)*invdist2/l2[q])-1)) - (tmp2*(tmpdist2))/l2[q])/l2[q], axis=1, out=grad[:,q])
#np.sum( - (tmp2*(tmpdist**2)), axis=1, out=grad[:,q])
return grad
def gradients_XX_diag(self, dL_dK, X):
"""
Given the derivative of the objective Kdiag (dL_dKdiag), compute the second derivative of Kdiag wrt X:
.. math::
    \frac{\partial^2 K_{diag}}{\partial X\partial X}
:returns: dL2_dXdX_diag of shape NxQ, for X [NxQ]
"""
return np.ones(X.shape) * self.variance/self.lengthscale**2
def _gradients_X_pure(self, dL_dK, X, X2=None): def _gradients_X_pure(self, dL_dK, X, X2=None):
invdist = self._inv_dist(X, X2) invdist = self._inv_dist(X, X2)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
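To make the chain rule in gradients_XX concrete, a 1-D check (not in the commit) of d2k/dx dx2 for the RBF case; at x == x2 it reduces to variance/lengthscale^2, the value gradients_XX_diag returns above:

import numpy as np

var, l = 1.3, 0.9
k = lambda x, x2: var * np.exp(-0.5 * (x - x2)**2 / l**2)
x, x2, eps = 0.4, -0.2, 1e-4
num = (k(x + eps, x2 + eps) - k(x + eps, x2 - eps)
       - k(x - eps, x2 + eps) + k(x - eps, x2 - eps)) / (4. * eps**2)
closed = var / l**2 * (1. - (x - x2)**2 / l**2) * np.exp(-0.5 * (x - x2)**2 / l**2)
assert np.allclose(num, closed, rtol=1e-4)
# at x == x2 the closed form is var / l**2, matching gradients_XX_diag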

View file

@ -1,3 +1,5 @@
#ifndef __APPLE__
#include <omp.h> #include <omp.h>
#endif
void _grad_X(int N, int D, int M, double*X, double* X2, double* tmp, double* grad); void _grad_X(int N, int D, int M, double*X, double* X2, double* tmp, double* grad);
void _lengthscale_grads(int N, int D, int M, double* X, double* X2, double* tmp, double* grad); void _lengthscale_grads(int N, int D, int M, double* X, double* X2, double* tmp, double* grad);

View file

@ -1,6 +1,6 @@
from .bernoulli import Bernoulli from .bernoulli import Bernoulli
from .exponential import Exponential from .exponential import Exponential
from .gaussian import Gaussian from .gaussian import Gaussian, HeteroscedasticGaussian
from .gamma import Gamma from .gamma import Gamma
from .poisson import Poisson from .poisson import Poisson
from .student_t import StudentT from .student_t import StudentT

View file

@ -85,6 +85,7 @@ class Bernoulli(Likelihood):
gh_x, gh_w = gh_points gh_x, gh_w = gh_points
gh_w = gh_w / np.sqrt(np.pi)
shape = m.shape shape = m.shape
m,v,Y = m.flatten(), v.flatten(), Y.flatten() m,v,Y = m.flatten(), v.flatten(), Y.flatten()
Ysign = np.where(Y==1,1,-1) Ysign = np.where(Y==1,1,-1)

View file

@ -48,6 +48,7 @@ class Gaussian(Likelihood):
def betaY(self,Y,Y_metadata=None): def betaY(self,Y,Y_metadata=None):
#TODO: ~Ricardo this does not live here #TODO: ~Ricardo this does not live here
raise RuntimeError, "Please notify the GPy developers, this should not happen"
return Y/self.gaussian_variance(Y_metadata) return Y/self.gaussian_variance(Y_metadata)
def gaussian_variance(self, Y_metadata=None): def gaussian_variance(self, Y_metadata=None):
@ -315,9 +316,44 @@ class Gaussian(Likelihood):
return -0.5*np.log(2*np.pi) -0.5*np.log(v) - 0.5*np.square(y_test - mu_star)/v return -0.5*np.log(2*np.pi) -0.5*np.log(v) - 0.5*np.square(y_test - mu_star)/v
def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None): def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
if not isinstance(self.gp_link, link_functions.Identity):
return super(Gaussian, self).variational_expectations(Y=Y, m=m, v=v, gh_points=gh_points, Y_metadata=Y_metadata)
lik_var = float(self.variance) lik_var = float(self.variance)
F = -0.5*np.log(2*np.pi) -0.5*np.log(lik_var) - 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/lik_var F = -0.5*np.log(2*np.pi) -0.5*np.log(lik_var) - 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/lik_var
dF_dmu = (Y - m)/lik_var dF_dmu = (Y - m)/lik_var
dF_dv = np.ones_like(v)*(-0.5/lik_var) dF_dv = np.ones_like(v)*(-0.5/lik_var)
dF_dtheta = -0.5/lik_var + 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/(lik_var**2) dF_dtheta = -0.5/lik_var + 0.5*(np.square(Y) + np.square(m) + v - 2*m*Y)/(lik_var**2)
return F, dF_dmu, dF_dv, dF_dtheta.reshape(1, Y.shape[0], Y.shape[1]) return F, dF_dmu, dF_dv, dF_dtheta.reshape(1, Y.shape[0], Y.shape[1])
class HeteroscedasticGaussian(Gaussian):
def __init__(self, Y_metadata, gp_link=None, variance=1., name='het_Gauss'):
if gp_link is None:
gp_link = link_functions.Identity()
if not isinstance(gp_link, link_functions.Identity):
print("Warning, Exact inference is not implemeted for non-identity link functions,\
if you are not already, ensure Laplace inference_method is used")
super(HeteroscedasticGaussian, self).__init__(gp_link, np.ones(Y_metadata['output_index'].shape)*variance, name)
def exact_inference_gradients(self, dL_dKdiag,Y_metadata=None):
return dL_dKdiag[Y_metadata['output_index']]
def gaussian_variance(self, Y_metadata=None):
return self.variance[Y_metadata['output_index'].flatten()]
def predictive_values(self, mu, var, full_cov=False, Y_metadata=None):
_s = self.variance[Y_metadata['output_index'].flatten()]
if full_cov:
if var.ndim == 2:
var += np.eye(var.shape[0])*_s
if var.ndim == 3:
var += np.atleast_3d(np.eye(var.shape[0])*_s)
else:
var += _s
return mu, var
def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
_s = self.variance[Y_metadata['output_index'].flatten()]
return [stats.norm.ppf(q/100.)*np.sqrt(var + _s) + mu for q in quantiles]
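A shape-level sketch (illustrative only, with explicit broadcasting) of how 'output_index' in Y_metadata picks out a per-datum likelihood variance in the methods above:

import numpy as np
from scipy import stats

variance = np.array([0.1, 0.5, 2.0])                  # one noise level per datum
Y_metadata = {'output_index': np.arange(3)[:, None]}
_s = variance[Y_metadata['output_index'].flatten()]   # per-datum noise, shape (3,)

mu = np.zeros((3, 1))
var = np.full((3, 1), 0.3)                            # latent predictive variance
quantiles = [stats.norm.ppf(q / 100.) * np.sqrt(var + _s[:, None]) + mu
             for q in (2.5, 97.5)]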

View file

@ -9,6 +9,7 @@ from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_miniba
import logging import logging
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
from GPy.core.parameterization.param import Param from GPy.core.parameterization.param import Param
from GPy.core.parameterization.observable_array import ObsAr
class BayesianGPLVMMiniBatch(SparseGPMiniBatch): class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
""" """
@ -80,46 +81,10 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
"""Get the gradients of the posterior distribution of X in its specific form.""" """Get the gradients of the posterior distribution of X in its specific form."""
return X.mean.gradient, X.variance.gradient return X.mean.gradient, X.variance.gradient
def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, **kw): def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None, **kw):
posterior, log_marginal_likelihood, grad_dict, current_values, value_indices = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm, subset_indices=subset_indices, **kw) posterior, log_marginal_likelihood, grad_dict = super(BayesianGPLVMMiniBatch, self)._inner_parameters_changed(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=dL_dKmm,
psi0=psi0, psi1=psi1, psi2=psi2, **kw)
if self.has_uncertain_inputs(): return posterior, log_marginal_likelihood, grad_dict
current_values['meangrad'], current_values['vargrad'] = self.kern.gradients_qX_expectations(
variational_posterior=X,
Z=Z, dL_dpsi0=grad_dict['dL_dpsi0'],
dL_dpsi1=grad_dict['dL_dpsi1'],
dL_dpsi2=grad_dict['dL_dpsi2'])
else:
current_values['Xgrad'] = self.kern.gradients_X(grad_dict['dL_dKnm'], X, Z)
current_values['Xgrad'] += self.kern.gradients_X_diag(grad_dict['dL_dKdiag'], X)
if subset_indices is not None:
value_indices['Xgrad'] = subset_indices['samples']
kl_fctr = self.kl_factr
if self.has_uncertain_inputs():
if self.missing_data:
d = self.output_dim
log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)/d
else:
log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(X)
# Subsetting Variational Posterior objects, makes the gradients
# empty. We need them to be 0 though:
X.mean.gradient[:] = 0
X.variance.gradient[:] = 0
self.variational_prior.update_gradients_KL(X)
if self.missing_data:
current_values['meangrad'] += kl_fctr*X.mean.gradient/d
current_values['vargrad'] += kl_fctr*X.variance.gradient/d
else:
current_values['meangrad'] += kl_fctr*X.mean.gradient
current_values['vargrad'] += kl_fctr*X.variance.gradient
if subset_indices is not None:
value_indices['meangrad'] = subset_indices['samples']
value_indices['vargrad'] = subset_indices['samples']
return posterior, log_marginal_likelihood, grad_dict, current_values, value_indices
def _outer_values_update(self, full_values): def _outer_values_update(self, full_values):
""" """
@ -128,22 +93,47 @@ class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
""" """
super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values) super(BayesianGPLVMMiniBatch, self)._outer_values_update(full_values)
if self.has_uncertain_inputs(): if self.has_uncertain_inputs():
self.X.mean.gradient = full_values['meangrad'] meangrad_tmp, vargrad_tmp = self.kern.gradients_qX_expectations(
self.X.variance.gradient = full_values['vargrad'] variational_posterior=self.X,
Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
dL_dpsi1=full_values['dL_dpsi1'],
dL_dpsi2=full_values['dL_dpsi2'],
psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
self.X.mean.gradient = meangrad_tmp
self.X.variance.gradient = vargrad_tmp
else: else:
self.X.gradient = full_values['Xgrad'] self.X.gradient = self.kern.gradients_X(full_values['dL_dKnm'], self.X, self.Z)
self.X.gradient += self.kern.gradients_X_diag(full_values['dL_dKdiag'], self.X)
def _outer_init_full_values(self): def _outer_init_full_values(self):
if self.has_uncertain_inputs(): return super(BayesianGPLVMMiniBatch, self)._outer_init_full_values()
return dict(meangrad=np.zeros(self.X.mean.shape),
vargrad=np.zeros(self.X.variance.shape))
else:
return dict(Xgrad=np.zeros(self.X.shape))
def parameters_changed(self): def parameters_changed(self):
super(BayesianGPLVMMiniBatch,self).parameters_changed() super(BayesianGPLVMMiniBatch,self).parameters_changed()
if isinstance(self.inference_method, VarDTC_minibatch):
return kl_fctr = self.kl_factr
if kl_fctr > 0:
Xgrad = self.X.gradient.copy()
self.X.gradient[:] = 0
self.variational_prior.update_gradients_KL(self.X)
if self.missing_data or not self.stochastics:
self.X.mean.gradient = kl_fctr*self.X.mean.gradient
self.X.variance.gradient = kl_fctr*self.X.variance.gradient
else:
d = self.output_dim
self.X.mean.gradient = kl_fctr*self.X.mean.gradient*self.stochastics.batchsize/d
self.X.variance.gradient = kl_fctr*self.X.variance.gradient*self.stochastics.batchsize/d
self.X.gradient += Xgrad
if self.missing_data or not self.stochastics:
self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
elif self.stochastics:
d = self.output_dim
self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)*self.stochastics.batchsize/d
self._Xgrad = self.X.gradient.copy()
def plot_latent(self, labels=None, which_indices=None, def plot_latent(self, labels=None, which_indices=None,
resolution=50, ax=None, marker='o', s=40, resolution=50, ax=None, marker='o', s=40,

View file

@ -16,6 +16,8 @@ class GPHeteroscedasticRegression(GP):
:param X: input observations :param X: input observations
:param Y: observed values :param Y: observed values
:param kernel: a GPy kernel, defaults to rbf :param kernel: a GPy kernel, defaults to rbf
NB: This model makes no inference about the noise outside the training set
""" """
def __init__(self, X, Y, kernel=None, Y_metadata=None): def __init__(self, X, Y, kernel=None, Y_metadata=None):
@ -30,10 +32,7 @@ class GPHeteroscedasticRegression(GP):
kernel = kern.RBF(X.shape[1]) kernel = kern.RBF(X.shape[1])
#Likelihood #Likelihood
#likelihoods_list = [likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for j in range(Ny)] likelihood = likelihoods.HeteroscedasticGaussian(Y_metadata)
noise_terms = np.unique(Y_metadata['output_index'].flatten())
likelihoods_list = [likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for j in noise_terms]
likelihood = likelihoods.MixedNoise(likelihoods_list=likelihoods_list)
super(GPHeteroscedasticRegression, self).__init__(X,Y,kernel,likelihood, Y_metadata=Y_metadata) super(GPHeteroscedasticRegression, self).__init__(X,Y,kernel,likelihood, Y_metadata=Y_metadata)
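Hypothetical usage of the resulting model (not in the commit; assumes GPy is importable): Y_metadata maps every training point to its own noise term.

import numpy as np
import GPy

X = np.linspace(0, 10, 20)[:, None]
Y = np.sin(X) + np.random.randn(20, 1) * 0.1
Y_metadata = {'output_index': np.arange(20)[:, None]}

m = GPy.models.GPHeteroscedasticRegression(X, Y, Y_metadata=Y_metadata)
m.optimize()
mu, var = m._raw_predict(X)    # noise-free latent predictions at the training inputs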

View file

@ -2,19 +2,16 @@
# Distributed under the terms of the GNU General public License, see LICENSE.txt # Distributed under the terms of the GNU General public License, see LICENSE.txt
import numpy as np import numpy as np
from scipy import stats from ..core import GP
from scipy.special import erf
from ..core.model import Model
from ..core.parameterization import ObsAr from ..core.parameterization import ObsAr
from .. import kern from .. import kern
from ..core.parameterization.param import Param from ..core.parameterization.param import Param
from ..util.linalg import pdinv from ..inference.latent_function_inference import VarGauss
from ..likelihoods import Gaussian
log_2_pi = np.log(2*np.pi) log_2_pi = np.log(2*np.pi)
class GPVariationalGaussianApproximation(Model): class GPVariationalGaussianApproximation(GP):
""" """
The Variational Gaussian Approximation revisited The Variational Gaussian Approximation revisited
@ -26,75 +23,14 @@ class GPVariationalGaussianApproximation(Model):
pages = {786--792}, pages = {786--792},
} }
""" """
def __init__(self, X, Y, kernel, likelihood=None, Y_metadata=None): def __init__(self, X, Y, kernel, likelihood, Y_metadata=None):
Model.__init__(self,'Variational GP')
if likelihood is None:
likelihood = Gaussian()
# accept the construction arguments
self.X = ObsAr(X)
self.Y = Y
self.num_data, self.input_dim = self.X.shape
self.Y_metadata = Y_metadata
self.kern = kernel num_data = Y.shape[0]
self.likelihood = likelihood self.alpha = Param('alpha', np.zeros((num_data,1))) # only one latent fn for now.
self.link_parameter(self.kern) self.beta = Param('beta', np.ones(num_data))
self.link_parameter(self.likelihood)
inf = VarGauss(self.alpha, self.beta)
super(GPVariationalGaussianApproximation, self).__init__(X, Y, kernel, likelihood, name='VarGP', inference_method=inf)
self.alpha = Param('alpha', np.zeros((self.num_data,1))) # only one latent fn for now.
self.beta = Param('beta', np.ones(self.num_data))
self.link_parameter(self.alpha) self.link_parameter(self.alpha)
self.link_parameter(self.beta) self.link_parameter(self.beta)
def log_likelihood(self):
return self._log_lik
def parameters_changed(self):
K = self.kern.K(self.X)
m = K.dot(self.alpha)
KB = K*self.beta[:, None]
BKB = KB*self.beta[None, :]
A = np.eye(self.num_data) + BKB
Ai, LA, _, Alogdet = pdinv(A)
Sigma = np.diag(self.beta**-2) - Ai/self.beta[:, None]/self.beta[None, :] # posterior covariance: need full matrix for gradients
var = np.diag(Sigma).reshape(-1,1)
F, dF_dm, dF_dv, dF_dthetaL = self.likelihood.variational_expectations(self.Y, m, var, Y_metadata=self.Y_metadata)
self.likelihood.gradient = dF_dthetaL.sum(1).sum(1)
dF_da = np.dot(K, dF_dm)
SigmaB = Sigma*self.beta
dF_db = -np.diag(Sigma.dot(np.diag(dF_dv.flatten())).dot(SigmaB))*2
KL = 0.5*(Alogdet + np.trace(Ai) - self.num_data + np.sum(m*self.alpha))
dKL_da = m
A_A2 = Ai - Ai.dot(Ai)
dKL_db = np.diag(np.dot(KB.T, A_A2))
self._log_lik = F.sum() - KL
self.alpha.gradient = dF_da - dKL_da
self.beta.gradient = dF_db - dKL_db
# K-gradients
dKL_dK = 0.5*(self.alpha*self.alpha.T + self.beta[:, None]*self.beta[None, :]*A_A2)
tmp = Ai*self.beta[:, None]/self.beta[None, :]
dF_dK = self.alpha*dF_dm.T + np.dot(tmp*dF_dv, tmp.T)
self.kern.update_gradients_full(dF_dK - dKL_dK, self.X)
def _raw_predict(self, Xnew, full_cov=False):
"""
Predict the function(s) at the new point(s) Xnew.
:param Xnew: The points at which to make a prediction
:type Xnew: np.ndarray, Nnew x self.input_dim
"""
Wi, _, _, _ = pdinv(self.kern.K(self.X) + np.diag(self.beta**-2))
Kux = self.kern.K(self.X, Xnew)
mu = np.dot(Kux.T, self.alpha)
WiKux = np.dot(Wi, Kux)
if full_cov:
Kxx = self.kern.K(Xnew)
var = Kxx - np.dot(Kux.T, WiKux)
else:
Kxx = self.kern.Kdiag(Xnew)
var = Kxx - np.sum(WiKux*Kux, 0)
var = var.reshape(-1,1)
return mu, var
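The removed parameters_changed relied on the identity Sigma = B^{-2} - B^{-1}(I + BKB)^{-1}B^{-1} = (K^{-1} + B^2)^{-1} with B = diag(beta); a quick numerical confirmation (not part of the commit):

import numpy as np

n = 5
rng = np.random.RandomState(1)
L = rng.randn(n, n)
K = L.dot(L.T) + n * np.eye(n)      # a positive-definite "kernel" matrix
beta = rng.rand(n) + 0.5

A = np.eye(n) + (K * beta[:, None]) * beta[None, :]         # I + BKB
Sigma = np.diag(beta**-2) - np.linalg.inv(A) / beta[:, None] / beta[None, :]
assert np.allclose(Sigma, np.linalg.inv(np.linalg.inv(K) + np.diag(beta**2)))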

View file

@ -43,19 +43,19 @@ class GPLVM(GP):
super(GPLVM, self).parameters_changed() super(GPLVM, self).parameters_changed()
self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None) self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
def jacobian(self,X): #def jacobian(self,X):
J = np.zeros((X.shape[0],X.shape[1],self.output_dim)) # J = np.zeros((X.shape[0],X.shape[1],self.output_dim))
for i in range(self.output_dim): # for i in range(self.output_dim):
J[:,:,i] = self.kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1], X, self.X) # J[:,:,i] = self.kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1], X, self.X)
return J # return J
def magnification(self,X): #def magnification(self,X):
target=np.zeros(X.shape[0]) # target=np.zeros(X.shape[0])
#J = np.zeros((X.shape[0],X.shape[1],self.output_dim)) # #J = np.zeros((X.shape[0],X.shape[1],self.output_dim))
J = self.jacobian(X) ## J = self.jacobian(X)
for i in range(X.shape[0]): # for i in range(X.shape[0]):
target[i]=np.sqrt(np.linalg.det(np.dot(J[i,:,:],np.transpose(J[i,:,:])))) # target[i]=np.sqrt(np.linalg.det(np.dot(J[i,:,:],np.transpose(J[i,:,:]))))
return target # return target
def plot(self): def plot(self):
assert self.Y.shape[1] == 2, "too high dimensional to plot. Try plot_latent" assert self.Y.shape[1] == 2, "too high dimensional to plot. Try plot_latent"
@ -81,6 +81,3 @@ class GPLVM(GP):
resolution, ax, marker, s, resolution, ax, marker, s,
fignum, False, legend, fignum, False, legend,
plot_limits, aspect, updates, **kwargs) plot_limits, aspect, updates, **kwargs)
def plot_magnification(self, *args, **kwargs):
return util.plot_latent.plot_magnification(self, *args, **kwargs)
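For reference, the magnification the commented-out methods computed is m(x_n) = sqrt(det(J_n J_n^T)) for the per-point Jacobian J_n; in vectorised form (illustrative, with a random stand-in Jacobian):

import numpy as np

J = np.random.randn(4, 2, 3)                              # N x Q x D Jacobians
mag = np.sqrt(np.linalg.det(np.einsum('nqd,npd->nqp', J, J)))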

View file

@ -170,20 +170,19 @@ class MRD(BayesianGPLVMMiniBatch):
self._log_marginal_likelihood += b._log_marginal_likelihood self._log_marginal_likelihood += b._log_marginal_likelihood
self.logger.info('working on im <{}>'.format(hex(id(i)))) self.logger.info('working on im <{}>'.format(hex(id(i))))
self.Z.gradient[:] += b.full_values['Zgrad'] self.Z.gradient[:] += b.Z.gradient#full_values['Zgrad']
grad_dict = b.full_values #grad_dict = b.full_values
if self.has_uncertain_inputs(): if self.has_uncertain_inputs():
self.X.mean.gradient += grad_dict['meangrad'] self.X.gradient += b._Xgrad
self.X.variance.gradient += grad_dict['vargrad']
else: else:
self.X.gradient += grad_dict['Xgrad'] self.X.gradient += b._Xgrad
if self.has_uncertain_inputs(): #if self.has_uncertain_inputs():
# update for the KL divergence # # update for the KL divergence
self.variational_prior.update_gradients_KL(self.X) # self.variational_prior.update_gradients_KL(self.X)
self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X) # self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.X)
pass # pass
def log_likelihood(self): def log_likelihood(self):
return self._log_marginal_likelihood return self._log_marginal_likelihood

View file

@ -63,10 +63,10 @@ class SparseGPMiniBatch(SparseGP):
if stochastic and missing_data: if stochastic and missing_data:
self.missing_data = True self.missing_data = True
self.stochastics = SparseGPStochastics(self, batchsize) self.stochastics = SparseGPStochastics(self, batchsize, self.missing_data)
elif stochastic and not missing_data: elif stochastic and not missing_data:
self.missing_data = False self.missing_data = False
self.stochastics = SparseGPStochastics(self, batchsize) self.stochastics = SparseGPStochastics(self, batchsize, self.missing_data)
elif missing_data: elif missing_data:
self.missing_data = True self.missing_data = True
self.stochastics = SparseGPMissing(self) self.stochastics = SparseGPMissing(self)
@ -76,11 +76,12 @@ class SparseGPMiniBatch(SparseGP):
logger.info("Adding Z as parameter") logger.info("Adding Z as parameter")
self.link_parameter(self.Z, index=0) self.link_parameter(self.Z, index=0)
self.posterior = None self.posterior = None
self._predictive_variable = self.Z
def has_uncertain_inputs(self): def has_uncertain_inputs(self):
return isinstance(self.X, VariationalPosterior) return isinstance(self.X, VariationalPosterior)
def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, subset_indices=None, **kwargs): def _inner_parameters_changed(self, kern, X, Z, likelihood, Y, Y_metadata, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None, **kwargs):
""" """
This is the standard part, which usually belongs in parameters_changed. This is the standard part, which usually belongs in parameters_changed.
@ -99,47 +100,13 @@ class SparseGPMiniBatch(SparseGP):
like them into this dictionary for inner use of the indices inside the like them into this dictionary for inner use of the indices inside the
algorithm. algorithm.
""" """
try: if psi2 is None:
posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm, dL_dKmm=None, **kwargs) psi2_sum_n = None
except:
posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata)
current_values = {}
likelihood.update_gradients(grad_dict['dL_dthetaL'])
current_values['likgrad'] = likelihood.gradient.copy()
if subset_indices is None:
subset_indices = {}
if isinstance(X, VariationalPosterior):
#gradients wrt kernel
dL_dKmm = grad_dict['dL_dKmm']
kern.update_gradients_full(dL_dKmm, Z, None)
current_values['kerngrad'] = kern.gradient.copy()
kern.update_gradients_expectations(variational_posterior=X,
Z=Z,
dL_dpsi0=grad_dict['dL_dpsi0'],
dL_dpsi1=grad_dict['dL_dpsi1'],
dL_dpsi2=grad_dict['dL_dpsi2'])
current_values['kerngrad'] += kern.gradient
#gradients wrt Z
current_values['Zgrad'] = kern.gradients_X(dL_dKmm, Z)
current_values['Zgrad'] += kern.gradients_Z_expectations(
grad_dict['dL_dpsi0'],
grad_dict['dL_dpsi1'],
grad_dict['dL_dpsi2'],
Z=Z,
variational_posterior=X)
else: else:
#gradients wrt kernel psi2_sum_n = psi2.sum(axis=0)
kern.update_gradients_diag(grad_dict['dL_dKdiag'], X) posterior, log_marginal_likelihood, grad_dict = self.inference_method.inference(kern, X, Z, likelihood, Y, Y_metadata, Lm=Lm,
current_values['kerngrad'] = kern.gradient.copy() dL_dKmm=dL_dKmm, psi0=psi0, psi1=psi1, psi2=psi2_sum_n, **kwargs)
kern.update_gradients_full(grad_dict['dL_dKnm'], X, Z) return posterior, log_marginal_likelihood, grad_dict
current_values['kerngrad'] += kern.gradient
kern.update_gradients_full(grad_dict['dL_dKmm'], Z, None)
current_values['kerngrad'] += kern.gradient
#gradients wrt Z
current_values['Zgrad'] = kern.gradients_X(grad_dict['dL_dKmm'], Z)
current_values['Zgrad'] += kern.gradients_X(grad_dict['dL_dKnm'].T, Z, X)
return posterior, log_marginal_likelihood, grad_dict, current_values, subset_indices
def _inner_take_over_or_update(self, full_values=None, current_values=None, value_indices=None): def _inner_take_over_or_update(self, full_values=None, current_values=None, value_indices=None):
""" """
@ -173,7 +140,10 @@ class SparseGPMiniBatch(SparseGP):
else: else:
index = slice(None) index = slice(None)
if key in full_values: if key in full_values:
full_values[key][index] += current_values[key] try:
full_values[key][index] += current_values[key]
except:
full_values[key] += current_values[key]
else: else:
full_values[key] = current_values[key] full_values[key] = current_values[key]
@ -192,9 +162,41 @@ class SparseGPMiniBatch(SparseGP):
Here you put the values, which were collected before in the right places. Here you put the values, which were collected before in the right places.
E.g. set the gradients of parameters, etc. E.g. set the gradients of parameters, etc.
""" """
self.likelihood.gradient = full_values['likgrad'] if self.has_uncertain_inputs():
self.kern.gradient = full_values['kerngrad'] #gradients wrt kernel
self.Z.gradient = full_values['Zgrad'] dL_dKmm = full_values['dL_dKmm']
self.kern.update_gradients_full(dL_dKmm, self.Z, None)
kgrad = self.kern.gradient.copy()
self.kern.update_gradients_expectations(
variational_posterior=self.X,
Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
dL_dpsi1=full_values['dL_dpsi1'],
dL_dpsi2=full_values['dL_dpsi2'])
self.kern.gradient += kgrad
#gradients wrt Z
self.Z.gradient = self.kern.gradients_X(dL_dKmm, self.Z)
self.Z.gradient += self.kern.gradients_Z_expectations(
variational_posterior=self.X,
Z=self.Z, dL_dpsi0=full_values['dL_dpsi0'],
dL_dpsi1=full_values['dL_dpsi1'],
dL_dpsi2=full_values['dL_dpsi2'])
else:
#gradients wrt kernel
self.kern.update_gradients_diag(full_values['dL_dKdiag'], self.X)
kgrad = self.kern.gradient.copy()
self.kern.update_gradients_full(full_values['dL_dKnm'], self.X, self.Z)
kgrad += self.kern.gradient
self.kern.update_gradients_full(full_values['dL_dKmm'], self.Z, None)
self.kern.gradient += kgrad
#kgrad += self.kern.gradient
#gradients wrt Z
self.Z.gradient = self.kern.gradients_X(full_values['dL_dKmm'], self.Z)
self.Z.gradient += self.kern.gradients_X(full_values['dL_dKnm'].T, self.Z, self.X)
self.likelihood.update_gradients(full_values['dL_dthetaL'])
def _outer_init_full_values(self): def _outer_init_full_values(self):
""" """
@ -209,7 +211,15 @@ class SparseGPMiniBatch(SparseGP):
to initialize the gradients for the mean and the variance in order to to initialize the gradients for the mean and the variance in order to
have the full gradient for indexing) have the full gradient for indexing)
""" """
return {} retd = dict(dL_dKmm=np.zeros((self.Z.shape[0], self.Z.shape[0])))
if self.has_uncertain_inputs():
retd.update(dict(dL_dpsi0=np.zeros(self.X.shape[0]),
dL_dpsi1=np.zeros((self.X.shape[0], self.Z.shape[0])),
dL_dpsi2=np.zeros((self.X.shape[0], self.Z.shape[0], self.Z.shape[0]))))
else:
retd.update({'dL_dKdiag': np.zeros(self.X.shape[0]),
'dL_dKnm': np.zeros((self.X.shape[0], self.Z.shape[0]))})
return retd
def _outer_loop_for_missing_data(self): def _outer_loop_for_missing_data(self):
Lm = None Lm = None
@ -231,28 +241,36 @@ class SparseGPMiniBatch(SparseGP):
print(message, end=' ') print(message, end=' ')
for d, ninan in self.stochastics.d: for d, ninan in self.stochastics.d:
if not self.stochastics: if not self.stochastics:
print(' '*(len(message)) + '\r', end=' ') print(' '*(len(message)) + '\r', end=' ')
message = m_f(d) message = m_f(d)
print(message, end=' ') print(message, end=' ')
posterior, log_marginal_likelihood, \ psi0ni = self.psi0[ninan]
grad_dict, current_values, value_indices = self._inner_parameters_changed( psi1ni = self.psi1[ninan]
if self.has_uncertain_inputs():
psi2ni = self.psi2[ninan]
value_indices = dict(outputs=d, samples=ninan, dL_dpsi0=ninan, dL_dpsi1=ninan, dL_dpsi2=ninan)
else:
psi2ni = None
value_indices = dict(outputs=d, samples=ninan, dL_dKdiag=ninan, dL_dKnm=ninan)
posterior, log_marginal_likelihood, grad_dict = self._inner_parameters_changed(
self.kern, self.X[ninan], self.kern, self.X[ninan],
self.Z, self.likelihood, self.Z, self.likelihood,
self.Y_normalized[ninan][:, d], self.Y_metadata, self.Y_normalized[ninan][:, d], self.Y_metadata,
Lm, dL_dKmm, Lm, dL_dKmm,
subset_indices=dict(outputs=d, samples=ninan)) psi0=psi0ni, psi1=psi1ni, psi2=psi2ni)
self._inner_take_over_or_update(self.full_values, current_values, value_indices) # Fill out the full values by adding in the appropriate grad_dict
self._inner_values_update(current_values) # values
self._inner_take_over_or_update(self.full_values, grad_dict, value_indices)
self._inner_values_update(grad_dict) # What is this for? -> MRD
Lm = posterior.K_chol
dL_dKmm = grad_dict['dL_dKmm']
woodbury_inv[:, :, d] = posterior.woodbury_inv[:,:,None] woodbury_inv[:, :, d] = posterior.woodbury_inv[:,:,None]
woodbury_vector[:, d] = posterior.woodbury_vector woodbury_vector[:, d] = posterior.woodbury_vector
self._log_marginal_likelihood += log_marginal_likelihood self._log_marginal_likelihood += log_marginal_likelihood
if not self.stochastics: if not self.stochastics:
print('') print('')
@ -260,10 +278,10 @@ class SparseGPMiniBatch(SparseGP):
self.posterior = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, self.posterior = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector,
K=posterior._K, mean=None, cov=None, K_chol=posterior.K_chol) K=posterior._K, mean=None, cov=None, K_chol=posterior.K_chol)
self._outer_values_update(self.full_values) self._outer_values_update(self.full_values)
if self.has_uncertain_inputs():
self.kern.return_psi2_n = False
def _outer_loop_without_missing_data(self): def _outer_loop_without_missing_data(self):
self._log_marginal_likelihood = 0
if self.posterior is None: if self.posterior is None:
woodbury_inv = np.zeros((self.num_inducing, self.num_inducing, self.output_dim)) woodbury_inv = np.zeros((self.num_inducing, self.num_inducing, self.output_dim))
woodbury_vector = np.zeros((self.num_inducing, self.output_dim)) woodbury_vector = np.zeros((self.num_inducing, self.output_dim))
@@ -271,17 +289,16 @@ class SparseGPMiniBatch(SparseGP):
woodbury_inv = self.posterior._woodbury_inv woodbury_inv = self.posterior._woodbury_inv
woodbury_vector = self.posterior._woodbury_vector woodbury_vector = self.posterior._woodbury_vector
d = self.stochastics.d d = self.stochastics.d[0][0]
posterior, log_marginal_likelihood, \ posterior, log_marginal_likelihood, grad_dict= self._inner_parameters_changed(
grad_dict, self.full_values, _ = self._inner_parameters_changed(
self.kern, self.X, self.kern, self.X,
self.Z, self.likelihood, self.Z, self.likelihood,
self.Y_normalized[:, d], self.Y_metadata) self.Y_normalized[:, d], self.Y_metadata)
self.grad_dict = grad_dict self.grad_dict = grad_dict
self._log_marginal_likelihood += log_marginal_likelihood self._log_marginal_likelihood = log_marginal_likelihood
self._outer_values_update(self.full_values) self._outer_values_update(self.grad_dict)
woodbury_inv[:, :, d] = posterior.woodbury_inv[:, :, None] woodbury_inv[:, :, d] = posterior.woodbury_inv[:, :, None]
woodbury_vector[:, d] = posterior.woodbury_vector woodbury_vector[:, d] = posterior.woodbury_vector
@@ -290,10 +307,23 @@ class SparseGPMiniBatch(SparseGP):
K=posterior._K, mean=None, cov=None, K_chol=posterior.K_chol) K=posterior._K, mean=None, cov=None, K_chol=posterior.K_chol)
def parameters_changed(self): def parameters_changed(self):
#Compute the psi statistics for N once, but don't sum out N in psi2
if self.has_uncertain_inputs():
#psi0 = ObsAr(self.kern.psi0(self.Z, self.X))
#psi1 = ObsAr(self.kern.psi1(self.Z, self.X))
#psi2 = ObsAr(self.kern.psi2(self.Z, self.X))
self.psi0 = self.kern.psi0(self.Z, self.X)
self.psi1 = self.kern.psi1(self.Z, self.X)
self.psi2 = self.kern.psi2n(self.Z, self.X)
else:
self.psi0 = self.kern.Kdiag(self.X)
self.psi1 = self.kern.K(self.X, self.Z)
self.psi2 = None
if self.missing_data: if self.missing_data:
self._outer_loop_for_missing_data() self._outer_loop_for_missing_data()
elif self.stochastics: elif self.stochastics:
self._outer_loop_without_missing_data() self._outer_loop_without_missing_data()
else: else:
self.posterior, self._log_marginal_likelihood, self.grad_dict, self.full_values, _ = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata) self.posterior, self._log_marginal_likelihood, self.grad_dict = self._inner_parameters_changed(self.kern, self.X, self.Z, self.likelihood, self.Y_normalized, self.Y_metadata)
self._outer_values_update(self.full_values) self._outer_values_update(self.grad_dict)
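The refactored parameters_changed above computes the psi statistics once per call and lets every minibatch or missing-data slice index into them, instead of recomputing the kernel expectations for each batch. A minimal sketch of the cached shapes, assuming a working GPy install (N, M, Q and the RBF kernel are illustrative choices):

import numpy as np
import GPy
from GPy.core.parameterization.variational import NormalPosterior

N, M, Q = 50, 10, 3
Z = np.random.randn(M, Q)                          # inducing inputs
qX = NormalPosterior(np.random.randn(N, Q),        # variational means
                     np.random.rand(N, Q) + 0.01)  # variational variances
kern = GPy.kern.RBF(Q, ARD=True)

psi0 = kern.psi0(Z, qX)    # (N,):      E_q[k(x_n, x_n)]
psi1 = kern.psi1(Z, qX)    # (N, M):    E_q[k(x_n, Z)]
psi2 = kern.psi2n(Z, qX)   # (N, M, M): per-point second moments, N not summed out

ninan = np.arange(0, N, 2)     # e.g. the rows of one minibatch
psi1_batch = psi1[ninan]       # slicing the cache replaces a per-batch recomputation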
View file
@@ -47,6 +47,32 @@ def gpplot(x, mu, lower, upper, edgecol='#3300FF', fillcol='#33CCFF', ax=None, f
return plots return plots
def gperrors(x, mu, lower, upper, edgecol=None, ax=None, fignum=None, **kwargs):
_, axes = ax_default(fignum, ax)
mu = mu.flatten()
x = x.flatten()
lower = lower.flatten()
upper = upper.flatten()
plots = []
if edgecol is None:
edgecol='#3300FF'
if 'alpha' not in kwargs:
kwargs['alpha'] = 1.
if 'lw' not in kwargs:
kwargs['lw'] = 1.
plots.append(axes.errorbar(x,mu,yerr=np.vstack([mu-lower,upper-mu]),color=edgecol,**kwargs))
plots[-1][0].remove()
return plots
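The new gperrors helper draws asymmetric error bars with matplotlib's errorbar and then strips the line connecting the means: plots[-1][0].remove() deletes the Line2D element of the returned ErrorbarContainer, leaving only the bars. The same trick in a standalone sketch, with made-up quantiles:

import numpy as np
from matplotlib import pyplot as plt

x = np.linspace(0, 1, 20)
mu = np.sin(2 * np.pi * x)
lower, upper = mu - 0.3, mu + 0.5                 # stand-ins for 2.5%/97.5% quantiles
fig, ax = plt.subplots()
container = ax.errorbar(x, mu, yerr=np.vstack([mu - lower, upper - mu]),
                        color='#3300FF', alpha=1., lw=1.)
container[0].remove()                             # keep the bars, drop the mean line
plt.show()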
def removeRightTicks(ax=None): def removeRightTicks(ax=None):
ax = ax or pb.gca() ax = ax or pb.gca()
for i, line in enumerate(ax.get_yticklines()): for i, line in enumerate(ax.get_yticklines()):
View file
@@ -9,7 +9,8 @@ import itertools
try: try:
import Tango import Tango
from matplotlib.cm import get_cmap from matplotlib.cm import get_cmap
import pylab as pb from matplotlib import pyplot as pb
from matplotlib import cm
except: except:
pass pass
@@ -114,7 +115,7 @@ def plot_latent(model, labels=None, which_indices=None,
# create a function which computes the shading of latent space according to the output variance # create a function which computes the shading of latent space according to the output variance
def plot_function(x): def plot_function(x):
Xtest_full = np.zeros((x.shape[0], model.X.shape[1])) Xtest_full = np.zeros((x.shape[0], X.shape[1]))
Xtest_full[:, [input_1, input_2]] = x Xtest_full[:, [input_1, input_2]] = x
_, var = model.predict(Xtest_full, **predict_kwargs) _, var = model.predict(Xtest_full, **predict_kwargs)
var = var[:, :1] var = var[:, :1]
@@ -137,7 +138,7 @@ def plot_latent(model, labels=None, which_indices=None,
view = ImshowController(ax, plot_function, view = ImshowController(ax, plot_function,
(xmin, ymin, xmax, ymax), (xmin, ymin, xmax, ymax),
resolution, aspect=aspect, interpolation='bilinear', resolution, aspect=aspect, interpolation='bilinear',
cmap=pb.cm.binary, **imshow_kwargs) cmap=cm.binary, **imshow_kwargs)
# make sure labels are in order of input: # make sure labels are in order of input:
labels = np.asarray(labels) labels = np.asarray(labels)
@@ -192,17 +193,18 @@ def plot_latent(model, labels=None, which_indices=None,
if updates: if updates:
try: try:
ax.figure.canvas.show() fig.canvas.show()
except Exception as e: except Exception as e:
print("Could not invoke show: {}".format(e)) print("Could not invoke show: {}".format(e))
raw_input('Enter to continue') #raw_input('Enter to continue')
view.deactivate() return view
return ax return ax
def plot_magnification(model, labels=None, which_indices=None, def plot_magnification(model, labels=None, which_indices=None,
resolution=60, ax=None, marker='o', s=40, resolution=60, ax=None, marker='o', s=40,
fignum=None, plot_inducing=False, legend=True, fignum=None, plot_inducing=False, legend=True,
aspect='auto', updates=False): plot_limits=None,
aspect='auto', updates=False, mean=True, covariance=True, kern=None):
""" """
:param labels: a np.array of size model.num_data containing labels for the points (can be number, strings, etc) :param labels: a np.array of size model.num_data containing labels for the points (can be number, strings, etc)
:param resolution: the resolution of the grid on which to evaluate the predictive variance :param resolution: the resolution of the grid on which to evaluate the predictive variance
@@ -210,6 +212,8 @@ def plot_magnification(model, labels=None, which_indices=None,
if ax is None: if ax is None:
fig = pb.figure(num=fignum) fig = pb.figure(num=fignum)
ax = fig.add_subplot(111) ax = fig.add_subplot(111)
else:
fig = ax.figure
Tango.reset() Tango.reset()
if labels is None: if labels is None:
@@ -217,19 +221,90 @@ def plot_magnification(model, labels=None, which_indices=None,
input_1, input_2 = most_significant_input_dimensions(model, which_indices) input_1, input_2 = most_significant_input_dimensions(model, which_indices)
# first, plot the output variance as a function of the latent space #fetch the data points X that we'd like to plot
Xtest, xx, yy, xmin, xmax = x_frame2D(model.X[:, [input_1, input_2]], resolution=resolution) X = model.X
Xtest_full = np.zeros((Xtest.shape[0], model.X.shape[1])) if isinstance(X, VariationalPosterior):
X = X.mean
else:
X = X
if X.shape[0] > 1000:
print("Warning: subsampling X, as it has more samples then 1000. X.shape={!s}".format(X.shape))
subsample = np.random.choice(X.shape[0], size=1000, replace=False)
X = X[subsample]
labels = labels[subsample]
#=======================================================================
# <<<WORK IN PROGRESS>>>
# <<<DO NOT DELETE>>>
# plt.close('all')
# fig, ax = plt.subplots(1,1)
# from GPy.plotting.matplot_dep.dim_reduction_plots import most_significant_input_dimensions
# import matplotlib.patches as mpatches
# i1, i2 = most_significant_input_dimensions(m, None)
# xmin, xmax = 100, -100
# ymin, ymax = 100, -100
# legend_handles = []
#
# X = m.X.mean[:, [i1, i2]]
# X = m.X.variance[:, [i1, i2]]
#
# xmin = X[:,0].min(); xmax = X[:,0].max()
# ymin = X[:,1].min(); ymax = X[:,1].max()
# range_ = [[xmin, xmax], [ymin, ymax]]
# ul = np.unique(labels)
#
# for i, l in enumerate(ul):
# #cdict = dict(red =[(0., colors[i][0], colors[i][0]), (1., colors[i][0], colors[i][0])],
# # green=[(0., colors[i][0], colors[i][1]), (1., colors[i][1], colors[i][1])],
# # blue =[(0., colors[i][0], colors[i][2]), (1., colors[i][2], colors[i][2])],
# # alpha=[(0., 0., .0), (.5, .5, .5), (1., .5, .5)])
# #cmap = LinearSegmentedColormap('{}'.format(l), cdict)
# cmap = LinearSegmentedColormap.from_list('cmap_{}'.format(str(l)), [colors[i], colors[i]], 255)
# cmap._init()
# #alphas = .5*(1+scipy.special.erf(np.linspace(-2,2, cmap.N+3)))#np.log(np.linspace(np.exp(0), np.exp(1.), cmap.N+3))
# alphas = (scipy.special.erf(np.linspace(0,2.4, cmap.N+3)))#np.log(np.linspace(np.exp(0), np.exp(1.), cmap.N+3))
# cmap._lut[:, -1] = alphas
# print l
# x, y = X[labels==l].T
#
# heatmap, xedges, yedges = np.histogram2d(x, y, bins=300, range=range_)
# #heatmap, xedges, yedges = np.histogram2d(x, y, bins=100)
#
# im = ax.imshow(heatmap, extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], cmap=cmap, aspect='auto', interpolation='nearest', label=str(l))
# legend_handles.append(mpatches.Patch(color=colors[i], label=l))
# ax.set_xlim(xmin, xmax)
# ax.set_ylim(ymin, ymax)
# plt.legend(legend_handles, [l.get_label() for l in legend_handles])
# plt.draw()
# plt.show()
#=======================================================================
#Create an ImshowController that can re-plot the latent space shading at a good resolution
if plot_limits is None:
xmin, ymin = X[:, [input_1, input_2]].min(0)
xmax, ymax = X[:, [input_1, input_2]].max(0)
x_r, y_r = xmax-xmin, ymax-ymin
xmin -= .1*x_r
xmax += .1*x_r
ymin -= .1*y_r
ymax += .1*y_r
else:
try:
xmin, xmax, ymin, ymax = plot_limits
except (TypeError, ValueError) as e:
raise e.__class__("Wrong plot limits: {} given -> need (xmin, xmax, ymin, ymax)".format(plot_limits))
def plot_function(x): def plot_function(x):
Xtest_full = np.zeros((x.shape[0], X.shape[1]))
Xtest_full[:, [input_1, input_2]] = x Xtest_full[:, [input_1, input_2]] = x
mf=model.magnification(Xtest_full) mf = model.predict_magnification(Xtest_full, kern=kern, mean=mean, covariance=covariance)
return mf return mf
view = ImshowController(ax, plot_function, view = ImshowController(ax, plot_function,
tuple(model.X.min(0)[:, [input_1, input_2]]) + tuple(model.X.max(0)[:, [input_1, input_2]]), (xmin, ymin, xmax, ymax),
resolution, aspect=aspect, interpolation='bilinear', resolution, aspect=aspect, interpolation='bilinear',
cmap=pb.cm.gray) cmap=cm.gray)
# make sure labels are in order of input: # make sure labels are in order of input:
ulabels = [] ulabels = []
@@ -245,17 +320,17 @@ def plot_magnification(model, labels=None, which_indices=None,
elif type(ul) is np.int64: elif type(ul) is np.int64:
this_label = 'class %i' % ul this_label = 'class %i' % ul
else: else:
this_label = 'class %i' % i this_label = unicode(ul)
m = marker.next() m = marker.next()
index = np.nonzero(labels == ul)[0] index = np.nonzero(labels == ul)[0]
if model.input_dim == 1: if model.input_dim == 1:
x = model.X[index, input_1] x = X[index, input_1]
y = np.zeros(index.size) y = np.zeros(index.size)
else: else:
x = model.X[index, input_1] x = X[index, input_1]
y = model.X[index, input_2] y = X[index, input_2]
ax.scatter(x, y, marker=m, s=s, color=Tango.nextMedium(), label=this_label) ax.scatter(x, y, marker=m, s=s, c=Tango.nextMedium(), label=this_label, linewidth=.2, edgecolor='k', alpha=.9)
ax.set_xlabel('latent dimension %i' % input_1) ax.set_xlabel('latent dimension %i' % input_1)
ax.set_ylabel('latent dimension %i' % input_2) ax.set_ylabel('latent dimension %i' % input_2)
@@ -263,19 +338,29 @@ def plot_magnification(model, labels=None, which_indices=None,
if not np.all(labels == 1.) and legend: if not np.all(labels == 1.) and legend:
ax.legend(loc=0, numpoints=1) ax.legend(loc=0, numpoints=1)
ax.set_xlim(xmin[0], xmax[0]) ax.set_xlim((xmin, xmax))
ax.set_ylim(xmin[1], xmax[1]) ax.set_ylim((ymin, ymax))
ax.grid(b=False) # remove the grid if present, it doesn't look good
ax.set_aspect('auto') # set a nice aspect ratio
if plot_inducing: if plot_inducing and hasattr(model, 'Z'):
ax.plot(model.Z[:, input_1], model.Z[:, input_2], '^w') Z = model.Z
ax.scatter(Z[:, input_1], Z[:, input_2], c='w', s=18, marker="^", edgecolor='k', linewidth=.3, alpha=.7)
try:
fig.canvas.draw()
fig.tight_layout()
fig.canvas.draw()
except Exception as e:
print("Could not invoke tight layout: {}".format(e))
pass
if updates: if updates:
fig.canvas.show() try:
raw_input('Enter to continue') fig.canvas.draw()
fig.canvas.show()
pb.title('Magnification Factor') except Exception as e:
print("Could not invoke show: {}".format(e))
#raw_input('Enter to continue')
return view
return ax return ax
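When no plot_limits are given, the code above pads the data range of the two chosen latent dimensions by 10% on each side before handing (xmin, ymin, xmax, ymax) to the ImshowController. The padding logic in isolation, for any 2-column array X:

import numpy as np

X = np.random.randn(100, 2)
xmin, ymin = X.min(0)
xmax, ymax = X.max(0)
x_r, y_r = xmax - xmin, ymax - ymin
xmin, xmax = xmin - .1 * x_r, xmax + .1 * x_r
ymin, ymax = ymin - .1 * y_r, ymax + .1 * y_r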
@@ -314,8 +399,8 @@ def plot_steepest_gradient_map(model, fignum=None, ax=None, which_indices=None,
this_label = 'class %i' % i this_label = 'class %i' % i
m = marker.next() m = marker.next()
index = np.nonzero(data_labels == ul)[0] index = np.nonzero(data_labels == ul)[0]
x = model.X[index, input_1] x = X[index, input_1]
y = model.X[index, input_2] y = X[index, input_2]
ax.scatter(x, y, marker=m, s=data_s, color=Tango.nextMedium(), label=this_label) ax.scatter(x, y, marker=m, s=data_s, color=Tango.nextMedium(), label=this_label)
ax.set_xlabel('latent dimension %i' % input_1) ax.set_xlabel('latent dimension %i' % input_1)
@@ -323,7 +408,7 @@ def plot_steepest_gradient_map(model, fignum=None, ax=None, which_indices=None,
controller = ImAnnotateController(ax, controller = ImAnnotateController(ax,
plot_function, plot_function,
tuple(model.X.min(0)[:, significant_dims]) + tuple(model.X.max(0)[:, significant_dims]), tuple(X.min(0)[:, significant_dims]) + tuple(X.max(0)[:, significant_dims]),
resolution=resolution, resolution=resolution,
aspect=aspect, aspect=aspect,
cmap=get_cmap('jet'), cmap=get_cmap('jet'),
View file
@@ -9,6 +9,9 @@ class AxisEventController(object):
def __init__(self, ax): def __init__(self, ax):
self.ax = ax self.ax = ax
self.activate() self.activate()
def __del__(self):
self.deactivate()
def deactivate(self): def deactivate(self):
for cb_class in self.ax.callbacks.callbacks.values(): for cb_class in self.ax.callbacks.callbacks.values():
for cb_num in cb_class.keys(): for cb_num in cb_class.keys():
View file
@@ -3,19 +3,79 @@
import numpy as np import numpy as np
from . import Tango from . import Tango
from base_plots import gpplot, x_frame1D, x_frame2D from base_plots import gpplot, x_frame1D, x_frame2D, gperrors
from ...models.gp_coregionalized_regression import GPCoregionalizedRegression from ...models.gp_coregionalized_regression import GPCoregionalizedRegression
from ...models.sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression from ...models.sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
from scipy import sparse from scipy import sparse
from ...core.parameterization.variational import VariationalPosterior from ...core.parameterization.variational import VariationalPosterior
from matplotlib import pyplot as plt from matplotlib import pyplot as plt
def plot_data(model, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
fignum=None, ax=None, data_symbol='kx',mew=1.5):
"""
Plot the training data
- Choose which (at most two) input dimensions to plot against with visible_dims.
Can plot only part of the data
using which_data_rows and which_data_ycols.
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice model.X, model.Y
:param which_data_ycols: when the data has several columns (independent outputs), only plot these
:type which_data_ycols: 'all' or a list of integers
:param visible_dims: an array specifying the input dimensions to plot (maximum two)
:type visible_dims: a numpy array
:param fignum: figure to plot on.
:type fignum: figure number
:param ax: axes to plot on.
:type ax: axes handle
"""
#deal with optional arguments
if which_data_rows == 'all':
which_data_rows = slice(None)
if which_data_ycols == 'all':
which_data_ycols = np.arange(model.output_dim)
if ax is None:
fig = plt.figure(num=fignum)
ax = fig.add_subplot(111)
#data
X = model.X
Y = model.Y
#work out what the inputs are for plotting (1D or 2D)
if visible_dims is None:
visible_dims = np.arange(model.input_dim)
assert visible_dims.size <= 2, "Visible inputs cannot be larger than two"
free_dims = visible_dims
plots = {}
#one dimensional plotting
if len(free_dims) == 1:
for d in which_data_ycols:
plots['dataplot'] = ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], data_symbol, mew=mew)
#2D plotting
elif len(free_dims) == 2:
for d in which_data_ycols:
plots['dataplot'] = ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40,
Y[which_data_rows, d], cmap=plt.cm.jet, vmin=Y.min(), vmax=Y.max(), linewidth=0.)
else:
raise NotImplementedError("Cannot define a frame with more than two input dimensions")
return plots
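A hedged usage sketch for the plot_data helper just defined, on a toy 1-D regression model (the data and model here are illustrative, not part of the commit):

import numpy as np
import GPy

X = np.linspace(0, 10, 40)[:, None]
Y = np.sin(X) + 0.1 * np.random.randn(*X.shape)
m = GPy.models.GPRegression(X, Y)
plots = plot_data(m, visible_dims=np.array([0]))   # returns {'dataplot': ...}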
def plot_fit(model, plot_limits=None, which_data_rows='all', def plot_fit(model, plot_limits=None, which_data_rows='all',
which_data_ycols='all', fixed_inputs=[], which_data_ycols='all', fixed_inputs=[],
levels=20, samples=0, fignum=None, ax=None, resolution=None, levels=20, samples=0, fignum=None, ax=None, resolution=None,
plot_raw=False, plot_raw=False,
linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx', linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx',
apply_link=False, samples_f=0, plot_uncertain_inputs=True, predict_kw=None): apply_link=False, samples_f=0, plot_uncertain_inputs=True, predict_kw=None, plot_training_data=True):
""" """
Plot the posterior of the GP. Plot the posterior of the GP.
- In one dimension, the function is plotted with a shaded region identifying two standard deviations. - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
@@ -43,7 +103,6 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
:type fignum: figure number :type fignum: figure number
:param ax: axes to plot on. :param ax: axes to plot on.
:type ax: axes handle :type ax: axes handle
:type output: integer (first output is 0)
:param linecol: color of line to plot. :param linecol: color of line to plot.
:type linecol: :type linecol:
:param fillcol: color of fill :param fillcol: color of fill
@@ -52,6 +111,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
:type apply_link: boolean :type apply_link: boolean
:param samples_f: the number of posteriori f samples to plot p(f*|y) :param samples_f: the number of posteriori f samples to plot p(f*|y)
:type samples_f: int :type samples_f: int
:param plot_training_data: whether or not to plot the training points
:type plot_training_data: boolean
""" """
#deal with optional arguments #deal with optional arguments
if which_data_rows == 'all': if which_data_rows == 'all':
@@ -110,12 +171,17 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
else: else:
Y_metadata['output_index'] = extra_data Y_metadata['output_index'] = extra_data
m, v = model.predict(Xgrid, full_cov=False, Y_metadata=Y_metadata, **predict_kw) m, v = model.predict(Xgrid, full_cov=False, Y_metadata=Y_metadata, **predict_kw)
lower, upper = model.predict_quantiles(Xgrid, Y_metadata=Y_metadata) fmu, fv = model._raw_predict(Xgrid, full_cov=False, **predict_kw)
lower, upper = model.likelihood.predictive_quantiles(fmu, fv, (2.5, 97.5), Y_metadata=Y_metadata)
for d in which_data_ycols: for d in which_data_ycols:
plots['gpplot'] = gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], ax=ax, edgecol=linecol, fillcol=fillcol) plots['gpplot'] = gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], ax=ax, edgecol=linecol, fillcol=fillcol)
if not plot_raw: plots['dataplot'] = ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], data_symbol, mew=1.5) #if not plot_raw: plots['dataplot'] = ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], data_symbol, mew=1.5)
if not plot_raw and plot_training_data:
plots['dataplot'] = plot_data(model=model, which_data_rows=which_data_rows,
visible_dims=free_dims, data_symbol=data_symbol, mew=1.5, ax=ax, fignum=fignum)
#optionally plot some samples #optionally plot some samples
if samples: #NOTE not tested with fixed_inputs if samples: #NOTE not tested with fixed_inputs
@@ -195,7 +261,9 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
for d in which_data_ycols: for d in which_data_ycols:
m_d = m[:,d].reshape(resolution, resolution).T m_d = m[:,d].reshape(resolution, resolution).T
plots['contour'] = ax.contour(x, y, m_d, levels, vmin=m.min(), vmax=m.max(), cmap=plt.cm.jet) plots['contour'] = ax.contour(x, y, m_d, levels, vmin=m.min(), vmax=m.max(), cmap=plt.cm.jet)
if not plot_raw: plots['dataplot'] = ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=plt.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) #if not plot_raw: plots['dataplot'] = ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=plt.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.)
if not plot_raw and plot_training_data:
plots['dataplot'] = ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=plt.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.)
#set the limits of the plot to some sensible values #set the limits of the plot to some sensible values
ax.set_xlim(xmin[0], xmax[0]) ax.set_xlim(xmin[0], xmax[0])
@@ -260,3 +328,82 @@ def fixed_inputs(model, non_fixed_inputs, fix_routine='median', as_list=True, X_
return f_inputs return f_inputs
else: else:
return X return X
def errorbars_trainset(model, which_data_rows='all',
which_data_ycols='all', fixed_inputs=[],
fignum=None, ax=None,
linecol='red', data_symbol='kx',
predict_kw=None, plot_training_data=True, **kwargs):
"""
Plot the posterior error bars corresponding to the training data
- For more than two input dimensions, use fixed_inputs to plot the data points with some of the inputs fixed.
Can plot only part of the data
using which_data_rows and which_data_ycols.
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice model.X, model.Y
:param which_data_ycols: when the data has several columns (independent outputs), only plot these
:type which_data_ycols: 'all' or a list of integers
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v.
:type fixed_inputs: a list of tuples
:param fignum: figure to plot on.
:type fignum: figure number
:param ax: axes to plot on.
:type ax: axes handle
:param plot_training_data: whether or not to plot the training points
:type plot_training_data: boolean
"""
#deal with optional arguments
if which_data_rows == 'all':
which_data_rows = slice(None)
if which_data_ycols == 'all':
which_data_ycols = np.arange(model.output_dim)
if ax is None:
fig = plt.figure(num=fignum)
ax = fig.add_subplot(111)
X = model.X
Y = model.Y
if predict_kw is None:
predict_kw = {}
#work out what the inputs are for plotting (1D or 2D)
fixed_dims = np.array([i for i,v in fixed_inputs])
free_dims = np.setdiff1d(np.arange(model.input_dim),fixed_dims)
plots = {}
#one dimensional plotting
if len(free_dims) == 1:
m, v = model.predict(X, full_cov=False, Y_metadata=model.Y_metadata, **predict_kw)
fmu, fv = model._raw_predict(X, full_cov=False, **predict_kw)
lower, upper = model.likelihood.predictive_quantiles(fmu, fv, (2.5, 97.5), Y_metadata=model.Y_metadata)
for d in which_data_ycols:
plots['gperrors'] = gperrors(X, m[:, d], lower[:, d], upper[:, d], edgecol=linecol, ax=ax, fignum=fignum, **kwargs )
if plot_training_data:
plots['dataplot'] = plot_data(model=model, which_data_rows=which_data_rows,
visible_dims=free_dims, data_symbol=data_symbol, mew=1.5, ax=ax, fignum=fignum)
#set the limits of the plot to some sensible values
ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper))
ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
ax.set_xlim(X[:,free_dims].min(), X[:,free_dims].max())
ax.set_ylim(ymin, ymax)
elif len(free_dims) == 2:
raise NotImplementedError("Not implemented yet")
else:
raise NotImplementedError("Cannot define a frame with more than two input dimensions")
return plots
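Both plot_fit and errorbars_trainset now derive their intervals from the likelihood instead of model.predict_quantiles: _raw_predict returns the latent predictive moments, and likelihood.predictive_quantiles pushes them through the noise model. The two calls in isolation, on an illustrative toy model:

import numpy as np
import GPy

X = np.linspace(0, 10, 40)[:, None]
Y = np.sin(X) + 0.1 * np.random.randn(*X.shape)
m = GPy.models.GPRegression(X, Y)
fmu, fv = m._raw_predict(m.X, full_cov=False)       # latent mean and variance
lower, upper = m.likelihood.predictive_quantiles(
    fmu, fv, (2.5, 97.5), Y_metadata=m.Y_metadata)  # 95% band in observation space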
View file
@@ -0,0 +1,109 @@
'''
Created on 4 Sep 2015
@author: maxz
'''
import unittest
import numpy as np
import GPy
class BGPLVMTest(unittest.TestCase):
def setUp(self):
np.random.seed(12345)
X, W = np.random.normal(0,1,(100,6)), np.random.normal(0,1,(6,13))
Y = X.dot(W) + np.random.normal(0, .1, (X.shape[0], W.shape[1]))
self.inan = np.random.binomial(1, .1, Y.shape).astype(bool)
self.X, self.W, self.Y = X,W,Y
self.Q = 3
self.m_full = GPy.models.BayesianGPLVM(Y, self.Q)
def test_lik_comparisons_m1_s0(self):
# Test if the different implementations give the exact same likelihood as the full model.
# All of the following settings should give the same likelihood and gradients as the full model:
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=False)
m[:] = self.m_full[:]
np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
np.testing.assert_allclose(m.gradient, self.m_full.gradient)
assert(m.checkgrad())
def test_predict_missing_data(self):
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
m[:] = self.m_full[:]
np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
np.testing.assert_allclose(m.gradient, self.m_full.gradient)
self.assertRaises(NotImplementedError, m.predict, m.X, full_cov=True)
mu1, var1 = m.predict(m.X, full_cov=False)
mu2, var2 = self.m_full.predict(self.m_full.X, full_cov=False)
np.testing.assert_allclose(mu1, mu2)
np.testing.assert_allclose(var1, var2)
mu1, var1 = m.predict(m.X.mean, full_cov=True)
mu2, var2 = self.m_full.predict(self.m_full.X.mean, full_cov=True)
np.testing.assert_allclose(mu1, mu2)
np.testing.assert_allclose(var1[:,:,0], var2)
mu1, var1 = m.predict(m.X.mean, full_cov=False)
mu2, var2 = self.m_full.predict(self.m_full.X.mean, full_cov=False)
np.testing.assert_allclose(mu1, mu2)
np.testing.assert_allclose(var1[:,[0]], var2)
def test_lik_comparisons_m0_s0(self):
# Test if the different implementations give the exact same likelihood as the full model.
# All of the following settings should give the same likelihood and gradients as the full model:
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=False, stochastic=False)
m[:] = self.m_full[:]
np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
np.testing.assert_allclose(m.gradient, self.m_full.gradient)
assert(m.checkgrad())
def test_lik_comparisons_m1_s1(self):
# Test if the different implementations give the exact same likelihood as the full model.
# All of the following settings should give the same likelihood and gradients as the full model:
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
m[:] = self.m_full[:]
np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
np.testing.assert_allclose(m.gradient, self.m_full.gradient)
assert(m.checkgrad())
def test_lik_comparisons_m0_s1(self):
# Test if the different implementations give the exact same likelihood as the full model.
# All of the following settings should give the same likelihood and gradients as the full model:
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=False, stochastic=True, batchsize=self.Y.shape[1])
m[:] = self.m_full[:]
np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
np.testing.assert_allclose(m.gradient, self.m_full.gradient)
assert(m.checkgrad())
def test_gradients_missingdata(self):
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=False, batchsize=self.Y.shape[1])
assert(m.checkgrad())
def test_gradients_missingdata_stochastics(self):
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=1)
assert(m.checkgrad())
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=4)
assert(m.checkgrad())
def test_gradients_stochastics(self):
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=False, stochastic=True, batchsize=1)
assert(m.checkgrad())
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=False, stochastic=True, batchsize=4)
assert(m.checkgrad())
def test_predict(self):
# Test if the different implementations give the exact same likelihood as the full model.
# All of the following settings should give the same likelihood and gradients as the full model:
m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
m[:] = self.m_full[:]
np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
np.testing.assert_allclose(m.gradient, self.m_full.gradient)
assert(m.checkgrad())
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
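These tests lean on the flat parameter view that GPy models expose: m[:] = self.m_full[:] copies the whole parameter vector between two models of identical structure, which is what makes the likelihood and gradient comparisons meaningful. The pattern in miniature (toy model, not from the commit):

import numpy as np
import GPy

X = np.random.randn(30, 1)
Y = np.sin(X) + 0.05 * np.random.randn(30, 1)
m1 = GPy.models.GPRegression(X, Y)
m1.randomize()
m2 = GPy.models.GPRegression(X, Y)
m2[:] = m1[:]                                   # copy all parameters across
assert np.isclose(m1.log_likelihood(), m2.log_likelihood())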
View file
@@ -2,11 +2,20 @@ import numpy as np
import scipy as sp import scipy as sp
from GPy.util import choleskies from GPy.util import choleskies
import GPy import GPy
from ..util.config import config
import unittest
try:
from . import linalg_cython
config.set('cython', 'working', 'True')
except ImportError:
config.set('cython', 'working', 'False')
""" """
These tests make sure that the pure python and cython codes work the same These tests make sure that the pure python and cython codes work the same
""" """
@unittest.skipIf(not config.getboolean('cython', 'working'),"Cython modules have not been built on this machine")
class CythonTestChols(np.testing.TestCase): class CythonTestChols(np.testing.TestCase):
def setUp(self): def setUp(self):
self.flat = np.random.randn(45,5) self.flat = np.random.randn(45,5)
@@ -20,6 +29,7 @@ class CythonTestChols(np.testing.TestCase):
A2 = choleskies._triang_to_flat_cython(self.triang) A2 = choleskies._triang_to_flat_cython(self.triang)
np.testing.assert_allclose(A1, A2) np.testing.assert_allclose(A1, A2)
@unittest.skipIf(not config.getboolean('cython', 'working'),"Cython modules have not been built on this machine")
class test_stationary(np.testing.TestCase): class test_stationary(np.testing.TestCase):
def setUp(self): def setUp(self):
self.k = GPy.kern.RBF(10) self.k = GPy.kern.RBF(10)
@@ -49,6 +59,7 @@ class test_stationary(np.testing.TestCase):
g2 = self.k._lengthscale_grads_cython(self.dKxz, self.X, self.Z) g2 = self.k._lengthscale_grads_cython(self.dKxz, self.X, self.Z)
np.testing.assert_allclose(g1, g2) np.testing.assert_allclose(g1, g2)
@unittest.skipIf(not config.getboolean('cython', 'working'),"Cython modules have not been built on this machine")
class test_choleskies_backprop(np.testing.TestCase): class test_choleskies_backprop(np.testing.TestCase):
def setUp(self): def setUp(self):
a =np.random.randn(10,12) a =np.random.randn(10,12)
@@ -61,10 +72,3 @@ class test_choleskies_backprop(np.testing.TestCase):
r3 = GPy.util.choleskies.choleskies_cython.backprop_gradient_par_c(self.dL, self.L) r3 = GPy.util.choleskies.choleskies_cython.backprop_gradient_par_c(self.dL, self.L)
np.testing.assert_allclose(r1, r2) np.testing.assert_allclose(r1, r2)
np.testing.assert_allclose(r1, r3) np.testing.assert_allclose(r1, r3)
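The guard added to each class is the usual probe-once, skip-everywhere pattern: try to import the compiled module, record the outcome, and let unittest skip the whole class when the extension is missing. Reduced to its essentials (the module name is illustrative):

import unittest

try:
    import linalg_cython              # any compiled extension; illustrative name
    CYTHON_WORKING = True
except ImportError:
    CYTHON_WORKING = False

@unittest.skipIf(not CYTHON_WORKING, "Cython modules have not been built on this machine")
class CythonBackedTest(unittest.TestCase):
    def test_roundtrip(self):
        self.assertTrue(CYTHON_WORKING)   # only runs when the import succeeded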
View file
@@ -8,7 +8,7 @@ The test cases for various inference algorithms
import unittest, itertools import unittest, itertools
import numpy as np import numpy as np
import GPy import GPy
#np.seterr(invalid='raise')
class InferenceXTestCase(unittest.TestCase): class InferenceXTestCase(unittest.TestCase):
View file
@@ -6,9 +6,16 @@ import numpy as np
import GPy import GPy
import sys import sys
from GPy.core.parameterization.param import Param from GPy.core.parameterization.param import Param
from ..util.config import config
verbose = 0 verbose = 0
try:
from . import linalg_cython
config.set('cython', 'working', 'True')
except ImportError:
config.set('cython', 'working', 'False')
class Kern_check_model(GPy.core.Model): class Kern_check_model(GPy.core.Model):
""" """
@@ -371,6 +378,7 @@ class KernelTestsNonContinuous(unittest.TestCase):
X2 = self.X2[self.X2[:,-1]!=2] X2 = self.X2[self.X2[:,-1]!=2]
self.assertTrue(check_kernel_gradient_functions(kern, X=X, X2=X2, verbose=verbose, fixed_X_dims=-1)) self.assertTrue(check_kernel_gradient_functions(kern, X=X, X2=X2, verbose=verbose, fixed_X_dims=-1))
@unittest.skipIf(not config.getboolean('cython', 'working'),"Cython modules have not been built on this machine")
class Coregionalize_cython_test(unittest.TestCase): class Coregionalize_cython_test(unittest.TestCase):
""" """
Make sure that the coregionalize kernel work with and without cython enabled Make sure that the coregionalize kernel work with and without cython enabled
@@ -437,6 +445,104 @@ class KernelTestsProductWithZeroValues(unittest.TestCase):
self.assertFalse(np.any(np.isnan(target)), self.assertFalse(np.any(np.isnan(target)),
"Gradient resulted in NaN") "Gradient resulted in NaN")
class Kernel_Psi_statistics_GradientTests(unittest.TestCase):
def setUp(self):
from GPy.core.parameterization.variational import NormalPosterior
N,M,Q = 100,20,3
X = np.random.randn(N,Q)
X_var = np.random.rand(N,Q)+0.01
self.Z = np.random.randn(M,Q)
self.qX = NormalPosterior(X, X_var)
self.w1 = np.random.randn(N)
self.w2 = np.random.randn(N,M)
self.w3 = np.random.randn(M,M)
self.w3 = self.w3+self.w3.T
self.w3n = np.random.randn(N,M,M)
self.w3n = self.w3n+np.swapaxes(self.w3n, 1,2)
def test_kernels(self):
from GPy.kern import RBF,Linear
Q = self.Z.shape[1]
kernels = [RBF(Q,ARD=True), Linear(Q,ARD=True)]
for k in kernels:
k.randomize()
self._test_kernel_param(k)
self._test_Z(k)
self._test_qX(k)
self._test_kernel_param(k, psi2n=True)
self._test_Z(k, psi2n=True)
self._test_qX(k, psi2n=True)
def _test_kernel_param(self, kernel, psi2n=False):
def f(p):
kernel.param_array[:] = p
psi0 = kernel.psi0(self.Z, self.qX)
psi1 = kernel.psi1(self.Z, self.qX)
if not psi2n:
psi2 = kernel.psi2(self.Z, self.qX)
return (self.w1*psi0).sum() + (self.w2*psi1).sum() + (self.w3*psi2).sum()
else:
psi2 = kernel.psi2n(self.Z, self.qX)
return (self.w1*psi0).sum() + (self.w2*psi1).sum() + (self.w3n*psi2).sum()
def df(p):
kernel.param_array[:] = p
kernel.update_gradients_expectations(self.w1, self.w2, self.w3 if not psi2n else self.w3n, self.Z, self.qX)
return kernel.gradient.copy()
from GPy.models import GradientChecker
m = GradientChecker(f, df, kernel.param_array.copy())
self.assertTrue(m.checkgrad())
def _test_Z(self, kernel, psi2n=False):
def f(p):
psi0 = kernel.psi0(p, self.qX)
psi1 = kernel.psi1(p, self.qX)
if not psi2n:
psi2 = kernel.psi2(p, self.qX)
return (self.w1*psi0).sum() + (self.w2*psi1).sum() + (self.w3*psi2).sum()
else:
psi2 = kernel.psi2n(p, self.qX)
return (self.w1*psi0).sum() + (self.w2*psi1).sum() + (self.w3n*psi2).sum()
def df(p):
return kernel.gradients_Z_expectations(self.w1, self.w2, self.w3 if not psi2n else self.w3n, p, self.qX)
from GPy.models import GradientChecker
m = GradientChecker(f, df, self.Z.copy())
self.assertTrue(m.checkgrad())
def _test_qX(self, kernel, psi2n=False):
def f(p):
self.qX.param_array[:] = p
self.qX._trigger_params_changed()
psi0 = kernel.psi0(self.Z, self.qX)
psi1 = kernel.psi1(self.Z, self.qX)
if not psi2n:
psi2 = kernel.psi2(self.Z, self.qX)
return (self.w1*psi0).sum() + (self.w2*psi1).sum() + (self.w3*psi2).sum()
else:
psi2 = kernel.psi2n(self.Z, self.qX)
return (self.w1*psi0).sum() + (self.w2*psi1).sum() + (self.w3n*psi2).sum()
def df(p):
self.qX.param_array[:] = p
self.qX._trigger_params_changed()
grad = kernel.gradients_qX_expectations(self.w1, self.w2, self.w3 if not psi2n else self.w3n, self.Z, self.qX)
self.qX.set_gradients(grad)
return self.qX.gradient.copy()
from GPy.models import GradientChecker
m = GradientChecker(f, df, self.qX.param_array.copy())
self.assertTrue(m.checkgrad())
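All three helpers above delegate the actual check to GPy.models.GradientChecker: it wraps an objective f and its claimed gradient df, and checkgrad compares df against finite differences of f at the supplied point. A minimal standalone use with a known gradient:

import numpy as np
from GPy.models import GradientChecker

f = lambda x: float(np.sum(x ** 2))
df = lambda x: 2 * x
m = GradientChecker(f, df, np.random.randn(4))
assert m.checkgrad()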
if __name__ == "__main__": if __name__ == "__main__":
print("Running unit tests, please be (very) patient...") print("Running unit tests, please be (very) patient...")
View file
@@ -9,8 +9,7 @@ import inspect
from GPy.likelihoods import link_functions from GPy.likelihoods import link_functions
from GPy.core.parameterization import Param from GPy.core.parameterization import Param
from functools import partial from functools import partial
#np.random.seed(300) fixed_seed = 7
#np.random.seed(4)
#np.seterr(divide='raise') #np.seterr(divide='raise')
def dparam_partial(inst_func, *args): def dparam_partial(inst_func, *args):
@@ -105,6 +104,7 @@ class TestNoiseModels(object):
Generic model checker Generic model checker
""" """
def setUp(self): def setUp(self):
np.random.seed(fixed_seed)
self.N = 15 self.N = 15
self.D = 3 self.D = 3
self.X = np.random.rand(self.N, self.D)*10 self.X = np.random.rand(self.N, self.D)*10
@@ -218,7 +218,8 @@ class TestNoiseModels(object):
"constraints": [(".*variance", self.constrain_positive)] "constraints": [(".*variance", self.constrain_positive)]
}, },
"laplace": True, "laplace": True,
"ep": False # FIXME: Should be True when we have it working again "ep": False, # FIXME: Should be True when we have it working again
"variational_expectations": True,
}, },
"Gaussian_log": { "Gaussian_log": {
"model": GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var), "model": GPy.likelihoods.Gaussian(gp_link=link_functions.Log(), variance=self.var),
@@ -227,7 +228,8 @@ class TestNoiseModels(object):
"vals": [self.var], "vals": [self.var],
"constraints": [(".*variance", self.constrain_positive)] "constraints": [(".*variance", self.constrain_positive)]
}, },
"laplace": True "laplace": True,
"variational_expectations": True
}, },
#"Gaussian_probit": { #"Gaussian_probit": {
#"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N), #"model": GPy.likelihoods.gaussian(gp_link=link_functions.Probit(), variance=self.var, D=self.D, N=self.N),
@@ -252,7 +254,8 @@ class TestNoiseModels(object):
"link_f_constraints": [partial(self.constrain_bounded, lower=0, upper=1)], "link_f_constraints": [partial(self.constrain_bounded, lower=0, upper=1)],
"laplace": True, "laplace": True,
"Y": self.binary_Y, "Y": self.binary_Y,
"ep": False # FIXME: Should be True when we have it working again "ep": False, # FIXME: Should be True when we have it working again
"variational_expectations": True
}, },
"Exponential_default": { "Exponential_default": {
"model": GPy.likelihoods.Exponential(), "model": GPy.likelihoods.Exponential(),
@@ -347,6 +350,10 @@ class TestNoiseModels(object):
ep = attributes["ep"] ep = attributes["ep"]
else: else:
ep = False ep = False
if "variational_expectations" in attributes:
var_exp = attributes["variational_expectations"]
else:
var_exp = False
#if len(param_vals) > 1: #if len(param_vals) > 1:
#raise NotImplementedError("Cannot support multiple params in likelihood yet!") #raise NotImplementedError("Cannot support multiple params in likelihood yet!")
@@ -377,6 +384,11 @@ class TestNoiseModels(object):
if ep: if ep:
#ep likelihood gradcheck #ep likelihood gradcheck
yield self.t_ep_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints yield self.t_ep_fit_rbf_white, model, self.X, Y, f, Y_metadata, self.step, param_vals, param_names, param_constraints
if var_exp:
#Need to specify mu and var!
yield self.t_varexp, model, Y, Y_metadata
yield self.t_dexp_dmu, model, Y, Y_metadata
yield self.t_dexp_dvar, model, Y, Y_metadata
self.tearDown() self.tearDown()
@@ -603,6 +615,87 @@ class TestNoiseModels(object):
print(m) print(m)
assert m.checkgrad(verbose=1, step=step) assert m.checkgrad(verbose=1, step=step)
################
# variational expectations #
################
@with_setup(setUp, tearDown)
def t_varexp(self, model, Y, Y_metadata):
#Test that the analytic implementation (if it exists) matches the generic gauss
#hermite implementation
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = model.variational_expectations(Y=Y, m=mu, v=var, gh_points=None, Y_metadata=Y_metadata)[0]
#Implementation of gauss hermite integration
shape = mu.shape
gh_x, gh_w= np.polynomial.hermite.hermgauss(50)
m,v,Y = mu.flatten(), var.flatten(), Y.flatten()
#make a grid of points
X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + m[:,None]
#evaluate the likelihood for the grid. The first axis indexes the data (and mu, var) and the second indexes the grid;
# broadcasting needs to be handled carefully.
logp = model.logpdf(X, Y[:,None], Y_metadata=Y_metadata)
#average over the grid to get the expectation of the log likelihood
#division by sqrt(pi) comes from the fact that each Gauss-Hermite quadrature sum needs scaling by 1/sqrt(pi)
expectation_gh = np.dot(logp, gh_w)/np.sqrt(np.pi)
expectation_gh = expectation_gh.reshape(*shape)
np.testing.assert_almost_equal(expectation, expectation_gh, decimal=5)
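The quadrature here is the standard change of variables f = sqrt(2*v)*x + m, which turns E_{N(f|m,v)}[g(f)] into sum_i w_i g(sqrt(2*v)*x_i + m) / sqrt(pi). A self-contained sanity check against a case with a closed form, E[f^2] = m^2 + v:

import numpy as np

gh_x, gh_w = np.polynomial.hermite.hermgauss(50)
m, v = 0.7, 2.3
f = gh_x * np.sqrt(2. * v) + m             # nodes mapped onto N(m, v)
approx = np.dot(f ** 2, gh_w) / np.sqrt(np.pi)
assert np.isclose(approx, m ** 2 + v)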
@with_setup(setUp, tearDown)
def t_dexp_dmu(self, model, Y, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = functools.partial(model.variational_expectations, Y=Y, v=var, gh_points=None, Y_metadata=Y_metadata)
#Helpers picking the expectation and its gradient w.r.t. the mean out of the returned tuple
def F(mu):
return expectation(m=mu)[0]
def dmu(mu):
return expectation(m=mu)[1]
grad = GradientChecker(F, dmu, mu.copy(), 'm')
grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
@with_setup(setUp, tearDown)
def t_dexp_dvar(self, model, Y, Y_metadata):
print("\n{}".format(inspect.stack()[0][3]))
#Make mu and var (marginal means and variances of q(f)) draws from a GP
k = GPy.kern.RBF(1).K(np.linspace(0,1,Y.shape[0])[:, None])
L = GPy.util.linalg.jitchol(k)
mu = L.dot(np.random.randn(*Y.shape))
#Variance must be positive
var = np.abs(L.dot(np.random.randn(*Y.shape))) + 0.01
expectation = functools.partial(model.variational_expectations, Y=Y, m=mu, gh_points=None, Y_metadata=Y_metadata)
#Helpers picking the expectation and its gradient w.r.t. the variance out of the returned tuple
def F(var):
return expectation(v=var)[0]
def dvar(var):
return expectation(v=var)[2]
grad = GradientChecker(F, dvar, var.copy(), 'v')
self.constrain_positive('v', grad)
#grad.randomize()
print(grad)
print(model)
assert grad.checkgrad(verbose=1)
class LaplaceTests(unittest.TestCase): class LaplaceTests(unittest.TestCase):
""" """
@@ -610,6 +703,7 @@
""" """
def setUp(self): def setUp(self):
np.random.seed(fixed_seed)
self.N = 15 self.N = 15
self.D = 1 self.D = 1
self.X = np.random.rand(self.N, self.D)*10 self.X = np.random.rand(self.N, self.D)*10
View file
@@ -7,7 +7,6 @@ _lim_val_exp = np.log(_lim_val)
_lim_val_square = np.sqrt(_lim_val) _lim_val_square = np.sqrt(_lim_val)
_lim_val_cube = cbrt(_lim_val) _lim_val_cube = cbrt(_lim_val)
from GPy.likelihoods.link_functions import Identity, Probit, Cloglog, Log, Log_ex_1, Reciprocal, Heaviside from GPy.likelihoods.link_functions import Identity, Probit, Cloglog, Log, Log_ex_1, Reciprocal, Heaviside
#np.seterr(over='raise')
class LinkFunctionTests(np.testing.TestCase): class LinkFunctionTests(np.testing.TestCase):
def setUp(self): def setUp(self):
@@ -80,8 +79,7 @@ class LinkFunctionTests(np.testing.TestCase):
assert np.isinf(np.exp(np.log(self.f_upper_lim))) assert np.isinf(np.exp(np.log(self.f_upper_lim)))
#Check the clipping works #Check the clipping works
np.testing.assert_almost_equal(link.transf(self.f_lower_lim), 0, decimal=5) np.testing.assert_almost_equal(link.transf(self.f_lower_lim), 0, decimal=5)
#Need to look at most significant figures here rather than the decimals self.assertTrue(np.isfinite(link.transf(self.f_upper_lim)))
np.testing.assert_approx_equal(link.transf(self.f_upper_lim), _lim_val, significant=5)
self.check_overflow(link, lim_of_inf) self.check_overflow(link, lim_of_inf)
#Check that it would otherwise fail #Check that it would otherwise fail
View file
@@ -13,10 +13,13 @@ class MiscTests(np.testing.TestCase):
def test_safe_exp_upper(self): def test_safe_exp_upper(self):
with warnings.catch_warnings(record=True) as w: with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always') # always print
assert np.isfinite(np.exp(self._lim_val_exp)) assert np.isfinite(np.exp(self._lim_val_exp))
assert np.isinf(np.exp(self._lim_val_exp + 1)) assert np.isinf(np.exp(self._lim_val_exp + 1))
assert np.isfinite(GPy.util.misc.safe_exp(self._lim_val_exp + 1)) assert np.isfinite(GPy.util.misc.safe_exp(self._lim_val_exp + 1))
print(w)
print(len(w))
assert len(w)==1 # should have one overflow warning assert len(w)==1 # should have one overflow warning
def test_safe_exp_lower(self): def test_safe_exp_lower(self):
View file
@@ -36,6 +36,26 @@ class MiscTests(unittest.TestCase):
np.testing.assert_almost_equal(np.diag(K_hat)[:, None], var) np.testing.assert_almost_equal(np.diag(K_hat)[:, None], var)
np.testing.assert_almost_equal(mu_hat, mu) np.testing.assert_almost_equal(mu_hat, mu)
def test_normalizer(self):
k = GPy.kern.RBF(1)
Y = self.Y
mu, std = Y.mean(0), Y.std(0)
m = GPy.models.GPRegression(self.X, Y, kernel=k, normalizer=True)
m.optimize()
assert(m.checkgrad())
k = GPy.kern.RBF(1)
m2 = GPy.models.GPRegression(self.X, (Y-mu)/std, kernel=k, normalizer=False)
m2[:] = m[:]
mu1, var1 = m.predict(m.X, full_cov=True)
mu2, var2 = m2.predict(m2.X, full_cov=True)
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
mu1, var1 = m.predict(m.X, full_cov=False)
mu2, var2 = m2.predict(m2.X, full_cov=False)
np.testing.assert_allclose(mu1, (mu2*std)+mu)
np.testing.assert_allclose(var1, var2)
def test_sparse_raw_predict(self): def test_sparse_raw_predict(self):
k = GPy.kern.RBF(1) k = GPy.kern.RBF(1)
m = GPy.models.SparseGPRegression(self.X, self.Y, kernel=k) m = GPy.models.SparseGPRegression(self.X, self.Y, kernel=k)
@@ -352,8 +372,8 @@ class GradientTests(np.testing.TestCase):
self.check_model(rbf, model_type='SparseGPRegression', dimension=2) self.check_model(rbf, model_type='SparseGPRegression', dimension=2)
def test_SparseGPRegression_rbf_linear_white_kern_1D(self): def test_SparseGPRegression_rbf_linear_white_kern_1D(self):
''' Testing the sparse GP regression with rbf kernel on 2d data ''' ''' Testing the sparse GP regression with rbf kernel on 1d data '''
rbflin = GPy.kern.RBF(1) + GPy.kern.Linear(1) rbflin = GPy.kern.RBF(1) + GPy.kern.Linear(1) + GPy.kern.White(1, 1e-5)
self.check_model(rbflin, model_type='SparseGPRegression', dimension=1) self.check_model(rbflin, model_type='SparseGPRegression', dimension=1)
def test_SparseGPRegression_rbf_linear_white_kern_2D(self): def test_SparseGPRegression_rbf_linear_white_kern_2D(self):
@@ -472,6 +492,7 @@ class GradientTests(np.testing.TestCase):
self.assertTrue(m.checkgrad()) self.assertTrue(m.checkgrad())
def test_gp_kronecker_gaussian(self): def test_gp_kronecker_gaussian(self):
np.random.seed(0)
N1, N2 = 30, 20 N1, N2 = 30, 20
X1 = np.random.randn(N1, 1) X1 = np.random.randn(N1, 1)
X2 = np.random.randn(N2, 1) X2 = np.random.randn(N2, 1)
@@ -492,16 +513,16 @@ class GradientTests(np.testing.TestCase):
m.randomize() m.randomize()
mm[:] = m[:] mm[:] = m[:]
assert np.allclose(m.log_likelihood(), mm.log_likelihood()) self.assertTrue(np.allclose(m.log_likelihood(), mm.log_likelihood()))
assert np.allclose(m.gradient, mm.gradient) self.assertTrue(np.allclose(m.gradient, mm.gradient))
X1test = np.random.randn(100, 1) X1test = np.random.randn(100, 1)
X2test = np.random.randn(100, 1) X2test = np.random.randn(100, 1)
mean1, var1 = m.predict(X1test, X2test) mean1, var1 = m.predict(X1test, X2test)
yy, xx = np.meshgrid(X2test, X1test) yy, xx = np.meshgrid(X2test, X1test)
Xgrid = np.vstack((xx.flatten(order='F'), yy.flatten(order='F'))).T Xgrid = np.vstack((xx.flatten(order='F'), yy.flatten(order='F'))).T
mean2, var2 = mm.predict(Xgrid) mean2, var2 = mm.predict(Xgrid)
assert np.allclose(mean1, mean2) self.assertTrue( np.allclose(mean1, mean2) )
assert np.allclose(var1, var2) self.assertTrue( np.allclose(var1, var2) )
def test_gp_VGPC(self): def test_gp_VGPC(self):
num_obs = 25 num_obs = 25
@@ -509,7 +530,8 @@ class GradientTests(np.testing.TestCase):
X = X[:, None] X = X[:, None]
Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None] Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None]
kern = GPy.kern.Bias(1) + GPy.kern.RBF(1) kern = GPy.kern.Bias(1) + GPy.kern.RBF(1)
m = GPy.models.GPVariationalGaussianApproximation(X, Y, kern) lik = GPy.likelihoods.Gaussian()
m = GPy.models.GPVariationalGaussianApproximation(X, Y, kernel=kern, likelihood=lik)
self.assertTrue(m.checkgrad()) self.assertTrue(m.checkgrad())
View file
@@ -20,6 +20,8 @@ from GPy.examples.dimensionality_reduction import mrd_simulation
from GPy.core.parameterization.variational import NormalPosterior from GPy.core.parameterization.variational import NormalPosterior
from GPy.models.gp_regression import GPRegression from GPy.models.gp_regression import GPRegression
from functools import reduce from functools import reduce
from GPy.util.caching import Cacher
from pickle import PicklingError
def toy_model(): def toy_model():
X = np.linspace(0,1,50)[:, None] X = np.linspace(0,1,50)[:, None]
@@ -205,23 +207,6 @@ class Test(ListDictTestCase):
def _callback(self, what, which): def _callback(self, what, which):
what.count += 1 what.count += 1
@unittest.skip
def test_add_observer(self):
par = toy_model()
par.name = "original"
par.count = 0
par.add_observer(self, self._callback, 1)
pcopy = GPRegression(par.X.copy(), par.Y.copy(), kernel=par.kern.copy())
self.assertNotIn(par.observers[0], pcopy.observers)
pcopy = par.copy()
pcopy.name = "copy"
self.assertTrue(par.checkgrad())
self.assertTrue(pcopy.checkgrad())
self.assertTrue(pcopy.kern.checkgrad())
import ipdb;ipdb.set_trace()
self.assertIn(par.observers[0], pcopy.observers)
self.assertEqual(par.count, 3)
self.assertEqual(pcopy.count, 6) # 3 of each call to checkgrad
if __name__ == "__main__": if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_parameter_index_operations'] #import sys;sys.argv = ['', 'Test.test_parameter_index_operations']
GPy/testing/run_coverage.sh Executable file
View file
@@ -0,0 +1 @@
nosetests . --with-coverage --cover-html --cover-html-dir=coverage --cover-package=GPy --cover-erase
View file
@@ -16,4 +16,5 @@ from . import diag
from . import initialization from . import initialization
from . import multioutput from . import multioutput
from . import linalg_gpu from . import linalg_gpu
from . import parallel
View file
@@ -3,6 +3,7 @@
from ..core.parameterization.observable import Observable from ..core.parameterization.observable import Observable
import collections, weakref import collections, weakref
from functools import reduce from functools import reduce
from pickle import PickleError
class Cacher(object): class Cacher(object):
def __init__(self, operation, limit=5, ignore_args=(), force_kwargs=()): def __init__(self, operation, limit=5, ignore_args=(), force_kwargs=()):
@@ -76,7 +77,7 @@ class Cacher(object):
self.order.append(cache_id) self.order.append(cache_id)
self.cached_inputs[cache_id] = inputs self.cached_inputs[cache_id] = inputs
for a in inputs: for a in inputs:
if a is not None: if a is not None and not isinstance(a, int):
ind_id = self.id(a) ind_id = self.id(a)
v = self.cached_input_ids.get(ind_id, [weakref.ref(a), []]) v = self.cached_input_ids.get(ind_id, [weakref.ref(a), []])
v[1].append(cache_id) v[1].append(cache_id)
@@ -103,9 +104,9 @@ class Cacher(object):
inputs = self.combine_inputs(args, kw, self.ignore_args) inputs = self.combine_inputs(args, kw, self.ignore_args)
cache_id = self.prepare_cache_id(inputs) cache_id = self.prepare_cache_id(inputs)
# 2: if anything is not cachable, we will just return the operation, without caching # 2: if anything is not cachable, we will just return the operation, without caching
if reduce(lambda a, b: a or (not (isinstance(b, Observable) or b is None)), inputs, False): if reduce(lambda a, b: a or (not (isinstance(b, Observable) or b is None or isinstance(b,int))), inputs, False):
#print 'WARNING: '+self.operation.__name__ + ' not cacheable!' # print 'WARNING: '+self.operation.__name__ + ' not cacheable!'
#print [not (isinstance(b, Observable)) for b in inputs] # print [not (isinstance(b, Observable)) for b in inputs]
return self.operation(*args, **kw) return self.operation(*args, **kw)
# 3&4: check whether this cache_id has been cached, then has it changed? # 3&4: check whether this cache_id has been cached, then has it changed?
try: try:
@@ -149,10 +150,10 @@ class Cacher(object):
return Cacher(self.operation, self.limit, self.ignore_args, self.force_kwargs) return Cacher(self.operation, self.limit, self.ignore_args, self.force_kwargs)
def __getstate__(self, memo=None): def __getstate__(self, memo=None):
raise NotImplementedError("Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation))) raise PickleError("Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation)))
def __setstate__(self, memo=None): def __setstate__(self, memo=None):
raise NotImplementedError("Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation))) raise PickleError("Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation)))
@property @property
def __name__(self): def __name__(self):
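The reason plain ints are now excluded from caching rather than cached: an int can never be an Observable, so the Cacher would have no way to be notified when it changes, and CPython interns small integers, which makes id()-based bookkeeping unreliable for them. The interning pitfall in two lines:

a = 200 + 56
b = 256
print(a is b)   # True on CPython: small ints are interned, so id() is not a safe key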
View file
@@ -4,8 +4,11 @@
import numpy as np import numpy as np
from . import linalg from . import linalg
from .config import config from .config import config
try:
from . import choleskies_cython from . import choleskies_cython
config.set('cython', 'working', 'True')
except ImportError:
config.set('cython', 'working', 'False')
def safe_root(N): def safe_root(N):
i = np.sqrt(N) i = np.sqrt(N)
View file
@@ -46,6 +46,8 @@ def offdiag_view(A, offset=0):
return as_strided(Af[(1+offset):], shape=(A.shape[0]-1, A.shape[1]), strides=(A.strides[0] + A.itemsize, A.strides[1])) return as_strided(Af[(1+offset):], shape=(A.shape[0]-1, A.shape[1]), strides=(A.strides[0] + A.itemsize, A.strides[1]))
def _diag_ufunc(A,b,offset,func): def _diag_ufunc(A,b,offset,func):
b = np.squeeze(b)
assert b.ndim <= 1, "only implemented for one dimensional arrays"
dA = view(A, offset); func(dA,b,dA) dA = view(A, offset); func(dA,b,dA)
return A return A
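The added squeeze-and-assert keeps the right-hand side one-dimensional before it is broadcast into the strided diagonal view. The same in-place idea in plain numpy (einsum stands in for GPy's view helper):

import numpy as np

A = np.zeros((4, 4))
d = np.einsum('ii->i', A)           # writable view of the main diagonal
b = np.squeeze(np.ones((4, 1)))     # (4, 1) -> (4,), as the new guard enforces
np.add(d, b, d)                     # func(dA, b, dA) from above, in place
print(np.diag(A))                   # [1. 1. 1. 1.]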
View file
@@ -7,47 +7,16 @@
import numpy as np import numpy as np
from scipy import linalg from scipy import linalg
import types from scipy.linalg import lapack, blas
import ctypes
from ctypes import byref, c_char, c_int, c_double # TODO
import scipy
import warnings
import os
from .config import config from .config import config
import logging import logging
from . import linalg_cython
try:
from . import linalg_cython
config.set('cython', 'working', 'True')
except ImportError:
config.set('cython', 'working', 'False')
_scipyversion = np.float64((scipy.__version__).split('.')[:2])
_fix_dpotri_scipy_bug = True
if np.all(_scipyversion >= np.array([0, 14])):
from scipy.linalg import lapack
_fix_dpotri_scipy_bug = False
elif np.all(_scipyversion >= np.array([0, 12])):
#import scipy.linalg.lapack.clapack as lapack
from scipy.linalg import lapack
else:
from scipy.linalg.lapack import flapack as lapack
if config.getboolean('anaconda', 'installed') and config.getboolean('anaconda', 'MKL'):
try:
anaconda_path = str(config.get('anaconda', 'location'))
mkl_rt = ctypes.cdll.LoadLibrary(os.path.join(anaconda_path, 'DLLs', 'mkl_rt.dll'))
dsyrk = mkl_rt.dsyrk
dsyr = mkl_rt.dsyr
_blas_available = True
print('anaconda installed and mkl is loaded')
except:
_blas_available = False
else:
try:
_blaslib = ctypes.cdll.LoadLibrary(np.core._dotblas.__file__) # @UndefinedVariable
dsyrk = _blaslib.dsyrk_
dsyr = _blaslib.dsyr_
_blas_available = True
except AttributeError as e:
_blas_available = False
warnings.warn("warning: caught this exception:" + str(e))
def force_F_ordered_symmetric(A): def force_F_ordered_symmetric(A):
""" """
@ -169,9 +138,6 @@ def dpotri(A, lower=1):
:returns: A inverse :returns: A inverse
""" """
if _fix_dpotri_scipy_bug:
assert lower==1, "scipy linalg behaviour is very weird. please use lower, fortran ordered arrays"
lower = 0
A = force_F_ordered(A) A = force_F_ordered(A)
R, info = lapack.dpotri(A, lower=lower) #needs to be zero here, seems to be a scipy bug R, info = lapack.dpotri(A, lower=lower) #needs to be zero here, seems to be a scipy bug
@ -300,8 +266,8 @@ def pca(Y, input_dim):
Z = linalg.svd(Y - Y.mean(axis=0), full_matrices=False) Z = linalg.svd(Y - Y.mean(axis=0), full_matrices=False)
[X, W] = [Z[0][:, 0:input_dim], np.dot(np.diag(Z[1]), Z[2]).T[:, 0:input_dim]] [X, W] = [Z[0][:, 0:input_dim], np.dot(np.diag(Z[1]), Z[2]).T[:, 0:input_dim]]
v = X.std(axis=0) v = X.std(axis=0)
X /= v; X /= v
W *= v; W *= v
return X, W.T return X, W.T
def ppca(Y, Q, iterations=100): def ppca(Y, Q, iterations=100):
@ -347,34 +313,15 @@ def tdot_blas(mat, out=None):
out[:] = 0.0 out[:] = 0.0
# # Call to DSYRK from BLAS # # Call to DSYRK from BLAS
# If already in Fortran order (rare), and has the right sorts of strides I
# could avoid the copy. I also thought swapping to cblas API would allow use
# of C order. However, I tried that and had errors with large matrices:
# http://homepages.inf.ed.ac.uk/imurray2/code/tdot/tdot_broken.py
mat = np.asfortranarray(mat) mat = np.asfortranarray(mat)
TRANS = c_char('n'.encode('ascii')) out = blas.dsyrk(alpha=1.0, a=mat, beta=0.0, c=out, overwrite_c=1,
N = c_int(mat.shape[0]) trans=0, lower=0)
K = c_int(mat.shape[1])
LDA = c_int(mat.shape[0])
UPLO = c_char('l'.encode('ascii'))
ALPHA = c_double(1.0)
A = mat.ctypes.data_as(ctypes.c_void_p)
BETA = c_double(0.0)
C = out.ctypes.data_as(ctypes.c_void_p)
LDC = c_int(np.max(out.strides) // 8)
dsyrk(byref(UPLO), byref(TRANS), byref(N), byref(K),
byref(ALPHA), A, byref(LDA), byref(BETA), C, byref(LDC))
symmetrify(out, upper=True) symmetrify(out, upper=True)
return np.ascontiguousarray(out) return np.ascontiguousarray(out)
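# Editor's note: the rewrite swaps the hand-rolled ctypes DSYRK call for
# scipy.linalg.blas.dsyrk, which fills one triangle of mat.dot(mat.T);
# symmetrify mirrors the rest. A self-contained check against plain numpy
# (shapes illustrative):
import numpy as np
from scipy.linalg import blas
mat = np.asfortranarray(np.random.randn(5, 3))
C = blas.dsyrk(alpha=1.0, a=mat, trans=0, lower=0)  # upper triangle of mat.dot(mat.T)
C = np.triu(C) + np.triu(C, 1).T                    # mirror by hand, as symmetrify does
assert np.allclose(C, mat.dot(mat.T))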
def tdot(*args, **kwargs): def tdot(*args, **kwargs):
if _blas_available: return tdot_blas(*args, **kwargs)
return tdot_blas(*args, **kwargs)
else:
return tdot_numpy(*args, **kwargs)
def DSYR_blas(A, x, alpha=1.): def DSYR_blas(A, x, alpha=1.):
""" """
@ -386,15 +333,7 @@ def DSYR_blas(A, x, alpha=1.):
:param alpha: scalar :param alpha: scalar
""" """
N = c_int(A.shape[0]) A = blas.dsyr(lower=0, x=x, a=A, alpha=alpha, overwrite_a=True)
LDA = c_int(A.shape[0])
UPLO = c_char('l'.encode('ascii'))
ALPHA = c_double(alpha)
A_ = A.ctypes.data_as(ctypes.c_void_p)
x_ = x.ctypes.data_as(ctypes.c_void_p)
INCX = c_int(1)
dsyr(byref(UPLO), byref(N), byref(ALPHA),
x_, byref(INCX), A_, byref(LDA))
symmetrify(A, upper=True) symmetrify(A, upper=True)
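# Editor's note: DSYR likewise becomes scipy.linalg.blas.dsyr, the symmetric
# rank-1 update A <- A + alpha*x*x^T on one triangle. A quick self-contained
# check (values illustrative):
import numpy as np
from scipy.linalg import blas
A = np.eye(3, order='F')
x = np.arange(3.0)
R = blas.dsyr(alpha=2.0, x=x, a=A, lower=0)         # upper triangle updated
R = np.triu(R) + np.triu(R, 1).T                    # mirror by hand, as symmetrify does
assert np.allclose(R, np.eye(3) + 2.0 * np.outer(x, x))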
def DSYR_numpy(A, x, alpha=1.): def DSYR_numpy(A, x, alpha=1.):
@ -411,10 +350,8 @@ def DSYR_numpy(A, x, alpha=1.):
def DSYR(*args, **kwargs): def DSYR(*args, **kwargs):
if _blas_available: return DSYR_blas(*args, **kwargs)
return DSYR_blas(*args, **kwargs)
else:
return DSYR_numpy(*args, **kwargs)
def symmetrify(A, upper=False): def symmetrify(A, upper=False):
""" """

View file

@ -1,44 +0,0 @@
import numpy as np
import GPy
def load_housing_data():
X = np.loadtxt('housing.data')
X, Y = X[:,:-1], X[:,-1:]
#scale the X data
xmax, xmin = X.max(0), X.min(0)
X = (X-xmin)/(xmax-xmin)
#log the response
Y = np.log(Y)
return X, Y
def fit_full_GP():
X, Y = load_housing_data()
k = GPy.kern.RBF(X.shape[1], ARD=True) + GPy.kern.Linear(X.shape[1])
m = GPy.models.GPRegression(X, Y, kernel=k)
m.optimize('bfgs', max_iters=400, gtol=0)
return m
def fit_svgp_st():
np.random.seed(0)
X, Y = load_housing_data()
Z = X[np.random.permutation(X.shape[0])[:100]]
k = GPy.kern.RBF(X.shape[1], ARD=True) + GPy.kern.Linear(X.shape[1], ARD=True) + GPy.kern.White(1,0.01) + GPy.kern.Bias(1)
lik = GPy.likelihoods.StudentT(deg_free=3.)
m = GPy.core.SVGP(X, Y, Z=Z, kernel=k, likelihood=lik)
[m.optimize('scg', max_iters=40, gtol=0, messages=1, xtol=0, ftol=0) for i in range(10)]
m.optimize('bfgs', max_iters=4000, gtol=0, messages=1, xtol=0, ftol=0)
return m
if __name__=="__main__":
import timeit

View file

@ -1,506 +0,0 @@
0.00632 18.00 2.310 0 0.5380 6.5750 65.20 4.0900 1 296.0 15.30 396.90 4.98 24.00
0.02731 0.00 7.070 0 0.4690 6.4210 78.90 4.9671 2 242.0 17.80 396.90 9.14 21.60
0.02729 0.00 7.070 0 0.4690 7.1850 61.10 4.9671 2 242.0 17.80 392.83 4.03 34.70
0.03237 0.00 2.180 0 0.4580 6.9980 45.80 6.0622 3 222.0 18.70 394.63 2.94 33.40
0.06905 0.00 2.180 0 0.4580 7.1470 54.20 6.0622 3 222.0 18.70 396.90 5.33 36.20
0.02985 0.00 2.180 0 0.4580 6.4300 58.70 6.0622 3 222.0 18.70 394.12 5.21 28.70
0.08829 12.50 7.870 0 0.5240 6.0120 66.60 5.5605 5 311.0 15.20 395.60 12.43 22.90
0.14455 12.50 7.870 0 0.5240 6.1720 96.10 5.9505 5 311.0 15.20 396.90 19.15 27.10
0.21124 12.50 7.870 0 0.5240 5.6310 100.00 6.0821 5 311.0 15.20 386.63 29.93 16.50
0.17004 12.50 7.870 0 0.5240 6.0040 85.90 6.5921 5 311.0 15.20 386.71 17.10 18.90
0.22489 12.50 7.870 0 0.5240 6.3770 94.30 6.3467 5 311.0 15.20 392.52 20.45 15.00
0.11747 12.50 7.870 0 0.5240 6.0090 82.90 6.2267 5 311.0 15.20 396.90 13.27 18.90
0.09378 12.50 7.870 0 0.5240 5.8890 39.00 5.4509 5 311.0 15.20 390.50 15.71 21.70
0.62976 0.00 8.140 0 0.5380 5.9490 61.80 4.7075 4 307.0 21.00 396.90 8.26 20.40
0.63796 0.00 8.140 0 0.5380 6.0960 84.50 4.4619 4 307.0 21.00 380.02 10.26 18.20
0.62739 0.00 8.140 0 0.5380 5.8340 56.50 4.4986 4 307.0 21.00 395.62 8.47 19.90
1.05393 0.00 8.140 0 0.5380 5.9350 29.30 4.4986 4 307.0 21.00 386.85 6.58 23.10
0.78420 0.00 8.140 0 0.5380 5.9900 81.70 4.2579 4 307.0 21.00 386.75 14.67 17.50
0.80271 0.00 8.140 0 0.5380 5.4560 36.60 3.7965 4 307.0 21.00 288.99 11.69 20.20
0.72580 0.00 8.140 0 0.5380 5.7270 69.50 3.7965 4 307.0 21.00 390.95 11.28 18.20
1.25179 0.00 8.140 0 0.5380 5.5700 98.10 3.7979 4 307.0 21.00 376.57 21.02 13.60
0.85204 0.00 8.140 0 0.5380 5.9650 89.20 4.0123 4 307.0 21.00 392.53 13.83 19.60
1.23247 0.00 8.140 0 0.5380 6.1420 91.70 3.9769 4 307.0 21.00 396.90 18.72 15.20
0.98843 0.00 8.140 0 0.5380 5.8130 100.00 4.0952 4 307.0 21.00 394.54 19.88 14.50
0.75026 0.00 8.140 0 0.5380 5.9240 94.10 4.3996 4 307.0 21.00 394.33 16.30 15.60
0.84054 0.00 8.140 0 0.5380 5.5990 85.70 4.4546 4 307.0 21.00 303.42 16.51 13.90
0.67191 0.00 8.140 0 0.5380 5.8130 90.30 4.6820 4 307.0 21.00 376.88 14.81 16.60
0.95577 0.00 8.140 0 0.5380 6.0470 88.80 4.4534 4 307.0 21.00 306.38 17.28 14.80
0.77299 0.00 8.140 0 0.5380 6.4950 94.40 4.4547 4 307.0 21.00 387.94 12.80 18.40
1.00245 0.00 8.140 0 0.5380 6.6740 87.30 4.2390 4 307.0 21.00 380.23 11.98 21.00
1.13081 0.00 8.140 0 0.5380 5.7130 94.10 4.2330 4 307.0 21.00 360.17 22.60 12.70
1.35472 0.00 8.140 0 0.5380 6.0720 100.00 4.1750 4 307.0 21.00 376.73 13.04 14.50
1.38799 0.00 8.140 0 0.5380 5.9500 82.00 3.9900 4 307.0 21.00 232.60 27.71 13.20
1.15172 0.00 8.140 0 0.5380 5.7010 95.00 3.7872 4 307.0 21.00 358.77 18.35 13.10
1.61282 0.00 8.140 0 0.5380 6.0960 96.90 3.7598 4 307.0 21.00 248.31 20.34 13.50
0.06417 0.00 5.960 0 0.4990 5.9330 68.20 3.3603 5 279.0 19.20 396.90 9.68 18.90
0.09744 0.00 5.960 0 0.4990 5.8410 61.40 3.3779 5 279.0 19.20 377.56 11.41 20.00
0.08014 0.00 5.960 0 0.4990 5.8500 41.50 3.9342 5 279.0 19.20 396.90 8.77 21.00
0.17505 0.00 5.960 0 0.4990 5.9660 30.20 3.8473 5 279.0 19.20 393.43 10.13 24.70
0.02763 75.00 2.950 0 0.4280 6.5950 21.80 5.4011 3 252.0 18.30 395.63 4.32 30.80
0.03359 75.00 2.950 0 0.4280 7.0240 15.80 5.4011 3 252.0 18.30 395.62 1.98 34.90
0.12744 0.00 6.910 0 0.4480 6.7700 2.90 5.7209 3 233.0 17.90 385.41 4.84 26.60
0.14150 0.00 6.910 0 0.4480 6.1690 6.60 5.7209 3 233.0 17.90 383.37 5.81 25.30
0.15936 0.00 6.910 0 0.4480 6.2110 6.50 5.7209 3 233.0 17.90 394.46 7.44 24.70
0.12269 0.00 6.910 0 0.4480 6.0690 40.00 5.7209 3 233.0 17.90 389.39 9.55 21.20
0.17142 0.00 6.910 0 0.4480 5.6820 33.80 5.1004 3 233.0 17.90 396.90 10.21 19.30
0.18836 0.00 6.910 0 0.4480 5.7860 33.30 5.1004 3 233.0 17.90 396.90 14.15 20.00
0.22927 0.00 6.910 0 0.4480 6.0300 85.50 5.6894 3 233.0 17.90 392.74 18.80 16.60
0.25387 0.00 6.910 0 0.4480 5.3990 95.30 5.8700 3 233.0 17.90 396.90 30.81 14.40
0.21977 0.00 6.910 0 0.4480 5.6020 62.00 6.0877 3 233.0 17.90 396.90 16.20 19.40
0.08873 21.00 5.640 0 0.4390 5.9630 45.70 6.8147 4 243.0 16.80 395.56 13.45 19.70
0.04337 21.00 5.640 0 0.4390 6.1150 63.00 6.8147 4 243.0 16.80 393.97 9.43 20.50
0.05360 21.00 5.640 0 0.4390 6.5110 21.10 6.8147 4 243.0 16.80 396.90 5.28 25.00
0.04981 21.00 5.640 0 0.4390 5.9980 21.40 6.8147 4 243.0 16.80 396.90 8.43 23.40
0.01360 75.00 4.000 0 0.4100 5.8880 47.60 7.3197 3 469.0 21.10 396.90 14.80 18.90
0.01311 90.00 1.220 0 0.4030 7.2490 21.90 8.6966 5 226.0 17.90 395.93 4.81 35.40
0.02055 85.00 0.740 0 0.4100 6.3830 35.70 9.1876 2 313.0 17.30 396.90 5.77 24.70
0.01432 100.00 1.320 0 0.4110 6.8160 40.50 8.3248 5 256.0 15.10 392.90 3.95 31.60
0.15445 25.00 5.130 0 0.4530 6.1450 29.20 7.8148 8 284.0 19.70 390.68 6.86 23.30
0.10328 25.00 5.130 0 0.4530 5.9270 47.20 6.9320 8 284.0 19.70 396.90 9.22 19.60
0.14932 25.00 5.130 0 0.4530 5.7410 66.20 7.2254 8 284.0 19.70 395.11 13.15 18.70
0.17171 25.00 5.130 0 0.4530 5.9660 93.40 6.8185 8 284.0 19.70 378.08 14.44 16.00
0.11027 25.00 5.130 0 0.4530 6.4560 67.80 7.2255 8 284.0 19.70 396.90 6.73 22.20
0.12650 25.00 5.130 0 0.4530 6.7620 43.40 7.9809 8 284.0 19.70 395.58 9.50 25.00
0.01951 17.50 1.380 0 0.4161 7.1040 59.50 9.2229 3 216.0 18.60 393.24 8.05 33.00
0.03584 80.00 3.370 0 0.3980 6.2900 17.80 6.6115 4 337.0 16.10 396.90 4.67 23.50
0.04379 80.00 3.370 0 0.3980 5.7870 31.10 6.6115 4 337.0 16.10 396.90 10.24 19.40
0.05789 12.50 6.070 0 0.4090 5.8780 21.40 6.4980 4 345.0 18.90 396.21 8.10 22.00
0.13554 12.50 6.070 0 0.4090 5.5940 36.80 6.4980 4 345.0 18.90 396.90 13.09 17.40
0.12816 12.50 6.070 0 0.4090 5.8850 33.00 6.4980 4 345.0 18.90 396.90 8.79 20.90
0.08826 0.00 10.810 0 0.4130 6.4170 6.60 5.2873 4 305.0 19.20 383.73 6.72 24.20
0.15876 0.00 10.810 0 0.4130 5.9610 17.50 5.2873 4 305.0 19.20 376.94 9.88 21.70
0.09164 0.00 10.810 0 0.4130 6.0650 7.80 5.2873 4 305.0 19.20 390.91 5.52 22.80
0.19539 0.00 10.810 0 0.4130 6.2450 6.20 5.2873 4 305.0 19.20 377.17 7.54 23.40
0.07896 0.00 12.830 0 0.4370 6.2730 6.00 4.2515 5 398.0 18.70 394.92 6.78 24.10
0.09512 0.00 12.830 0 0.4370 6.2860 45.00 4.5026 5 398.0 18.70 383.23 8.94 21.40
0.10153 0.00 12.830 0 0.4370 6.2790 74.50 4.0522 5 398.0 18.70 373.66 11.97 20.00
0.08707 0.00 12.830 0 0.4370 6.1400 45.80 4.0905 5 398.0 18.70 386.96 10.27 20.80
0.05646 0.00 12.830 0 0.4370 6.2320 53.70 5.0141 5 398.0 18.70 386.40 12.34 21.20
0.08387 0.00 12.830 0 0.4370 5.8740 36.60 4.5026 5 398.0 18.70 396.06 9.10 20.30
0.04113 25.00 4.860 0 0.4260 6.7270 33.50 5.4007 4 281.0 19.00 396.90 5.29 28.00
0.04462 25.00 4.860 0 0.4260 6.6190 70.40 5.4007 4 281.0 19.00 395.63 7.22 23.90
0.03659 25.00 4.860 0 0.4260 6.3020 32.20 5.4007 4 281.0 19.00 396.90 6.72 24.80
0.03551 25.00 4.860 0 0.4260 6.1670 46.70 5.4007 4 281.0 19.00 390.64 7.51 22.90
0.05059 0.00 4.490 0 0.4490 6.3890 48.00 4.7794 3 247.0 18.50 396.90 9.62 23.90
0.05735 0.00 4.490 0 0.4490 6.6300 56.10 4.4377 3 247.0 18.50 392.30 6.53 26.60
0.05188 0.00 4.490 0 0.4490 6.0150 45.10 4.4272 3 247.0 18.50 395.99 12.86 22.50
0.07151 0.00 4.490 0 0.4490 6.1210 56.80 3.7476 3 247.0 18.50 395.15 8.44 22.20
0.05660 0.00 3.410 0 0.4890 7.0070 86.30 3.4217 2 270.0 17.80 396.90 5.50 23.60
0.05302 0.00 3.410 0 0.4890 7.0790 63.10 3.4145 2 270.0 17.80 396.06 5.70 28.70
0.04684 0.00 3.410 0 0.4890 6.4170 66.10 3.0923 2 270.0 17.80 392.18 8.81 22.60
0.03932 0.00 3.410 0 0.4890 6.4050 73.90 3.0921 2 270.0 17.80 393.55 8.20 22.00
0.04203 28.00 15.040 0 0.4640 6.4420 53.60 3.6659 4 270.0 18.20 395.01 8.16 22.90
0.02875 28.00 15.040 0 0.4640 6.2110 28.90 3.6659 4 270.0 18.20 396.33 6.21 25.00
0.04294 28.00 15.040 0 0.4640 6.2490 77.30 3.6150 4 270.0 18.20 396.90 10.59 20.60
0.12204 0.00 2.890 0 0.4450 6.6250 57.80 3.4952 2 276.0 18.00 357.98 6.65 28.40
0.11504 0.00 2.890 0 0.4450 6.1630 69.60 3.4952 2 276.0 18.00 391.83 11.34 21.40
0.12083 0.00 2.890 0 0.4450 8.0690 76.00 3.4952 2 276.0 18.00 396.90 4.21 38.70
0.08187 0.00 2.890 0 0.4450 7.8200 36.90 3.4952 2 276.0 18.00 393.53 3.57 43.80
0.06860 0.00 2.890 0 0.4450 7.4160 62.50 3.4952 2 276.0 18.00 396.90 6.19 33.20
0.14866 0.00 8.560 0 0.5200 6.7270 79.90 2.7778 5 384.0 20.90 394.76 9.42 27.50
0.11432 0.00 8.560 0 0.5200 6.7810 71.30 2.8561 5 384.0 20.90 395.58 7.67 26.50
0.22876 0.00 8.560 0 0.5200 6.4050 85.40 2.7147 5 384.0 20.90 70.80 10.63 18.60
0.21161 0.00 8.560 0 0.5200 6.1370 87.40 2.7147 5 384.0 20.90 394.47 13.44 19.30
0.13960 0.00 8.560 0 0.5200 6.1670 90.00 2.4210 5 384.0 20.90 392.69 12.33 20.10
0.13262 0.00 8.560 0 0.5200 5.8510 96.70 2.1069 5 384.0 20.90 394.05 16.47 19.50
0.17120 0.00 8.560 0 0.5200 5.8360 91.90 2.2110 5 384.0 20.90 395.67 18.66 19.50
0.13117 0.00 8.560 0 0.5200 6.1270 85.20 2.1224 5 384.0 20.90 387.69 14.09 20.40
0.12802 0.00 8.560 0 0.5200 6.4740 97.10 2.4329 5 384.0 20.90 395.24 12.27 19.80
0.26363 0.00 8.560 0 0.5200 6.2290 91.20 2.5451 5 384.0 20.90 391.23 15.55 19.40
0.10793 0.00 8.560 0 0.5200 6.1950 54.40 2.7778 5 384.0 20.90 393.49 13.00 21.70
0.10084 0.00 10.010 0 0.5470 6.7150 81.60 2.6775 6 432.0 17.80 395.59 10.16 22.80
0.12329 0.00 10.010 0 0.5470 5.9130 92.90 2.3534 6 432.0 17.80 394.95 16.21 18.80
0.22212 0.00 10.010 0 0.5470 6.0920 95.40 2.5480 6 432.0 17.80 396.90 17.09 18.70
0.14231 0.00 10.010 0 0.5470 6.2540 84.20 2.2565 6 432.0 17.80 388.74 10.45 18.50
0.17134 0.00 10.010 0 0.5470 5.9280 88.20 2.4631 6 432.0 17.80 344.91 15.76 18.30
0.13158 0.00 10.010 0 0.5470 6.1760 72.50 2.7301 6 432.0 17.80 393.30 12.04 21.20
0.15098 0.00 10.010 0 0.5470 6.0210 82.60 2.7474 6 432.0 17.80 394.51 10.30 19.20
0.13058 0.00 10.010 0 0.5470 5.8720 73.10 2.4775 6 432.0 17.80 338.63 15.37 20.40
0.14476 0.00 10.010 0 0.5470 5.7310 65.20 2.7592 6 432.0 17.80 391.50 13.61 19.30
0.06899 0.00 25.650 0 0.5810 5.8700 69.70 2.2577 2 188.0 19.10 389.15 14.37 22.00
0.07165 0.00 25.650 0 0.5810 6.0040 84.10 2.1974 2 188.0 19.10 377.67 14.27 20.30
0.09299 0.00 25.650 0 0.5810 5.9610 92.90 2.0869 2 188.0 19.10 378.09 17.93 20.50
0.15038 0.00 25.650 0 0.5810 5.8560 97.00 1.9444 2 188.0 19.10 370.31 25.41 17.30
0.09849 0.00 25.650 0 0.5810 5.8790 95.80 2.0063 2 188.0 19.10 379.38 17.58 18.80
0.16902 0.00 25.650 0 0.5810 5.9860 88.40 1.9929 2 188.0 19.10 385.02 14.81 21.40
0.38735 0.00 25.650 0 0.5810 5.6130 95.60 1.7572 2 188.0 19.10 359.29 27.26 15.70
0.25915 0.00 21.890 0 0.6240 5.6930 96.00 1.7883 4 437.0 21.20 392.11 17.19 16.20
0.32543 0.00 21.890 0 0.6240 6.4310 98.80 1.8125 4 437.0 21.20 396.90 15.39 18.00
0.88125 0.00 21.890 0 0.6240 5.6370 94.70 1.9799 4 437.0 21.20 396.90 18.34 14.30
0.34006 0.00 21.890 0 0.6240 6.4580 98.90 2.1185 4 437.0 21.20 395.04 12.60 19.20
1.19294 0.00 21.890 0 0.6240 6.3260 97.70 2.2710 4 437.0 21.20 396.90 12.26 19.60
0.59005 0.00 21.890 0 0.6240 6.3720 97.90 2.3274 4 437.0 21.20 385.76 11.12 23.00
0.32982 0.00 21.890 0 0.6240 5.8220 95.40 2.4699 4 437.0 21.20 388.69 15.03 18.40
0.97617 0.00 21.890 0 0.6240 5.7570 98.40 2.3460 4 437.0 21.20 262.76 17.31 15.60
0.55778 0.00 21.890 0 0.6240 6.3350 98.20 2.1107 4 437.0 21.20 394.67 16.96 18.10
0.32264 0.00 21.890 0 0.6240 5.9420 93.50 1.9669 4 437.0 21.20 378.25 16.90 17.40
0.35233 0.00 21.890 0 0.6240 6.4540 98.40 1.8498 4 437.0 21.20 394.08 14.59 17.10
0.24980 0.00 21.890 0 0.6240 5.8570 98.20 1.6686 4 437.0 21.20 392.04 21.32 13.30
0.54452 0.00 21.890 0 0.6240 6.1510 97.90 1.6687 4 437.0 21.20 396.90 18.46 17.80
0.29090 0.00 21.890 0 0.6240 6.1740 93.60 1.6119 4 437.0 21.20 388.08 24.16 14.00
1.62864 0.00 21.890 0 0.6240 5.0190 100.00 1.4394 4 437.0 21.20 396.90 34.41 14.40
3.32105 0.00 19.580 1 0.8710 5.4030 100.00 1.3216 5 403.0 14.70 396.90 26.82 13.40
4.09740 0.00 19.580 0 0.8710 5.4680 100.00 1.4118 5 403.0 14.70 396.90 26.42 15.60
2.77974 0.00 19.580 0 0.8710 4.9030 97.80 1.3459 5 403.0 14.70 396.90 29.29 11.80
2.37934 0.00 19.580 0 0.8710 6.1300 100.00 1.4191 5 403.0 14.70 172.91 27.80 13.80
2.15505 0.00 19.580 0 0.8710 5.6280 100.00 1.5166 5 403.0 14.70 169.27 16.65 15.60
2.36862 0.00 19.580 0 0.8710 4.9260 95.70 1.4608 5 403.0 14.70 391.71 29.53 14.60
2.33099 0.00 19.580 0 0.8710 5.1860 93.80 1.5296 5 403.0 14.70 356.99 28.32 17.80
2.73397 0.00 19.580 0 0.8710 5.5970 94.90 1.5257 5 403.0 14.70 351.85 21.45 15.40
1.65660 0.00 19.580 0 0.8710 6.1220 97.30 1.6180 5 403.0 14.70 372.80 14.10 21.50
1.49632 0.00 19.580 0 0.8710 5.4040 100.00 1.5916 5 403.0 14.70 341.60 13.28 19.60
1.12658 0.00 19.580 1 0.8710 5.0120 88.00 1.6102 5 403.0 14.70 343.28 12.12 15.30
2.14918 0.00 19.580 0 0.8710 5.7090 98.50 1.6232 5 403.0 14.70 261.95 15.79 19.40
1.41385 0.00 19.580 1 0.8710 6.1290 96.00 1.7494 5 403.0 14.70 321.02 15.12 17.00
3.53501 0.00 19.580 1 0.8710 6.1520 82.60 1.7455 5 403.0 14.70 88.01 15.02 15.60
2.44668 0.00 19.580 0 0.8710 5.2720 94.00 1.7364 5 403.0 14.70 88.63 16.14 13.10
1.22358 0.00 19.580 0 0.6050 6.9430 97.40 1.8773 5 403.0 14.70 363.43 4.59 41.30
1.34284 0.00 19.580 0 0.6050 6.0660 100.00 1.7573 5 403.0 14.70 353.89 6.43 24.30
1.42502 0.00 19.580 0 0.8710 6.5100 100.00 1.7659 5 403.0 14.70 364.31 7.39 23.30
1.27346 0.00 19.580 1 0.6050 6.2500 92.60 1.7984 5 403.0 14.70 338.92 5.50 27.00
1.46336 0.00 19.580 0 0.6050 7.4890 90.80 1.9709 5 403.0 14.70 374.43 1.73 50.00
1.83377 0.00 19.580 1 0.6050 7.8020 98.20 2.0407 5 403.0 14.70 389.61 1.92 50.00
1.51902 0.00 19.580 1 0.6050 8.3750 93.90 2.1620 5 403.0 14.70 388.45 3.32 50.00
2.24236 0.00 19.580 0 0.6050 5.8540 91.80 2.4220 5 403.0 14.70 395.11 11.64 22.70
2.92400 0.00 19.580 0 0.6050 6.1010 93.00 2.2834 5 403.0 14.70 240.16 9.81 25.00
2.01019 0.00 19.580 0 0.6050 7.9290 96.20 2.0459 5 403.0 14.70 369.30 3.70 50.00
1.80028 0.00 19.580 0 0.6050 5.8770 79.20 2.4259 5 403.0 14.70 227.61 12.14 23.80
2.30040 0.00 19.580 0 0.6050 6.3190 96.10 2.1000 5 403.0 14.70 297.09 11.10 23.80
2.44953 0.00 19.580 0 0.6050 6.4020 95.20 2.2625 5 403.0 14.70 330.04 11.32 22.30
1.20742 0.00 19.580 0 0.6050 5.8750 94.60 2.4259 5 403.0 14.70 292.29 14.43 17.40
2.31390 0.00 19.580 0 0.6050 5.8800 97.30 2.3887 5 403.0 14.70 348.13 12.03 19.10
0.13914 0.00 4.050 0 0.5100 5.5720 88.50 2.5961 5 296.0 16.60 396.90 14.69 23.10
0.09178 0.00 4.050 0 0.5100 6.4160 84.10 2.6463 5 296.0 16.60 395.50 9.04 23.60
0.08447 0.00 4.050 0 0.5100 5.8590 68.70 2.7019 5 296.0 16.60 393.23 9.64 22.60
0.06664 0.00 4.050 0 0.5100 6.5460 33.10 3.1323 5 296.0 16.60 390.96 5.33 29.40
0.07022 0.00 4.050 0 0.5100 6.0200 47.20 3.5549 5 296.0 16.60 393.23 10.11 23.20
0.05425 0.00 4.050 0 0.5100 6.3150 73.40 3.3175 5 296.0 16.60 395.60 6.29 24.60
0.06642 0.00 4.050 0 0.5100 6.8600 74.40 2.9153 5 296.0 16.60 391.27 6.92 29.90
0.05780 0.00 2.460 0 0.4880 6.9800 58.40 2.8290 3 193.0 17.80 396.90 5.04 37.20
0.06588 0.00 2.460 0 0.4880 7.7650 83.30 2.7410 3 193.0 17.80 395.56 7.56 39.80
0.06888 0.00 2.460 0 0.4880 6.1440 62.20 2.5979 3 193.0 17.80 396.90 9.45 36.20
0.09103 0.00 2.460 0 0.4880 7.1550 92.20 2.7006 3 193.0 17.80 394.12 4.82 37.90
0.10008 0.00 2.460 0 0.4880 6.5630 95.60 2.8470 3 193.0 17.80 396.90 5.68 32.50
0.08308 0.00 2.460 0 0.4880 5.6040 89.80 2.9879 3 193.0 17.80 391.00 13.98 26.40
0.06047 0.00 2.460 0 0.4880 6.1530 68.80 3.2797 3 193.0 17.80 387.11 13.15 29.60
0.05602 0.00 2.460 0 0.4880 7.8310 53.60 3.1992 3 193.0 17.80 392.63 4.45 50.00
0.07875 45.00 3.440 0 0.4370 6.7820 41.10 3.7886 5 398.0 15.20 393.87 6.68 32.00
0.12579 45.00 3.440 0 0.4370 6.5560 29.10 4.5667 5 398.0 15.20 382.84 4.56 29.80
0.08370 45.00 3.440 0 0.4370 7.1850 38.90 4.5667 5 398.0 15.20 396.90 5.39 34.90
0.09068 45.00 3.440 0 0.4370 6.9510 21.50 6.4798 5 398.0 15.20 377.68 5.10 37.00
0.06911 45.00 3.440 0 0.4370 6.7390 30.80 6.4798 5 398.0 15.20 389.71 4.69 30.50
0.08664 45.00 3.440 0 0.4370 7.1780 26.30 6.4798 5 398.0 15.20 390.49 2.87 36.40
0.02187 60.00 2.930 0 0.4010 6.8000 9.90 6.2196 1 265.0 15.60 393.37 5.03 31.10
0.01439 60.00 2.930 0 0.4010 6.6040 18.80 6.2196 1 265.0 15.60 376.70 4.38 29.10
0.01381 80.00 0.460 0 0.4220 7.8750 32.00 5.6484 4 255.0 14.40 394.23 2.97 50.00
0.04011 80.00 1.520 0 0.4040 7.2870 34.10 7.3090 2 329.0 12.60 396.90 4.08 33.30
0.04666 80.00 1.520 0 0.4040 7.1070 36.60 7.3090 2 329.0 12.60 354.31 8.61 30.30
0.03768 80.00 1.520 0 0.4040 7.2740 38.30 7.3090 2 329.0 12.60 392.20 6.62 34.60
0.03150 95.00 1.470 0 0.4030 6.9750 15.30 7.6534 3 402.0 17.00 396.90 4.56 34.90
0.01778 95.00 1.470 0 0.4030 7.1350 13.90 7.6534 3 402.0 17.00 384.30 4.45 32.90
0.03445 82.50 2.030 0 0.4150 6.1620 38.40 6.2700 2 348.0 14.70 393.77 7.43 24.10
0.02177 82.50 2.030 0 0.4150 7.6100 15.70 6.2700 2 348.0 14.70 395.38 3.11 42.30
0.03510 95.00 2.680 0 0.4161 7.8530 33.20 5.1180 4 224.0 14.70 392.78 3.81 48.50
0.02009 95.00 2.680 0 0.4161 8.0340 31.90 5.1180 4 224.0 14.70 390.55 2.88 50.00
0.13642 0.00 10.590 0 0.4890 5.8910 22.30 3.9454 4 277.0 18.60 396.90 10.87 22.60
0.22969 0.00 10.590 0 0.4890 6.3260 52.50 4.3549 4 277.0 18.60 394.87 10.97 24.40
0.25199 0.00 10.590 0 0.4890 5.7830 72.70 4.3549 4 277.0 18.60 389.43 18.06 22.50
0.13587 0.00 10.590 1 0.4890 6.0640 59.10 4.2392 4 277.0 18.60 381.32 14.66 24.40
0.43571 0.00 10.590 1 0.4890 5.3440 100.00 3.8750 4 277.0 18.60 396.90 23.09 20.00
0.17446 0.00 10.590 1 0.4890 5.9600 92.10 3.8771 4 277.0 18.60 393.25 17.27 21.70
0.37578 0.00 10.590 1 0.4890 5.4040 88.60 3.6650 4 277.0 18.60 395.24 23.98 19.30
0.21719 0.00 10.590 1 0.4890 5.8070 53.80 3.6526 4 277.0 18.60 390.94 16.03 22.40
0.14052 0.00 10.590 0 0.4890 6.3750 32.30 3.9454 4 277.0 18.60 385.81 9.38 28.10
0.28955 0.00 10.590 0 0.4890 5.4120 9.80 3.5875 4 277.0 18.60 348.93 29.55 23.70
0.19802 0.00 10.590 0 0.4890 6.1820 42.40 3.9454 4 277.0 18.60 393.63 9.47 25.00
0.04560 0.00 13.890 1 0.5500 5.8880 56.00 3.1121 5 276.0 16.40 392.80 13.51 23.30
0.07013 0.00 13.890 0 0.5500 6.6420 85.10 3.4211 5 276.0 16.40 392.78 9.69 28.70
0.11069 0.00 13.890 1 0.5500 5.9510 93.80 2.8893 5 276.0 16.40 396.90 17.92 21.50
0.11425 0.00 13.890 1 0.5500 6.3730 92.40 3.3633 5 276.0 16.40 393.74 10.50 23.00
0.35809 0.00 6.200 1 0.5070 6.9510 88.50 2.8617 8 307.0 17.40 391.70 9.71 26.70
0.40771 0.00 6.200 1 0.5070 6.1640 91.30 3.0480 8 307.0 17.40 395.24 21.46 21.70
0.62356 0.00 6.200 1 0.5070 6.8790 77.70 3.2721 8 307.0 17.40 390.39 9.93 27.50
0.61470 0.00 6.200 0 0.5070 6.6180 80.80 3.2721 8 307.0 17.40 396.90 7.60 30.10
0.31533 0.00 6.200 0 0.5040 8.2660 78.30 2.8944 8 307.0 17.40 385.05 4.14 44.80
0.52693 0.00 6.200 0 0.5040 8.7250 83.00 2.8944 8 307.0 17.40 382.00 4.63 50.00
0.38214 0.00 6.200 0 0.5040 8.0400 86.50 3.2157 8 307.0 17.40 387.38 3.13 37.60
0.41238 0.00 6.200 0 0.5040 7.1630 79.90 3.2157 8 307.0 17.40 372.08 6.36 31.60
0.29819 0.00 6.200 0 0.5040 7.6860 17.00 3.3751 8 307.0 17.40 377.51 3.92 46.70
0.44178 0.00 6.200 0 0.5040 6.5520 21.40 3.3751 8 307.0 17.40 380.34 3.76 31.50
0.53700 0.00 6.200 0 0.5040 5.9810 68.10 3.6715 8 307.0 17.40 378.35 11.65 24.30
0.46296 0.00 6.200 0 0.5040 7.4120 76.90 3.6715 8 307.0 17.40 376.14 5.25 31.70
0.57529 0.00 6.200 0 0.5070 8.3370 73.30 3.8384 8 307.0 17.40 385.91 2.47 41.70
0.33147 0.00 6.200 0 0.5070 8.2470 70.40 3.6519 8 307.0 17.40 378.95 3.95 48.30
0.44791 0.00 6.200 1 0.5070 6.7260 66.50 3.6519 8 307.0 17.40 360.20 8.05 29.00
0.33045 0.00 6.200 0 0.5070 6.0860 61.50 3.6519 8 307.0 17.40 376.75 10.88 24.00
0.52058 0.00 6.200 1 0.5070 6.6310 76.50 4.1480 8 307.0 17.40 388.45 9.54 25.10
0.51183 0.00 6.200 0 0.5070 7.3580 71.60 4.1480 8 307.0 17.40 390.07 4.73 31.50
0.08244 30.00 4.930 0 0.4280 6.4810 18.50 6.1899 6 300.0 16.60 379.41 6.36 23.70
0.09252 30.00 4.930 0 0.4280 6.6060 42.20 6.1899 6 300.0 16.60 383.78 7.37 23.30
0.11329 30.00 4.930 0 0.4280 6.8970 54.30 6.3361 6 300.0 16.60 391.25 11.38 22.00
0.10612 30.00 4.930 0 0.4280 6.0950 65.10 6.3361 6 300.0 16.60 394.62 12.40 20.10
0.10290 30.00 4.930 0 0.4280 6.3580 52.90 7.0355 6 300.0 16.60 372.75 11.22 22.20
0.12757 30.00 4.930 0 0.4280 6.3930 7.80 7.0355 6 300.0 16.60 374.71 5.19 23.70
0.20608 22.00 5.860 0 0.4310 5.5930 76.50 7.9549 7 330.0 19.10 372.49 12.50 17.60
0.19133 22.00 5.860 0 0.4310 5.6050 70.20 7.9549 7 330.0 19.10 389.13 18.46 18.50
0.33983 22.00 5.860 0 0.4310 6.1080 34.90 8.0555 7 330.0 19.10 390.18 9.16 24.30
0.19657 22.00 5.860 0 0.4310 6.2260 79.20 8.0555 7 330.0 19.10 376.14 10.15 20.50
0.16439 22.00 5.860 0 0.4310 6.4330 49.10 7.8265 7 330.0 19.10 374.71 9.52 24.50
0.19073 22.00 5.860 0 0.4310 6.7180 17.50 7.8265 7 330.0 19.10 393.74 6.56 26.20
0.14030 22.00 5.860 0 0.4310 6.4870 13.00 7.3967 7 330.0 19.10 396.28 5.90 24.40
0.21409 22.00 5.860 0 0.4310 6.4380 8.90 7.3967 7 330.0 19.10 377.07 3.59 24.80
0.08221 22.00 5.860 0 0.4310 6.9570 6.80 8.9067 7 330.0 19.10 386.09 3.53 29.60
0.36894 22.00 5.860 0 0.4310 8.2590 8.40 8.9067 7 330.0 19.10 396.90 3.54 42.80
0.04819 80.00 3.640 0 0.3920 6.1080 32.00 9.2203 1 315.0 16.40 392.89 6.57 21.90
0.03548 80.00 3.640 0 0.3920 5.8760 19.10 9.2203 1 315.0 16.40 395.18 9.25 20.90
0.01538 90.00 3.750 0 0.3940 7.4540 34.20 6.3361 3 244.0 15.90 386.34 3.11 44.00
0.61154 20.00 3.970 0 0.6470 8.7040 86.90 1.8010 5 264.0 13.00 389.70 5.12 50.00
0.66351 20.00 3.970 0 0.6470 7.3330 100.00 1.8946 5 264.0 13.00 383.29 7.79 36.00
0.65665 20.00 3.970 0 0.6470 6.8420 100.00 2.0107 5 264.0 13.00 391.93 6.90 30.10
0.54011 20.00 3.970 0 0.6470 7.2030 81.80 2.1121 5 264.0 13.00 392.80 9.59 33.80
0.53412 20.00 3.970 0 0.6470 7.5200 89.40 2.1398 5 264.0 13.00 388.37 7.26 43.10
0.52014 20.00 3.970 0 0.6470 8.3980 91.50 2.2885 5 264.0 13.00 386.86 5.91 48.80
0.82526 20.00 3.970 0 0.6470 7.3270 94.50 2.0788 5 264.0 13.00 393.42 11.25 31.00
0.55007 20.00 3.970 0 0.6470 7.2060 91.60 1.9301 5 264.0 13.00 387.89 8.10 36.50
0.76162 20.00 3.970 0 0.6470 5.5600 62.80 1.9865 5 264.0 13.00 392.40 10.45 22.80
0.78570 20.00 3.970 0 0.6470 7.0140 84.60 2.1329 5 264.0 13.00 384.07 14.79 30.70
0.57834 20.00 3.970 0 0.5750 8.2970 67.00 2.4216 5 264.0 13.00 384.54 7.44 50.00
0.54050 20.00 3.970 0 0.5750 7.4700 52.60 2.8720 5 264.0 13.00 390.30 3.16 43.50
0.09065 20.00 6.960 1 0.4640 5.9200 61.50 3.9175 3 223.0 18.60 391.34 13.65 20.70
0.29916 20.00 6.960 0 0.4640 5.8560 42.10 4.4290 3 223.0 18.60 388.65 13.00 21.10
0.16211 20.00 6.960 0 0.4640 6.2400 16.30 4.4290 3 223.0 18.60 396.90 6.59 25.20
0.11460 20.00 6.960 0 0.4640 6.5380 58.70 3.9175 3 223.0 18.60 394.96 7.73 24.40
0.22188 20.00 6.960 1 0.4640 7.6910 51.80 4.3665 3 223.0 18.60 390.77 6.58 35.20
0.05644 40.00 6.410 1 0.4470 6.7580 32.90 4.0776 4 254.0 17.60 396.90 3.53 32.40
0.09604 40.00 6.410 0 0.4470 6.8540 42.80 4.2673 4 254.0 17.60 396.90 2.98 32.00
0.10469 40.00 6.410 1 0.4470 7.2670 49.00 4.7872 4 254.0 17.60 389.25 6.05 33.20
0.06127 40.00 6.410 1 0.4470 6.8260 27.60 4.8628 4 254.0 17.60 393.45 4.16 33.10
0.07978 40.00 6.410 0 0.4470 6.4820 32.10 4.1403 4 254.0 17.60 396.90 7.19 29.10
0.21038 20.00 3.330 0 0.4429 6.8120 32.20 4.1007 5 216.0 14.90 396.90 4.85 35.10
0.03578 20.00 3.330 0 0.4429 7.8200 64.50 4.6947 5 216.0 14.90 387.31 3.76 45.40
0.03705 20.00 3.330 0 0.4429 6.9680 37.20 5.2447 5 216.0 14.90 392.23 4.59 35.40
0.06129 20.00 3.330 1 0.4429 7.6450 49.70 5.2119 5 216.0 14.90 377.07 3.01 46.00
0.01501 90.00 1.210 1 0.4010 7.9230 24.80 5.8850 1 198.0 13.60 395.52 3.16 50.00
0.00906 90.00 2.970 0 0.4000 7.0880 20.80 7.3073 1 285.0 15.30 394.72 7.85 32.20
0.01096 55.00 2.250 0 0.3890 6.4530 31.90 7.3073 1 300.0 15.30 394.72 8.23 22.00
0.01965 80.00 1.760 0 0.3850 6.2300 31.50 9.0892 1 241.0 18.20 341.60 12.93 20.10
0.03871 52.50 5.320 0 0.4050 6.2090 31.30 7.3172 6 293.0 16.60 396.90 7.14 23.20
0.04590 52.50 5.320 0 0.4050 6.3150 45.60 7.3172 6 293.0 16.60 396.90 7.60 22.30
0.04297 52.50 5.320 0 0.4050 6.5650 22.90 7.3172 6 293.0 16.60 371.72 9.51 24.80
0.03502 80.00 4.950 0 0.4110 6.8610 27.90 5.1167 4 245.0 19.20 396.90 3.33 28.50
0.07886 80.00 4.950 0 0.4110 7.1480 27.70 5.1167 4 245.0 19.20 396.90 3.56 37.30
0.03615 80.00 4.950 0 0.4110 6.6300 23.40 5.1167 4 245.0 19.20 396.90 4.70 27.90
0.08265 0.00 13.920 0 0.4370 6.1270 18.40 5.5027 4 289.0 16.00 396.90 8.58 23.90
0.08199 0.00 13.920 0 0.4370 6.0090 42.30 5.5027 4 289.0 16.00 396.90 10.40 21.70
0.12932 0.00 13.920 0 0.4370 6.6780 31.10 5.9604 4 289.0 16.00 396.90 6.27 28.60
0.05372 0.00 13.920 0 0.4370 6.5490 51.00 5.9604 4 289.0 16.00 392.85 7.39 27.10
0.14103 0.00 13.920 0 0.4370 5.7900 58.00 6.3200 4 289.0 16.00 396.90 15.84 20.30
0.06466 70.00 2.240 0 0.4000 6.3450 20.10 7.8278 5 358.0 14.80 368.24 4.97 22.50
0.05561 70.00 2.240 0 0.4000 7.0410 10.00 7.8278 5 358.0 14.80 371.58 4.74 29.00
0.04417 70.00 2.240 0 0.4000 6.8710 47.40 7.8278 5 358.0 14.80 390.86 6.07 24.80
0.03537 34.00 6.090 0 0.4330 6.5900 40.40 5.4917 7 329.0 16.10 395.75 9.50 22.00
0.09266 34.00 6.090 0 0.4330 6.4950 18.40 5.4917 7 329.0 16.10 383.61 8.67 26.40
0.10000 34.00 6.090 0 0.4330 6.9820 17.70 5.4917 7 329.0 16.10 390.43 4.86 33.10
0.05515 33.00 2.180 0 0.4720 7.2360 41.10 4.0220 7 222.0 18.40 393.68 6.93 36.10
0.05479 33.00 2.180 0 0.4720 6.6160 58.10 3.3700 7 222.0 18.40 393.36 8.93 28.40
0.07503 33.00 2.180 0 0.4720 7.4200 71.90 3.0992 7 222.0 18.40 396.90 6.47 33.40
0.04932 33.00 2.180 0 0.4720 6.8490 70.30 3.1827 7 222.0 18.40 396.90 7.53 28.20
0.49298 0.00 9.900 0 0.5440 6.6350 82.50 3.3175 4 304.0 18.40 396.90 4.54 22.80
0.34940 0.00 9.900 0 0.5440 5.9720 76.70 3.1025 4 304.0 18.40 396.24 9.97 20.30
2.63548 0.00 9.900 0 0.5440 4.9730 37.80 2.5194 4 304.0 18.40 350.45 12.64 16.10
0.79041 0.00 9.900 0 0.5440 6.1220 52.80 2.6403 4 304.0 18.40 396.90 5.98 22.10
0.26169 0.00 9.900 0 0.5440 6.0230 90.40 2.8340 4 304.0 18.40 396.30 11.72 19.40
0.26938 0.00 9.900 0 0.5440 6.2660 82.80 3.2628 4 304.0 18.40 393.39 7.90 21.60
0.36920 0.00 9.900 0 0.5440 6.5670 87.30 3.6023 4 304.0 18.40 395.69 9.28 23.80
0.25356 0.00 9.900 0 0.5440 5.7050 77.70 3.9450 4 304.0 18.40 396.42 11.50 16.20
0.31827 0.00 9.900 0 0.5440 5.9140 83.20 3.9986 4 304.0 18.40 390.70 18.33 17.80
0.24522 0.00 9.900 0 0.5440 5.7820 71.70 4.0317 4 304.0 18.40 396.90 15.94 19.80
0.40202 0.00 9.900 0 0.5440 6.3820 67.20 3.5325 4 304.0 18.40 395.21 10.36 23.10
0.47547 0.00 9.900 0 0.5440 6.1130 58.80 4.0019 4 304.0 18.40 396.23 12.73 21.00
0.16760 0.00 7.380 0 0.4930 6.4260 52.30 4.5404 5 287.0 19.60 396.90 7.20 23.80
0.18159 0.00 7.380 0 0.4930 6.3760 54.30 4.5404 5 287.0 19.60 396.90 6.87 23.10
0.35114 0.00 7.380 0 0.4930 6.0410 49.90 4.7211 5 287.0 19.60 396.90 7.70 20.40
0.28392 0.00 7.380 0 0.4930 5.7080 74.30 4.7211 5 287.0 19.60 391.13 11.74 18.50
0.34109 0.00 7.380 0 0.4930 6.4150 40.10 4.7211 5 287.0 19.60 396.90 6.12 25.00
0.19186 0.00 7.380 0 0.4930 6.4310 14.70 5.4159 5 287.0 19.60 393.68 5.08 24.60
0.30347 0.00 7.380 0 0.4930 6.3120 28.90 5.4159 5 287.0 19.60 396.90 6.15 23.00
0.24103 0.00 7.380 0 0.4930 6.0830 43.70 5.4159 5 287.0 19.60 396.90 12.79 22.20
0.06617 0.00 3.240 0 0.4600 5.8680 25.80 5.2146 4 430.0 16.90 382.44 9.97 19.30
0.06724 0.00 3.240 0 0.4600 6.3330 17.20 5.2146 4 430.0 16.90 375.21 7.34 22.60
0.04544 0.00 3.240 0 0.4600 6.1440 32.20 5.8736 4 430.0 16.90 368.57 9.09 19.80
0.05023 35.00 6.060 0 0.4379 5.7060 28.40 6.6407 1 304.0 16.90 394.02 12.43 17.10
0.03466 35.00 6.060 0 0.4379 6.0310 23.30 6.6407 1 304.0 16.90 362.25 7.83 19.40
0.05083 0.00 5.190 0 0.5150 6.3160 38.10 6.4584 5 224.0 20.20 389.71 5.68 22.20
0.03738 0.00 5.190 0 0.5150 6.3100 38.50 6.4584 5 224.0 20.20 389.40 6.75 20.70
0.03961 0.00 5.190 0 0.5150 6.0370 34.50 5.9853 5 224.0 20.20 396.90 8.01 21.10
0.03427 0.00 5.190 0 0.5150 5.8690 46.30 5.2311 5 224.0 20.20 396.90 9.80 19.50
0.03041 0.00 5.190 0 0.5150 5.8950 59.60 5.6150 5 224.0 20.20 394.81 10.56 18.50
0.03306 0.00 5.190 0 0.5150 6.0590 37.30 4.8122 5 224.0 20.20 396.14 8.51 20.60
0.05497 0.00 5.190 0 0.5150 5.9850 45.40 4.8122 5 224.0 20.20 396.90 9.74 19.00
0.06151 0.00 5.190 0 0.5150 5.9680 58.50 4.8122 5 224.0 20.20 396.90 9.29 18.70
0.01301 35.00 1.520 0 0.4420 7.2410 49.30 7.0379 1 284.0 15.50 394.74 5.49 32.70
0.02498 0.00 1.890 0 0.5180 6.5400 59.70 6.2669 1 422.0 15.90 389.96 8.65 16.50
0.02543 55.00 3.780 0 0.4840 6.6960 56.40 5.7321 5 370.0 17.60 396.90 7.18 23.90
0.03049 55.00 3.780 0 0.4840 6.8740 28.10 6.4654 5 370.0 17.60 387.97 4.61 31.20
0.03113 0.00 4.390 0 0.4420 6.0140 48.50 8.0136 3 352.0 18.80 385.64 10.53 17.50
0.06162 0.00 4.390 0 0.4420 5.8980 52.30 8.0136 3 352.0 18.80 364.61 12.67 17.20
0.01870 85.00 4.150 0 0.4290 6.5160 27.70 8.5353 4 351.0 17.90 392.43 6.36 23.10
0.01501 80.00 2.010 0 0.4350 6.6350 29.70 8.3440 4 280.0 17.00 390.94 5.99 24.50
0.02899 40.00 1.250 0 0.4290 6.9390 34.50 8.7921 1 335.0 19.70 389.85 5.89 26.60
0.06211 40.00 1.250 0 0.4290 6.4900 44.40 8.7921 1 335.0 19.70 396.90 5.98 22.90
0.07950 60.00 1.690 0 0.4110 6.5790 35.90 10.7103 4 411.0 18.30 370.78 5.49 24.10
0.07244 60.00 1.690 0 0.4110 5.8840 18.50 10.7103 4 411.0 18.30 392.33 7.79 18.60
0.01709 90.00 2.020 0 0.4100 6.7280 36.10 12.1265 5 187.0 17.00 384.46 4.50 30.10
0.04301 80.00 1.910 0 0.4130 5.6630 21.90 10.5857 4 334.0 22.00 382.80 8.05 18.20
0.10659 80.00 1.910 0 0.4130 5.9360 19.50 10.5857 4 334.0 22.00 376.04 5.57 20.60
8.98296 0.00 18.100 1 0.7700 6.2120 97.40 2.1222 24 666.0 20.20 377.73 17.60 17.80
3.84970 0.00 18.100 1 0.7700 6.3950 91.00 2.5052 24 666.0 20.20 391.34 13.27 21.70
5.20177 0.00 18.100 1 0.7700 6.1270 83.40 2.7227 24 666.0 20.20 395.43 11.48 22.70
4.26131 0.00 18.100 0 0.7700 6.1120 81.30 2.5091 24 666.0 20.20 390.74 12.67 22.60
4.54192 0.00 18.100 0 0.7700 6.3980 88.00 2.5182 24 666.0 20.20 374.56 7.79 25.00
3.83684 0.00 18.100 0 0.7700 6.2510 91.10 2.2955 24 666.0 20.20 350.65 14.19 19.90
3.67822 0.00 18.100 0 0.7700 5.3620 96.20 2.1036 24 666.0 20.20 380.79 10.19 20.80
4.22239 0.00 18.100 1 0.7700 5.8030 89.00 1.9047 24 666.0 20.20 353.04 14.64 16.80
3.47428 0.00 18.100 1 0.7180 8.7800 82.90 1.9047 24 666.0 20.20 354.55 5.29 21.90
4.55587 0.00 18.100 0 0.7180 3.5610 87.90 1.6132 24 666.0 20.20 354.70 7.12 27.50
3.69695 0.00 18.100 0 0.7180 4.9630 91.40 1.7523 24 666.0 20.20 316.03 14.00 21.90
13.52220 0.00 18.100 0 0.6310 3.8630 100.00 1.5106 24 666.0 20.20 131.42 13.33 23.10
4.89822 0.00 18.100 0 0.6310 4.9700 100.00 1.3325 24 666.0 20.20 375.52 3.26 50.00
5.66998 0.00 18.100 1 0.6310 6.6830 96.80 1.3567 24 666.0 20.20 375.33 3.73 50.00
6.53876 0.00 18.100 1 0.6310 7.0160 97.50 1.2024 24 666.0 20.20 392.05 2.96 50.00
9.23230 0.00 18.100 0 0.6310 6.2160 100.00 1.1691 24 666.0 20.20 366.15 9.53 50.00
8.26725 0.00 18.100 1 0.6680 5.8750 89.60 1.1296 24 666.0 20.20 347.88 8.88 50.00
11.10810 0.00 18.100 0 0.6680 4.9060 100.00 1.1742 24 666.0 20.20 396.90 34.77 13.80
18.49820 0.00 18.100 0 0.6680 4.1380 100.00 1.1370 24 666.0 20.20 396.90 37.97 13.80
19.60910 0.00 18.100 0 0.6710 7.3130 97.90 1.3163 24 666.0 20.20 396.90 13.44 15.00
15.28800 0.00 18.100 0 0.6710 6.6490 93.30 1.3449 24 666.0 20.20 363.02 23.24 13.90
9.82349 0.00 18.100 0 0.6710 6.7940 98.80 1.3580 24 666.0 20.20 396.90 21.24 13.30
23.64820 0.00 18.100 0 0.6710 6.3800 96.20 1.3861 24 666.0 20.20 396.90 23.69 13.10
17.86670 0.00 18.100 0 0.6710 6.2230 100.00 1.3861 24 666.0 20.20 393.74 21.78 10.20
88.97620 0.00 18.100 0 0.6710 6.9680 91.90 1.4165 24 666.0 20.20 396.90 17.21 10.40
15.87440 0.00 18.100 0 0.6710 6.5450 99.10 1.5192 24 666.0 20.20 396.90 21.08 10.90
9.18702 0.00 18.100 0 0.7000 5.5360 100.00 1.5804 24 666.0 20.20 396.90 23.60 11.30
7.99248 0.00 18.100 0 0.7000 5.5200 100.00 1.5331 24 666.0 20.20 396.90 24.56 12.30
20.08490 0.00 18.100 0 0.7000 4.3680 91.20 1.4395 24 666.0 20.20 285.83 30.63 8.80
16.81180 0.00 18.100 0 0.7000 5.2770 98.10 1.4261 24 666.0 20.20 396.90 30.81 7.20
24.39380 0.00 18.100 0 0.7000 4.6520 100.00 1.4672 24 666.0 20.20 396.90 28.28 10.50
22.59710 0.00 18.100 0 0.7000 5.0000 89.50 1.5184 24 666.0 20.20 396.90 31.99 7.40
14.33370 0.00 18.100 0 0.7000 4.8800 100.00 1.5895 24 666.0 20.20 372.92 30.62 10.20
8.15174 0.00 18.100 0 0.7000 5.3900 98.90 1.7281 24 666.0 20.20 396.90 20.85 11.50
6.96215 0.00 18.100 0 0.7000 5.7130 97.00 1.9265 24 666.0 20.20 394.43 17.11 15.10
5.29305 0.00 18.100 0 0.7000 6.0510 82.50 2.1678 24 666.0 20.20 378.38 18.76 23.20
11.57790 0.00 18.100 0 0.7000 5.0360 97.00 1.7700 24 666.0 20.20 396.90 25.68 9.70
8.64476 0.00 18.100 0 0.6930 6.1930 92.60 1.7912 24 666.0 20.20 396.90 15.17 13.80
13.35980 0.00 18.100 0 0.6930 5.8870 94.70 1.7821 24 666.0 20.20 396.90 16.35 12.70
8.71675 0.00 18.100 0 0.6930 6.4710 98.80 1.7257 24 666.0 20.20 391.98 17.12 13.10
5.87205 0.00 18.100 0 0.6930 6.4050 96.00 1.6768 24 666.0 20.20 396.90 19.37 12.50
7.67202 0.00 18.100 0 0.6930 5.7470 98.90 1.6334 24 666.0 20.20 393.10 19.92 8.50
38.35180 0.00 18.100 0 0.6930 5.4530 100.00 1.4896 24 666.0 20.20 396.90 30.59 5.00
9.91655 0.00 18.100 0 0.6930 5.8520 77.80 1.5004 24 666.0 20.20 338.16 29.97 6.30
25.04610 0.00 18.100 0 0.6930 5.9870 100.00 1.5888 24 666.0 20.20 396.90 26.77 5.60
14.23620 0.00 18.100 0 0.6930 6.3430 100.00 1.5741 24 666.0 20.20 396.90 20.32 7.20
9.59571 0.00 18.100 0 0.6930 6.4040 100.00 1.6390 24 666.0 20.20 376.11 20.31 12.10
24.80170 0.00 18.100 0 0.6930 5.3490 96.00 1.7028 24 666.0 20.20 396.90 19.77 8.30
41.52920 0.00 18.100 0 0.6930 5.5310 85.40 1.6074 24 666.0 20.20 329.46 27.38 8.50
67.92080 0.00 18.100 0 0.6930 5.6830 100.00 1.4254 24 666.0 20.20 384.97 22.98 5.00
20.71620 0.00 18.100 0 0.6590 4.1380 100.00 1.1781 24 666.0 20.20 370.22 23.34 11.90
11.95110 0.00 18.100 0 0.6590 5.6080 100.00 1.2852 24 666.0 20.20 332.09 12.13 27.90
7.40389 0.00 18.100 0 0.5970 5.6170 97.90 1.4547 24 666.0 20.20 314.64 26.40 17.20
14.43830 0.00 18.100 0 0.5970 6.8520 100.00 1.4655 24 666.0 20.20 179.36 19.78 27.50
51.13580 0.00 18.100 0 0.5970 5.7570 100.00 1.4130 24 666.0 20.20 2.60 10.11 15.00
14.05070 0.00 18.100 0 0.5970 6.6570 100.00 1.5275 24 666.0 20.20 35.05 21.22 17.20
18.81100 0.00 18.100 0 0.5970 4.6280 100.00 1.5539 24 666.0 20.20 28.79 34.37 17.90
28.65580 0.00 18.100 0 0.5970 5.1550 100.00 1.5894 24 666.0 20.20 210.97 20.08 16.30
45.74610 0.00 18.100 0 0.6930 4.5190 100.00 1.6582 24 666.0 20.20 88.27 36.98 7.00
18.08460 0.00 18.100 0 0.6790 6.4340 100.00 1.8347 24 666.0 20.20 27.25 29.05 7.20
10.83420 0.00 18.100 0 0.6790 6.7820 90.80 1.8195 24 666.0 20.20 21.57 25.79 7.50
25.94060 0.00 18.100 0 0.6790 5.3040 89.10 1.6475 24 666.0 20.20 127.36 26.64 10.40
73.53410 0.00 18.100 0 0.6790 5.9570 100.00 1.8026 24 666.0 20.20 16.45 20.62 8.80
11.81230 0.00 18.100 0 0.7180 6.8240 76.50 1.7940 24 666.0 20.20 48.45 22.74 8.40
11.08740 0.00 18.100 0 0.7180 6.4110 100.00 1.8589 24 666.0 20.20 318.75 15.02 16.70
7.02259 0.00 18.100 0 0.7180 6.0060 95.30 1.8746 24 666.0 20.20 319.98 15.70 14.20
12.04820 0.00 18.100 0 0.6140 5.6480 87.60 1.9512 24 666.0 20.20 291.55 14.10 20.80
7.05042 0.00 18.100 0 0.6140 6.1030 85.10 2.0218 24 666.0 20.20 2.52 23.29 13.40
8.79212 0.00 18.100 0 0.5840 5.5650 70.60 2.0635 24 666.0 20.20 3.65 17.16 11.70
15.86030 0.00 18.100 0 0.6790 5.8960 95.40 1.9096 24 666.0 20.20 7.68 24.39 8.30
12.24720 0.00 18.100 0 0.5840 5.8370 59.70 1.9976 24 666.0 20.20 24.65 15.69 10.20
37.66190 0.00 18.100 0 0.6790 6.2020 78.70 1.8629 24 666.0 20.20 18.82 14.52 10.90
7.36711 0.00 18.100 0 0.6790 6.1930 78.10 1.9356 24 666.0 20.20 96.73 21.52 11.00
9.33889 0.00 18.100 0 0.6790 6.3800 95.60 1.9682 24 666.0 20.20 60.72 24.08 9.50
8.49213 0.00 18.100 0 0.5840 6.3480 86.10 2.0527 24 666.0 20.20 83.45 17.64 14.50
10.06230 0.00 18.100 0 0.5840 6.8330 94.30 2.0882 24 666.0 20.20 81.33 19.69 14.10
6.44405 0.00 18.100 0 0.5840 6.4250 74.80 2.2004 24 666.0 20.20 97.95 12.03 16.10
5.58107 0.00 18.100 0 0.7130 6.4360 87.90 2.3158 24 666.0 20.20 100.19 16.22 14.30
13.91340 0.00 18.100 0 0.7130 6.2080 95.00 2.2222 24 666.0 20.20 100.63 15.17 11.70
11.16040 0.00 18.100 0 0.7400 6.6290 94.60 2.1247 24 666.0 20.20 109.85 23.27 13.40
14.42080 0.00 18.100 0 0.7400 6.4610 93.30 2.0026 24 666.0 20.20 27.49 18.05 9.60
15.17720 0.00 18.100 0 0.7400 6.1520 100.00 1.9142 24 666.0 20.20 9.32 26.45 8.70
13.67810 0.00 18.100 0 0.7400 5.9350 87.90 1.8206 24 666.0 20.20 68.95 34.02 8.40
9.39063 0.00 18.100 0 0.7400 5.6270 93.90 1.8172 24 666.0 20.20 396.90 22.88 12.80
22.05110 0.00 18.100 0 0.7400 5.8180 92.40 1.8662 24 666.0 20.20 391.45 22.11 10.50
9.72418 0.00 18.100 0 0.7400 6.4060 97.20 2.0651 24 666.0 20.20 385.96 19.52 17.10
5.66637 0.00 18.100 0 0.7400 6.2190 100.00 2.0048 24 666.0 20.20 395.69 16.59 18.40
9.96654 0.00 18.100 0 0.7400 6.4850 100.00 1.9784 24 666.0 20.20 386.73 18.85 15.40
12.80230 0.00 18.100 0 0.7400 5.8540 96.60 1.8956 24 666.0 20.20 240.52 23.79 10.80
10.67180 0.00 18.100 0 0.7400 6.4590 94.80 1.9879 24 666.0 20.20 43.06 23.98 11.80
6.28807 0.00 18.100 0 0.7400 6.3410 96.40 2.0720 24 666.0 20.20 318.01 17.79 14.90
9.92485 0.00 18.100 0 0.7400 6.2510 96.60 2.1980 24 666.0 20.20 388.52 16.44 12.60
9.32909 0.00 18.100 0 0.7130 6.1850 98.70 2.2616 24 666.0 20.20 396.90 18.13 14.10
7.52601 0.00 18.100 0 0.7130 6.4170 98.30 2.1850 24 666.0 20.20 304.21 19.31 13.00
6.71772 0.00 18.100 0 0.7130 6.7490 92.60 2.3236 24 666.0 20.20 0.32 17.44 13.40
5.44114 0.00 18.100 0 0.7130 6.6550 98.20 2.3552 24 666.0 20.20 355.29 17.73 15.20
5.09017 0.00 18.100 0 0.7130 6.2970 91.80 2.3682 24 666.0 20.20 385.09 17.27 16.10
8.24809 0.00 18.100 0 0.7130 7.3930 99.30 2.4527 24 666.0 20.20 375.87 16.74 17.80
9.51363 0.00 18.100 0 0.7130 6.7280 94.10 2.4961 24 666.0 20.20 6.68 18.71 14.90
4.75237 0.00 18.100 0 0.7130 6.5250 86.50 2.4358 24 666.0 20.20 50.92 18.13 14.10
4.66883 0.00 18.100 0 0.7130 5.9760 87.90 2.5806 24 666.0 20.20 10.48 19.01 12.70
8.20058 0.00 18.100 0 0.7130 5.9360 80.30 2.7792 24 666.0 20.20 3.50 16.94 13.50
7.75223 0.00 18.100 0 0.7130 6.3010 83.70 2.7831 24 666.0 20.20 272.21 16.23 14.90
6.80117 0.00 18.100 0 0.7130 6.0810 84.40 2.7175 24 666.0 20.20 396.90 14.70 20.00
4.81213 0.00 18.100 0 0.7130 6.7010 90.00 2.5975 24 666.0 20.20 255.23 16.42 16.40
3.69311 0.00 18.100 0 0.7130 6.3760 88.40 2.5671 24 666.0 20.20 391.43 14.65 17.70
6.65492 0.00 18.100 0 0.7130 6.3170 83.00 2.7344 24 666.0 20.20 396.90 13.99 19.50
5.82115 0.00 18.100 0 0.7130 6.5130 89.90 2.8016 24 666.0 20.20 393.82 10.29 20.20
7.83932 0.00 18.100 0 0.6550 6.2090 65.40 2.9634 24 666.0 20.20 396.90 13.22 21.40
3.16360 0.00 18.100 0 0.6550 5.7590 48.20 3.0665 24 666.0 20.20 334.40 14.13 19.90
3.77498 0.00 18.100 0 0.6550 5.9520 84.70 2.8715 24 666.0 20.20 22.01 17.15 19.00
4.42228 0.00 18.100 0 0.5840 6.0030 94.50 2.5403 24 666.0 20.20 331.29 21.32 19.10
15.57570 0.00 18.100 0 0.5800 5.9260 71.00 2.9084 24 666.0 20.20 368.74 18.13 19.10
13.07510 0.00 18.100 0 0.5800 5.7130 56.70 2.8237 24 666.0 20.20 396.90 14.76 20.10
4.34879 0.00 18.100 0 0.5800 6.1670 84.00 3.0334 24 666.0 20.20 396.90 16.29 19.90
4.03841 0.00 18.100 0 0.5320 6.2290 90.70 3.0993 24 666.0 20.20 395.33 12.87 19.60
3.56868 0.00 18.100 0 0.5800 6.4370 75.00 2.8965 24 666.0 20.20 393.37 14.36 23.20
4.64689 0.00 18.100 0 0.6140 6.9800 67.60 2.5329 24 666.0 20.20 374.68 11.66 29.80
8.05579 0.00 18.100 0 0.5840 5.4270 95.40 2.4298 24 666.0 20.20 352.58 18.14 13.80
6.39312 0.00 18.100 0 0.5840 6.1620 97.40 2.2060 24 666.0 20.20 302.76 24.10 13.30
4.87141 0.00 18.100 0 0.6140 6.4840 93.60 2.3053 24 666.0 20.20 396.21 18.68 16.70
15.02340 0.00 18.100 0 0.6140 5.3040 97.30 2.1007 24 666.0 20.20 349.48 24.91 12.00
10.23300 0.00 18.100 0 0.6140 6.1850 96.70 2.1705 24 666.0 20.20 379.70 18.03 14.60
14.33370 0.00 18.100 0 0.6140 6.2290 88.00 1.9512 24 666.0 20.20 383.32 13.11 21.40
5.82401 0.00 18.100 0 0.5320 6.2420 64.70 3.4242 24 666.0 20.20 396.90 10.74 23.00
5.70818 0.00 18.100 0 0.5320 6.7500 74.90 3.3317 24 666.0 20.20 393.07 7.74 23.70
5.73116 0.00 18.100 0 0.5320 7.0610 77.00 3.4106 24 666.0 20.20 395.28 7.01 25.00
2.81838 0.00 18.100 0 0.5320 5.7620 40.30 4.0983 24 666.0 20.20 392.92 10.42 21.80
2.37857 0.00 18.100 0 0.5830 5.8710 41.90 3.7240 24 666.0 20.20 370.73 13.34 20.60
3.67367 0.00 18.100 0 0.5830 6.3120 51.90 3.9917 24 666.0 20.20 388.62 10.58 21.20
5.69175 0.00 18.100 0 0.5830 6.1140 79.80 3.5459 24 666.0 20.20 392.68 14.98 19.10
4.83567 0.00 18.100 0 0.5830 5.9050 53.20 3.1523 24 666.0 20.20 388.22 11.45 20.60
0.15086 0.00 27.740 0 0.6090 5.4540 92.70 1.8209 4 711.0 20.10 395.09 18.06 15.20
0.18337 0.00 27.740 0 0.6090 5.4140 98.30 1.7554 4 711.0 20.10 344.05 23.97 7.00
0.20746 0.00 27.740 0 0.6090 5.0930 98.00 1.8226 4 711.0 20.10 318.43 29.68 8.10
0.10574 0.00 27.740 0 0.6090 5.9830 98.80 1.8681 4 711.0 20.10 390.11 18.07 13.60
0.11132 0.00 27.740 0 0.6090 5.9830 83.50 2.1099 4 711.0 20.10 396.90 13.35 20.10
0.17331 0.00 9.690 0 0.5850 5.7070 54.00 2.3817 6 391.0 19.20 396.90 12.01 21.80
0.27957 0.00 9.690 0 0.5850 5.9260 42.60 2.3817 6 391.0 19.20 396.90 13.59 24.50
0.17899 0.00 9.690 0 0.5850 5.6700 28.80 2.7986 6 391.0 19.20 393.29 17.60 23.10
0.28960 0.00 9.690 0 0.5850 5.3900 72.90 2.7986 6 391.0 19.20 396.90 21.14 19.70
0.26838 0.00 9.690 0 0.5850 5.7940 70.60 2.8927 6 391.0 19.20 396.90 14.10 18.30
0.23912 0.00 9.690 0 0.5850 6.0190 65.30 2.4091 6 391.0 19.20 396.90 12.92 21.20
0.17783 0.00 9.690 0 0.5850 5.5690 73.50 2.3999 6 391.0 19.20 395.77 15.10 17.50
0.22438 0.00 9.690 0 0.5850 6.0270 79.70 2.4982 6 391.0 19.20 396.90 14.33 16.80
0.06263 0.00 11.930 0 0.5730 6.5930 69.10 2.4786 1 273.0 21.00 391.99 9.67 22.40
0.04527 0.00 11.930 0 0.5730 6.1200 76.70 2.2875 1 273.0 21.00 396.90 9.08 20.60
0.06076 0.00 11.930 0 0.5730 6.9760 91.00 2.1675 1 273.0 21.00 396.90 5.64 23.90
0.10959 0.00 11.930 0 0.5730 6.7940 89.30 2.3889 1 273.0 21.00 393.45 6.48 22.00
0.04741 0.00 11.930 0 0.5730 6.0300 80.80 2.5050 1 273.0 21.00 396.90 7.88 11.90

View file

@ -0,0 +1,21 @@
# Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import abc
import numpy as np
class Evaluation(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def evaluate(self, gt, pred):
"""Compute a scalar for access the performance"""
return None
class RMSE(Evaluation):
"Rooted Mean Square Error"
name = 'RMSE'
def evaluate(self, gt, pred):
return np.sqrt(np.square(gt-pred).astype(np.float).mean())
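# Editor's note: i.e. the usual sqrt(mean((gt - pred)**2)); a short check with
# made-up numbers:
import numpy as np
gt, pred = np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 5.0])
assert np.isclose(np.sqrt(np.square(gt - pred).mean()), np.sqrt(4.0 / 3.0))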

View file

@ -0,0 +1,109 @@
# Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import abc
import numpy as np
import GPy
class RegressionMethod(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
self.preprocess = True
def _preprocess(self, data, train):
"""Zero-mean, unit-variance normalization by default"""
if train:
inputs, labels = data
self.data_mean = inputs.mean(axis=0)
self.data_std = inputs.std(axis=0)
self.labels_mean = labels.mean(axis=0)
self.labels_std = labels.std(axis=0)
return ((inputs-self.data_mean)/self.data_std, (labels-self.labels_mean)/self.labels_std)
else:
return (data-self.data_mean)/self.data_std
def _reverse_trans_labels(self, labels):
return labels*self.labels_std+self.labels_mean
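# Editor's note: the default preprocessing is a per-column z-score of inputs
# and labels, and _reverse_trans_labels undoes it on predictions. Round-trip
# sketch with made-up labels:
import numpy as np
labels = np.array([[10.0], [20.0], [40.0]])
mean, std = labels.mean(axis=0), labels.std(axis=0)
assert np.allclose(((labels - mean) / std) * std + mean, labels)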
def fit(self, train_data):
if self.preprocess:
train_data = self._preprocess(train_data, True)
return self._fit(train_data)
def predict(self, test_data):
if self.preprocess:
test_data = self._preprocess(test_data, False)
labels = self._predict(test_data)
if self.preprocess:
labels = self._reverse_trans_labels(labels)
return labels
@abc.abstractmethod
def _fit(self, train_data):
"""Fit the model. Return True if successful"""
return True
@abc.abstractmethod
def _predict(self, test_data):
"""Predict on test data"""
return None
class GP_RBF(RegressionMethod):
name = 'GP_RBF'
def _fit(self, train_data):
inputs, labels = train_data
self.model = GPy.models.GPRegression(inputs, labels, kernel=GPy.kern.RBF(inputs.shape[-1], ARD=True) + GPy.kern.Linear(inputs.shape[1], ARD=True))
self.model.likelihood.variance[:] = labels.var()*0.01
self.model.optimize()
return True
def _predict(self, test_data):
return self.model.predict(test_data)[0]
class SparseGP_RBF(RegressionMethod):
name = 'SparseGP_RBF'
def _fit(self, train_data):
inputs, labels = train_data
self.model = GPy.models.SparseGPRegression(inputs, labels, kernel=GPy.kern.RBF(inputs.shape[-1], ARD=True) + GPy.kern.Linear(inputs.shape[1], ARD=True), num_inducing=100)
self.model.likelihood.variance[:] = labels.var()*0.01
self.model.optimize()
return True
def _predict(self, test_data):
return self.model.predict(test_data)[0]
# class MRD_RBF(RegressionMethod):
# name = 'MRD_RBF'
#
# def _fit(self, train_data):
# inputs, labels = train_data
# Q = 5
# self.model = GPy.models.MRD([inputs, labels],Q,kernel=GPy.kern.RBF(Q,ARD=True),num_inducing=50)
# self.model.Y0.likelihood.variance[:] = inputs.var()*0.01
# self.model.Y1.likelihood.variance[:] = labels.var()*0.01
# self.model.optimize()
# return True
#
# def _predict(self, test_data):
# return self.model.predict(self.model.Y0.infer_newX(test_data)[0])[0]
class SVIGP_RBF(RegressionMethod):
name = 'SVIGP_RBF'
def _fit(self, train_data):
X, Y = train_data
Z = X[np.random.permutation(X.shape[0])[:100]]
k = GPy.kern.RBF(X.shape[1], ARD=True) + GPy.kern.Linear(X.shape[1], ARD=True) + GPy.kern.White(X.shape[1],0.01)
lik = GPy.likelihoods.StudentT(deg_free=3.)
self.model = GPy.core.SVGP(X, Y, Z=Z, kernel=k, likelihood=lik)
for i in range(10): self.model.optimize('scg', max_iters=40, gtol=0, messages=0, xtol=0, ftol=0)
self.model.optimize('bfgs', max_iters=1000, gtol=0, messages=0)
return True
def _predict(self, test_data):
return self.model.predict(test_data)[0]
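# Editor's note: minimal usage sketch, assuming GPy is importable and using
# synthetic data (sizes and noise level are illustrative, not from the source):
import numpy as np
np.random.seed(0)
X = np.random.randn(120, 2)
Y = np.sin(X[:, :1]) + 0.1 * np.random.randn(120, 1)
m = GP_RBF()
m.fit((X[:100], Y[:100]))       # z-scores the data, then optimizes the GP
pred = m.predict(X[100:])       # predictions mapped back to the label scale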

View file

@ -0,0 +1,64 @@
# Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
import abc
import os
import numpy as np
class Output(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def output(self, config, results):
"""Return the test data: training data and labels"""
return None
class ScreenOutput(Output):
def output(self, config, results):
print('='*10+'Report'+'='*10)
print('\t'.join([' ']+[m.name+'('+e+')' for m in config['methods'] for e in [a.name for a in config['evaluations']]+['time']]))
for task_i in range(len(config['tasks'])):
print(config['tasks'][task_i].name+'\t', end='')
outputs = []
for method_i in range(len(config['methods'])):
for ei in range(len(config['evaluations'])+1):
m,s = results[task_i, method_i, ei].mean(), results[task_i, method_i, ei].std()
outputs.append('%e(%e)'%(m,s))
print('\t'.join(outputs))
class CSVOutput(Output):
def __init__(self, outpath, prjname):
self.fname = os.path.join(outpath, prjname+'.csv')
def output(self, config, results):
with open(self.fname,'w') as f:
f.write(','.join([' ']+[m.name+'('+e+')' for m in config['methods'] for e in [a.name for a in config['evaluations']]+['time']])+'\n')
for task_i in range(len(config['tasks'])):
f.write(config['tasks'][task_i].name+',')
outputs = []
for method_i in range(len(config['methods'])):
for ei in range(len(config['evaluations'])+1):
m,s = results[task_i, method_i, ei].mean(), results[task_i, method_i, ei].std()
outputs.append('%e (%e)'%(m,s))
f.write(','.join(outputs)+'\n')
class H5Output(Output):
def __init__(self, outpath, prjname):
self.fname = os.path.join(outpath, prjname+'.h5')
def output(self, config, results):
try:
import h5py
f = h5py.File(self.fname,'w')
d = f.create_dataset('results',results.shape, dtype=results.dtype)
d[:] = results
f.close()
except Exception:
raise IOError('Failed to write the results into an HDF5 file!')
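# Editor's note: reading the results back is symmetric; a sketch assuming the
# file was written by H5Output with prjname='regression' as in the benchmark
# script below:
import h5py
with h5py.File('regression.h5', 'r') as f:
    results = f['results'][:]   # shape (tasks, methods, evaluations+1, repeats)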

View file

@ -0,0 +1,53 @@
# Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
from evaluation import RMSE
from methods import GP_RBF, SVIGP_RBF, SparseGP_RBF
from tasks import Housing, WineQuality
from outputs import ScreenOutput, CSVOutput, H5Output
import numpy as np
import time
outpath = '.'
prjname = 'regression'
config = {
'evaluations':[RMSE],
'methods':[GP_RBF, SVIGP_RBF, SparseGP_RBF],
'tasks':[WineQuality,Housing],
'repeats':2,
'outputs': [ScreenOutput(), CSVOutput(outpath, prjname), H5Output(outpath, prjname)]
}
if __name__=='__main__':
results = np.zeros((len(config['tasks']), len(config['methods']), len(config['evaluations'])+1, config['repeats']))
for task_i in range(len(config['tasks'])):
dataset = config['tasks'][task_i]()
print('Benchmarking on '+dataset.name)
res = dataset.load_data()
if not res: print('Failed to load '+config['tasks'][task_i].name); continue
train = dataset.get_training_data()
test = dataset.get_test_data()
for method_i in range(len(config['methods'])):
method = config['methods'][method_i]
print('With the method '+method.name, end='')
for ri in range(config['repeats']):
m = method()
t_st = time.time()
m.fit(train)
pred = m.predict(test[0])
t_pd = time.time() - t_st
for ei in range(len(config['evaluations'])):
evalu = config['evaluations'][ei]()
results[task_i, method_i, ei, ri] = evalu.evaluate(test[1], pred)
results[task_i, method_i, -1, ri] = t_pd
print('.',end='')
print()
for out in config['outputs']: out.output(config, results)
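Note on indexing: the results array is addressed as results[task, method, evaluation, repeat], and the final evaluation slot holds the wall-clock time measured around fit and predict, which is why the report headers append a 'time' column per method.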

View file

@ -0,0 +1,86 @@
# Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import abc
import os
import numpy as np
class RegressionTask(object):
__metaclass__ = abc.ABCMeta
def __init__(self, datapath='./'):
self.datapath = datapath
@abc.abstractmethod
def load_data(self):
"""Download the dataset if not exist. Return True if successful"""
return True
@abc.abstractmethod
def get_training_data(self):
"""Return the training data: training data and labels"""
return None
@abc.abstractmethod
def get_test_data(self):
"""Return the test data: training data and labels"""
return None
class Housing(RegressionTask):
name='Housing'
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data"
filename = 'housing.data'
def load_data(self):
from GPy.util.datasets import download_url, data_path
if not os.path.exists(os.path.join(data_path,self.datapath, self.filename)):
download_url(Housing.url, self.datapath, messages=True)
if not os.path.exists(os.path.join(data_path, self.datapath, self.filename)):
return False
data = np.loadtxt(os.path.join(data_path, self.datapath, self.filename))
self.data = data
data_train = data[:250,:-1]
label_train = data[:250, -1:]
self.train = (data_train, label_train)
data_test = data[250:,:-1]
label_test = data[250:,-1:]
self.test = (data_test, label_test)
return True
def get_training_data(self):
return self.train
def get_test_data(self):
return self.test
class WineQuality(RegressionTask):
name='WineQuality'
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
filename = 'winequality-red.csv'
def load_data(self):
from GPy.util.datasets import download_url, data_path
if not os.path.exists(os.path.join(data_path,self.datapath, self.filename)):
download_url(self.url, self.datapath, messages=True)
if not os.path.exists(os.path.join(data_path, self.datapath, self.filename)):
return False
data = np.loadtxt(os.path.join(data_path, self.datapath, self.filename),skiprows=1,delimiter=';')
self.data = data
data_train = data[:1000,:-1]
label_train = data[:1000, -1:]
self.train = (data_train, label_train)
data_test = data[1000:,:-1]
label_test = data[1000:,-1:]
self.test = (data_test, label_test)
return True
def get_training_data(self):
return self.train
def get_test_data(self):
return self.test
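# Editor's note: usage sketch (the first call downloads the data, so network
# access is assumed; class names as defined above):
task = Housing()
if task.load_data():
    Xtr, Ytr = task.get_training_data()
    Xte, Yte = task.get_test_data()
    print(Xtr.shape, Xte.shape)   # 250/256 split of the 506-row housing set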

View file

@ -12,18 +12,31 @@ version = '0.6.1'
def read(fname): def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read() return open(os.path.join(os.path.dirname(__file__), fname)).read()
#compile_flags = ["-march=native", '-fopenmp', '-O3', ] #Mac OS X Clang doesn't support OpenMP th the current time.
compile_flags = [ '-fopenmp', '-O3', ] #This detects if we are building on a Mac
def ismac():
# True when building on Mac OS X (any sys.platform starting with 'darwin')
return sys.platform.startswith('darwin')
if ismac():
compile_flags = [ '-O3', ]
link_args = ['']
else:
compile_flags = [ '-fopenmp', '-O3', ]
link_args = ['-lgomp']
ext_mods = [Extension(name='GPy.kern._src.stationary_cython', ext_mods = [Extension(name='GPy.kern._src.stationary_cython',
sources=['GPy/kern/_src/stationary_cython.c','GPy/kern/_src/stationary_utils.c'], sources=['GPy/kern/_src/stationary_cython.c','GPy/kern/_src/stationary_utils.c'],
include_dirs=[np.get_include()], include_dirs=[np.get_include()],
extra_compile_args=compile_flags, extra_compile_args=compile_flags,
extra_link_args = ['-lgomp']), extra_link_args = link_args),
Extension(name='GPy.util.choleskies_cython', Extension(name='GPy.util.choleskies_cython',
sources=['GPy/util/choleskies_cython.c'], sources=['GPy/util/choleskies_cython.c'],
include_dirs=[np.get_include()], include_dirs=[np.get_include()],
extra_link_args = ['-lgomp'], extra_link_args = link_args,
extra_compile_args=compile_flags), extra_compile_args=compile_flags),
Extension(name='GPy.util.linalg_cython', Extension(name='GPy.util.linalg_cython',
sources=['GPy/util/linalg_cython.c'], sources=['GPy/util/linalg_cython.c'],