[kernel] plot_ard added (some other fixes as well)

This commit is contained in:
Max Zwiessele 2015-10-09 16:07:57 +01:00
parent e3617942d4
commit d2d8a62d2d
14 changed files with 371 additions and 337 deletions

View file

@ -1,5 +1,5 @@
# This is the local installation configuration file for GPy
[plotting]
library = plotly
#library = matplotlib
#library = plotly
library = matplotlib

View file

@ -1,35 +1,54 @@
# Copyright (c) 2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
try:
#===========================================================================
# Load in your plotting library here and
# save it under the name plotting_library!
# This is hooking the library in
# for the usage in GPy:
from ..util.config import config
lib = config.get('plotting', 'library')
if lib == 'matplotlib':
import matplotlib
from .matplot_dep import plot_definitions
plotting_library = plot_definitions.MatplotlibPlots()
if lib == 'plotly':
import plotly
from .plotly_dep import plot_definitions
plotting_library = plot_definitions.PlotlyPlots()
#===========================================================================
except (ImportError, NameError):
raise
import warnings
warnings.warn(ImportWarning("{} not available, install newest version of {} for plotting".format(lib, lib)))
config.set('plotting', 'library', 'none')
current_lib = [None]
def change_plotting_library(lib):
    """
    Switch the plotting backend GPy uses at runtime.

    :param str lib: the backend to activate; one of 'matplotlib',
        'plotly' or 'none'.

    On success the backend wrapper instance is stored in
    ``current_lib[0]`` (or None for 'none').  If the requested library
    cannot be imported — or the name is not recognised — an
    ImportWarning is issued, the active backend is reset to None and
    the config entry is set to 'none', so the config and the runtime
    state never disagree.
    """
    try:
        #===========================================================================
        # Load in your plotting library here and
        # save it under the name plotting_library!
        # This is hooking the library in
        # for the usage in GPy:
        if lib == 'matplotlib':
            import matplotlib  # fail early here if the backend is not installed
            from .matplot_dep.plot_definitions import MatplotlibPlots
            current_lib[0] = MatplotlibPlots()
        elif lib == 'plotly':
            import plotly  # fail early here if the backend is not installed
            from .plotly_dep.plot_definitions import PlotlyPlots
            current_lib[0] = PlotlyPlots()
        elif lib == 'none':
            current_lib[0] = None
        else:
            # Previously an unknown name was silently ignored, keeping the
            # old backend active; surface it through the except path instead.
            raise NameError("unknown plotting library {!r}".format(lib))
        #===========================================================================
    except (ImportError, NameError):
        import warnings
        warnings.warn(ImportWarning("{} not available, install newest version of {} for plotting".format(lib, lib)))
        # Reset the backend too: config now says 'none', so do not keep a
        # stale/half-initialized library in current_lib.
        current_lib[0] = None
        config.set('plotting', 'library', 'none')
from ..util.config import config
lib = config.get('plotting', 'library')
change_plotting_library(lib)
def plotting_library():
    """Return the plotting backend currently in use (None when plotting is disabled)."""
    backend = current_lib[0]
    return backend
def show(figure, **kwargs):
    """
    Display a figure returned by add_to_canvas() using the active
    plotting backend.

    Any keyword arguments are passed straight through to the backend's
    own show/draw routine, so they are library specific.
    """
    backend = plotting_library()
    return backend.show_canvas(figure, **kwargs)
if config.get('plotting', 'library') is not 'none':
# Inject the plots into classes here:
# Already converted to new style:
from . import gpy_plot
from ..core import GP
GP.plot_data = gpy_plot.data_plots.plot_data
GP.plot_errorbars_trainset = gpy_plot.data_plots.plot_errorbars_trainset
@ -40,10 +59,10 @@ if config.get('plotting', 'library') is not 'none':
GP.plot = gpy_plot.gp_plots.plot
GP.plot_f = gpy_plot.gp_plots.plot_f
GP.plot_magnification = gpy_plot.latent_plots.plot_magnification
from ..core import SparseGP
SparseGP.plot_inducing = gpy_plot.data_plots.plot_inducing
from ..models import GPLVM, BayesianGPLVM, bayesian_gplvm_minibatch, SSGPLVM, SSMRD
GPLVM.plot_latent = gpy_plot.latent_plots.plot_latent
GPLVM.plot_scatter = gpy_plot.latent_plots.plot_latent_scatter
@ -61,11 +80,11 @@ if config.get('plotting', 'library') is not 'none':
SSGPLVM.plot_scatter = gpy_plot.latent_plots.plot_latent_scatter
SSGPLVM.plot_inducing = gpy_plot.latent_plots.plot_latent_inducing
SSGPLVM.plot_steepest_gradient_map = gpy_plot.latent_plots.plot_steepest_gradient_map
from ..kern import Kern
Kern.plot_covariance = gpy_plot.kernel_plots.plot_covariance
Kern.plot_covariance = gpy_plot.kernel_plots.plot_ARD
Kern.plot_ARD = gpy_plot.kernel_plots.plot_ARD
from ..inference.optimization import Optimizer
Optimizer.plot = gpy_plot.inference_plots.plot_optimizer
# Variational plot!
# Variational plot!

View file

@ -1,3 +1,2 @@
from .. import plotting_library
pl = plotting_library
from . import data_plots, gp_plots, latent_plots, kernel_plots, plot_util, inference_plots

View file

@ -1,21 +1,21 @@
#===============================================================================
# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ -28,7 +28,8 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from . import pl
from . import plotting_library as pl
#from .. import gpy_plot
from .plot_util import get_x_y_var, get_free_dims, get_which_data_ycols,\
get_which_data_rows, update_not_existing_kwargs, helper_predict_with_model
@ -51,12 +52,12 @@ def plot_data(self, which_data_rows='all',
:param {'2d','3d'} projection: whether to plot in 2d or 3d. This only applies when plotting two dimensional inputs!
:param str label: the label for the plot
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
:returns list: of plots created.
"""
canvas, plot_kwargs = pl.new_canvas(projection=projection, **plot_kwargs)
canvas, plot_kwargs = pl().new_canvas(projection=projection, **plot_kwargs)
plots = _plot_data(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, label, **plot_kwargs)
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
def _plot_data(self, canvas, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
@ -66,26 +67,26 @@ def _plot_data(self, canvas, which_data_rows='all',
X, _, Y = get_x_y_var(self)
free_dims = get_free_dims(self, visible_dims, None)
plots = {}
plots['dataplot'] = []
#one dimensional plotting
if len(free_dims) == 1:
for d in ycols:
update_not_existing_kwargs(plot_kwargs, pl.defaults.data_1d) # @UndefinedVariable
plots['dataplot'].append(pl.scatter(canvas, X[rows, free_dims], Y[rows, d], label=label, **plot_kwargs))
update_not_existing_kwargs(plot_kwargs, pl().defaults.data_1d) # @UndefinedVariable
plots['dataplot'].append(pl().scatter(canvas, X[rows, free_dims], Y[rows, d], label=label, **plot_kwargs))
#2D plotting
elif len(free_dims) == 2:
if projection=='2d':
for d in ycols:
update_not_existing_kwargs(plot_kwargs, pl.defaults.data_2d) # @UndefinedVariable
plots['dataplot'].append(pl.scatter(canvas, X[rows, free_dims[0]], X[rows, free_dims[1]],
update_not_existing_kwargs(plot_kwargs, pl().defaults.data_2d) # @UndefinedVariable
plots['dataplot'].append(pl().scatter(canvas, X[rows, free_dims[0]], X[rows, free_dims[1]],
color=Y[rows, d], label=label, **plot_kwargs))
else:
for d in ycols:
update_not_existing_kwargs(plot_kwargs, pl.defaults.data_2d) # @UndefinedVariable
plots['dataplot'].append(pl.scatter(canvas, X[rows, free_dims[0]], X[rows, free_dims[1]],
update_not_existing_kwargs(plot_kwargs, pl().defaults.data_2d) # @UndefinedVariable
plots['dataplot'].append(pl().scatter(canvas, X[rows, free_dims[0]], X[rows, free_dims[1]],
Z=Y[rows, d], color=Y[rows, d], label=label, **plot_kwargs))
elif len(free_dims) == 0:
pass #Nothing to plot!
@ -98,7 +99,7 @@ def plot_data_error(self, which_data_rows='all',
projection='2d', label=None, **error_kwargs):
"""
Plot the training data input error.
For higher dimensions than two, use fixed_inputs to plot the data points with some of the inputs fixed.
Can plot only part of the data
@ -114,12 +115,12 @@ def plot_data_error(self, which_data_rows='all',
:param dict error_kwargs: kwargs for the error plot for the plotting library you are using
:param str label: the label for the plot
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
:returns list: of plots created.
"""
canvas, error_kwargs = pl.new_canvas(projection=projection, **error_kwargs)
canvas, error_kwargs = pl().new_canvas(projection=projection, **error_kwargs)
plots = _plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, label, **error_kwargs)
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
def _plot_data_error(self, canvas, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
@ -129,27 +130,27 @@ def _plot_data_error(self, canvas, which_data_rows='all',
X, X_variance, Y = get_x_y_var(self)
free_dims = get_free_dims(self, visible_dims, None)
plots = {}
if X_variance is not None:
plots['xerrorplot'] = []
if X_variance is not None:
plots['xerrorplot'] = []
#one dimensional plotting
if len(free_dims) == 1:
for d in ycols:
update_not_existing_kwargs(error_kwargs, pl.defaults.xerrorbar)
plots['xerrorplot'].append(pl.xerrorbar(canvas, X[rows, free_dims].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims].flatten()), label=label,
update_not_existing_kwargs(error_kwargs, pl().defaults.xerrorbar)
plots['xerrorplot'].append(pl().xerrorbar(canvas, X[rows, free_dims].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims].flatten()), label=label,
**error_kwargs))
#2D plotting
elif len(free_dims) == 2:
update_not_existing_kwargs(error_kwargs, pl.defaults.xerrorbar) # @UndefinedVariable
update_not_existing_kwargs(error_kwargs, pl().defaults.xerrorbar) # @UndefinedVariable
for d in ycols:
plots['xerrorplot'].append(pl.xerrorbar(canvas, X[rows, free_dims[0]].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims[0]].flatten()), label=label,
plots['xerrorplot'].append(pl().xerrorbar(canvas, X[rows, free_dims[0]].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims[0]].flatten()), label=label,
**error_kwargs))
plots['yerrorplot'].append(pl.xerrorbar(canvas, X[rows, free_dims[1]].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims[1]].flatten()), label=label,
plots['yerrorplot'].append(pl().xerrorbar(canvas, X[rows, free_dims[1]].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims[1]].flatten()), label=label,
**error_kwargs))
elif len(free_dims) == 0:
pass #Nothing to plot!
@ -161,13 +162,13 @@ def _plot_data_error(self, canvas, which_data_rows='all',
def plot_inducing(self, visible_dims=None, projection='2d', label=None, **plot_kwargs):
"""
Plot the inducing inputs of a sparse gp model
:param array-like visible_dims: an array specifying the input dimensions to plot (maximum two)
:param kwargs plot_kwargs: keyword arguments for the plotting library
"""
canvas, kwargs = pl.new_canvas(projection=projection, **plot_kwargs)
canvas, kwargs = pl().new_canvas(projection=projection, **plot_kwargs)
plots = _plot_inducing(self, canvas, visible_dims, projection, label, **kwargs)
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
def _plot_inducing(self, canvas, visible_dims, projection, label, **plot_kwargs):
if visible_dims is None:
@ -180,15 +181,15 @@ def _plot_inducing(self, canvas, visible_dims, projection, label, **plot_kwargs)
#one dimensional plotting
if len(free_dims) == 1:
update_not_existing_kwargs(plot_kwargs, pl.defaults.inducing_1d) # @UndefinedVariable
plots['inducing'] = pl.plot_axis_lines(canvas, Z[:, free_dims], **plot_kwargs)
update_not_existing_kwargs(plot_kwargs, pl().defaults.inducing_1d) # @UndefinedVariable
plots['inducing'] = pl().plot_axis_lines(canvas, Z[:, free_dims], **plot_kwargs)
#2D plotting
elif len(free_dims) == 2 and projection == '3d':
update_not_existing_kwargs(plot_kwargs, pl.defaults.inducing_3d) # @UndefinedVariable
plots['inducing'] = pl.plot_axis_lines(canvas, Z[:, free_dims], **plot_kwargs)
update_not_existing_kwargs(plot_kwargs, pl().defaults.inducing_3d) # @UndefinedVariable
plots['inducing'] = pl().plot_axis_lines(canvas, Z[:, free_dims], **plot_kwargs)
elif len(free_dims) == 2:
update_not_existing_kwargs(plot_kwargs, pl.defaults.inducing_2d) # @UndefinedVariable
plots['inducing'] = pl.scatter(canvas, Z[:, free_dims[0]], Z[:, free_dims[1]],
update_not_existing_kwargs(plot_kwargs, pl().defaults.inducing_2d) # @UndefinedVariable
plots['inducing'] = pl().scatter(canvas, Z[:, free_dims[0]], Z[:, free_dims[1]],
**plot_kwargs)
elif len(free_dims) == 0:
pass #Nothing to plot!
@ -197,18 +198,18 @@ def _plot_inducing(self, canvas, visible_dims, projection, label, **plot_kwargs)
return plots
def plot_errorbars_trainset(self, which_data_rows='all',
which_data_ycols='all', fixed_inputs=None,
which_data_ycols='all', fixed_inputs=None,
plot_raw=False, apply_link=False, label=None, projection='2d',
predict_kw=None, **plot_kwargs):
"""
Plot the errorbars of the GP likelihood on the training data.
These are the errorbars after the appropriate
These are the errorbars after the appropriate
approximations according to the likelihood are done.
This also works for heteroscedastic likelihoods.
Give the Y_metadata in the predict_kw if you need it.
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice self.X, self.Y
:param which_data_ycols: when the data has several columns (independant outputs), only plot these
@ -217,13 +218,13 @@ def plot_errorbars_trainset(self, which_data_rows='all',
:param dict predict_kwargs: kwargs for the prediction used to predict the right quantiles.
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
"""
canvas, kwargs = pl.new_canvas(projection=projection, **plot_kwargs)
plots = _plot_errorbars_trainset(self, canvas, which_data_rows, which_data_ycols,
canvas, kwargs = pl().new_canvas(projection=projection, **plot_kwargs)
plots = _plot_errorbars_trainset(self, canvas, which_data_rows, which_data_ycols,
fixed_inputs, plot_raw, apply_link, label, projection, predict_kw, **kwargs)
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
def _plot_errorbars_trainset(self, canvas,
which_data_rows='all', which_data_ycols='all',
def _plot_errorbars_trainset(self, canvas,
which_data_rows='all', which_data_ycols='all',
fixed_inputs=None,
plot_raw=False, apply_link=False,
label=None, projection='2d', predict_kw=None, **plot_kwargs):
@ -232,43 +233,43 @@ def _plot_errorbars_trainset(self, canvas,
rows = get_which_data_rows(self, which_data_rows)
X, _, Y = get_x_y_var(self)
if fixed_inputs is None:
fixed_inputs = []
free_dims = get_free_dims(self, None, fixed_inputs)
free_dims = get_free_dims(self, None, fixed_inputs)
Xgrid = X.copy()
for i, v in fixed_inputs:
Xgrid[:, i] = v
plots = []
if len(free_dims)<=2:
update_not_existing_kwargs(plot_kwargs, pl.defaults.yerrorbar)
update_not_existing_kwargs(plot_kwargs, pl().defaults.yerrorbar)
if predict_kw is None:
predict_kw = {}
if 'Y_metadata' not in predict_kw:
predict_kw['Y_metadata'] = self.Y_metadata or {}
mu, percs, _ = helper_predict_with_model(self, Xgrid, plot_raw,
apply_link, (2.5, 97.5),
mu, percs, _ = helper_predict_with_model(self, Xgrid, plot_raw,
apply_link, (2.5, 97.5),
ycols, predict_kw)
if len(free_dims)==1:
for d in ycols:
plots.append(pl.yerrorbar(canvas, X[rows,free_dims[0]], mu[rows,d],
plots.append(pl().yerrorbar(canvas, X[rows,free_dims[0]], mu[rows,d],
np.vstack([mu[rows, d] - percs[0][rows, d], percs[1][rows, d] - mu[rows,d]]),
label=label,
label=label,
**plot_kwargs))
elif len(free_dims) == 2:
for d in ycols:
plots.append(pl.yerrorbar(canvas, X[rows,free_dims[0]], X[rows,free_dims[1]],
plots.append(pl().yerrorbar(canvas, X[rows,free_dims[0]], X[rows,free_dims[1]],
np.vstack([mu[rows, d] - percs[0][rows, d], percs[1][rows, d] - mu[rows,d]]),
color=Y[rows,d],
label=label,
label=label,
**plot_kwargs))
plots.append(pl.xerrorbar(canvas, X[rows,free_dims[0]], X[rows,free_dims[1]],
plots.append(pl().xerrorbar(canvas, X[rows,free_dims[0]], X[rows,free_dims[1]],
np.vstack([mu[rows, d] - percs[0][rows, d], percs[1][rows, d] - mu[rows,d]]),
color=Y[rows,d],
label=label,
label=label,
**plot_kwargs))
pass #Nothing to plot!
else:

View file

@ -1,21 +1,21 @@
#===============================================================================
# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ -30,7 +30,7 @@
import numpy as np
from . import pl
from . import plotting_library as pl
from .plot_util import helper_for_plot_data, update_not_existing_kwargs, \
helper_predict_with_model, get_which_data_ycols
from .data_plots import _plot_data, _plot_inducing, _plot_data_error
@ -47,8 +47,8 @@ def plot_mean(self, plot_limits=None, fixed_inputs=None,
Plot the mean of the GP.
Give the Y_metadata in the predict_kw if you need it.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
@ -57,22 +57,22 @@ def plot_mean(self, plot_limits=None, fixed_inputs=None,
:param bool plot_raw: plot the latent function (usually denoted f) only?
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param array-like which_data_ycols: which columns of y to plot (array-like or list of ints)
:param int levels: for 2D plotting, the number of contour levels to use is
:param int levels: for 2D plotting, the number of contour levels to use is
:param {'2d','3d'} projection: whether to plot in 2d or 3d. This only applies when plotting two dimensional inputs!
:param str label: the label for the plot.
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
"""
canvas, kwargs = pl.new_canvas(projection=projection, **kwargs)
canvas, kwargs = pl().new_canvas(projection=projection, **kwargs)
helper_data = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw,
apply_link, None,
get_which_data_ycols(self, which_data_ycols),
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw,
apply_link, None,
get_which_data_ycols(self, which_data_ycols),
predict_kw)
plots = _plot_mean(self, canvas, helper_data, helper_prediction,
plots = _plot_mean(self, canvas, helper_data, helper_prediction,
levels, projection, label, **kwargs)
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
def _plot_mean(self, canvas, helper_data, helper_prediction,
def _plot_mean(self, canvas, helper_data, helper_prediction,
levels=20, projection='2d', label=None,
**kwargs):
@ -81,31 +81,31 @@ def _plot_mean(self, canvas, helper_data, helper_prediction,
mu, _, _ = helper_prediction
if len(free_dims)==1:
# 1D plotting:
update_not_existing_kwargs(kwargs, pl.defaults.meanplot_1d) # @UndefinedVariable
plots = dict(gpmean=[pl.plot(canvas, Xgrid[:, free_dims], mu, label=label, **kwargs)])
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_1d) # @UndefinedVariable
plots = dict(gpmean=[pl().plot(canvas, Xgrid[:, free_dims], mu, label=label, **kwargs)])
else:
if projection == '2d':
update_not_existing_kwargs(kwargs, pl.defaults.meanplot_2d) # @UndefinedVariable
plots = dict(gpmean=[pl.contour(canvas, x[:,0], y[0,:],
mu.reshape(resolution, resolution),
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_2d) # @UndefinedVariable
plots = dict(gpmean=[pl().contour(canvas, x[:,0], y[0,:],
mu.reshape(resolution, resolution),
levels=levels, label=label, **kwargs)])
elif projection == '3d':
update_not_existing_kwargs(kwargs, pl.defaults.meanplot_3d) # @UndefinedVariable
plots = dict(gpmean=[pl.surface(canvas, x, y,
mu.reshape(resolution, resolution),
label=label,
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_3d) # @UndefinedVariable
plots = dict(gpmean=[pl().surface(canvas, x, y,
mu.reshape(resolution, resolution),
label=label,
**kwargs)])
elif len(free_dims)==0:
pass # Nothing to plot!
else:
raise RuntimeError('Cannot plot mean in more then 2 input dimensions')
return plots
def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=False,
apply_link=False, visible_dims=None,
which_data_ycols='all', label='gp confidence',
predict_kw=None,
predict_kw=None,
**kwargs):
"""
Plot the confidence interval between the percentiles lower and upper.
@ -113,7 +113,7 @@ def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=
Note: Only implemented for one dimension!
Give the Y_metadata in the predict_kw if you need it.
:param float lower: the lower percentile to plot
:param float upper: the upper percentile to plot
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
@ -127,24 +127,24 @@ def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=
:param array-like which_data_ycols: which columns of the output y (!) to plot (array-like or list of ints)
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
"""
canvas, kwargs = pl.new_canvas(**kwargs)
canvas, kwargs = pl().new_canvas(**kwargs)
ycols = get_which_data_ycols(self, which_data_ycols)
helper_data = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw, apply_link,
(lower, upper),
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw, apply_link,
(lower, upper),
ycols, predict_kw)
plots = _plot_confidence(self, canvas, helper_data, helper_prediction, label, **kwargs)
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
def _plot_confidence(self, canvas, helper_data, helper_prediction, label, **kwargs):
_, _, _, _, free_dims, Xgrid, _, _, _, _, _ = helper_data
update_not_existing_kwargs(kwargs, pl.defaults.confidence_interval) # @UndefinedVariable
update_not_existing_kwargs(kwargs, pl().defaults.confidence_interval) # @UndefinedVariable
if len(free_dims)<=1:
if len(free_dims)==1:
percs = helper_prediction[1]
fills = []
for d in range(helper_prediction[0].shape[1]):
fills.append(pl.fill_between(canvas, Xgrid[:,free_dims[0]], percs[0][:,d], percs[1][:,d], label=label, **kwargs))
fills.append(pl().fill_between(canvas, Xgrid[:,free_dims[0]], percs[0][:,d], percs[1][:,d], label=label, **kwargs))
return dict(gpconfidence=fills)
else:
pass #Nothing to plot!
@ -163,8 +163,8 @@ def plot_samples(self, plot_limits=None, fixed_inputs=None,
Plot the mean of the GP.
Give the Y_metadata in the predict_kw if you need it.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
@ -175,17 +175,17 @@ def plot_samples(self, plot_limits=None, fixed_inputs=None,
:param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
:param array-like which_data_ycols: which columns of y to plot (array-like or list of ints)
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
:param int levels: for 2D plotting, the number of contour levels to use is
:param int levels: for 2D plotting, the number of contour levels to use is
"""
canvas, kwargs = pl.new_canvas(projection=projection, **kwargs)
canvas, kwargs = pl().new_canvas(projection=projection, **kwargs)
ycols = get_which_data_ycols(self, which_data_ycols)
helper_data = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw, apply_link,
None,
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw, apply_link,
None,
ycols, predict_kw, samples)
plots = _plot_samples(self, canvas, helper_data, helper_prediction,
projection, label, **kwargs)
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
def _plot_samples(self, canvas, helper_data, helper_prediction, projection,
label, **kwargs):
@ -195,14 +195,14 @@ def _plot_samples(self, canvas, helper_data, helper_prediction, projection,
if len(free_dims)<=2:
if len(free_dims)==1:
# 1D plotting:
update_not_existing_kwargs(kwargs, pl.defaults.samples_1d) # @UndefinedVariable
return dict(gpmean=[pl.plot(canvas, Xgrid[:, free_dims], samples, label=label, **kwargs)])
update_not_existing_kwargs(kwargs, pl().defaults.samples_1d) # @UndefinedVariable
return dict(gpmean=[pl().plot(canvas, Xgrid[:, free_dims], samples, label=label, **kwargs)])
elif len(free_dims)==2 and projection=='3d':
update_not_existing_kwargs(kwargs, pl.defaults.samples_3d) # @UndefinedVariable
update_not_existing_kwargs(kwargs, pl().defaults.samples_3d) # @UndefinedVariable
for s in range(samples.shape[-1]):
return dict(gpmean=[pl.surface(canvas, x,
y, samples[:, s].reshape(resolution, resolution),
**kwargs)])
return dict(gpmean=[pl().surface(canvas, x,
y, samples[:, s].reshape(resolution, resolution),
**kwargs)])
else:
pass # Nothing to plot!
else:
@ -211,10 +211,10 @@ def _plot_samples(self, canvas, helper_data, helper_prediction, projection,
def plot_density(self, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=False,
apply_link=False, visible_dims=None,
apply_link=False, visible_dims=None,
which_data_ycols='all',
levels=35, label='gp density',
predict_kw=None,
levels=35, label='gp density',
predict_kw=None,
**kwargs):
"""
Plot the confidence interval between the percentiles lower and upper.
@ -222,7 +222,7 @@ def plot_density(self, plot_limits=None, fixed_inputs=None,
Note: Only implemented for one dimension!
Give the Y_metadata in the predict_kw if you need it.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
@ -232,30 +232,30 @@ def plot_density(self, plot_limits=None, fixed_inputs=None,
:param bool apply_link: whether to apply the link function of the GP to the raw prediction.
:param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
:param array-like which_data_ycols: which columns of y to plot (array-like or list of ints)
:param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you.
:param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you.
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
"""
canvas, kwargs = pl.new_canvas(**kwargs)
canvas, kwargs = pl().new_canvas(**kwargs)
helper_data = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw,
apply_link, np.linspace(2.5, 97.5, levels*2),
get_which_data_ycols(self, which_data_ycols),
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw,
apply_link, np.linspace(2.5, 97.5, levels*2),
get_which_data_ycols(self, which_data_ycols),
predict_kw)
plots = _plot_density(self, canvas, helper_data, helper_prediction, label, **kwargs)
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
def _plot_density(self, canvas, helper_data, helper_prediction, label, **kwargs):
_, _, _, _, free_dims, Xgrid, _, _, _, _, _ = helper_data
mu, percs, _ = helper_prediction
update_not_existing_kwargs(kwargs, pl.defaults.density) # @UndefinedVariable
update_not_existing_kwargs(kwargs, pl().defaults.density) # @UndefinedVariable
if len(free_dims)<=1:
if len(free_dims)==1:
# 1D plotting:
fills = []
for d in range(mu.shape[1]):
fills.append(pl.fill_gradient(canvas, Xgrid[:, free_dims[0]], [p[:,d] for p in percs], label=label, **kwargs))
fills.append(pl().fill_gradient(canvas, Xgrid[:, free_dims[0]], [p[:,d] for p in percs], label=label, **kwargs))
return dict(gpdensity=fills)
else:
pass # Nothing to plot!
@ -263,20 +263,20 @@ def _plot_density(self, canvas, helper_data, helper_prediction, label, **kwargs)
raise RuntimeError('Can only plot density in one input dimension')
def plot(self, plot_limits=None, fixed_inputs=None,
resolution=None,
plot_raw=False, apply_link=False,
resolution=None,
plot_raw=False, apply_link=False,
which_data_ycols='all', which_data_rows='all',
visible_dims=None,
levels=20, samples=0, samples_likelihood=0, lower=2.5, upper=97.5,
visible_dims=None,
levels=20, samples=0, samples_likelihood=0, lower=2.5, upper=97.5,
plot_data=True, plot_inducing=True, plot_density=False,
predict_kw=None, projection='2d', legend=False, **kwargs):
predict_kw=None, projection='2d', legend=False, **kwargs):
"""
Convinience function for plotting the fit of a GP.
Give the Y_metadata in the predict_kw if you need it.
If you want fine graned control use the specific plotting functions supplied in the model.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
:type plot_limits: np.array
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
@ -289,7 +289,7 @@ def plot(self, plot_limits=None, fixed_inputs=None,
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice self.X, self.Y
:param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
:param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you.
:param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you.
:param int samples: the number of samples to draw from the GP and plot into the plot. This will allways be samples from the latent function.
:param int samples_likelihood: the number of samples to draw from the GP and apply the likelihood noise. This is usually not what you want!
:param float lower: the lower percentile to plot
@ -299,14 +299,14 @@ def plot(self, plot_limits=None, fixed_inputs=None,
:param bool plot_density: plot density instead of the confidence interval?
:param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
"""
canvas, _ = pl.new_canvas(projection=projection, **kwargs)
canvas, _ = pl().new_canvas(projection=projection, **kwargs)
helper_data = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw,
apply_link, np.linspace(2.5, 97.5, levels*2) if plot_density else (lower,upper),
get_which_data_ycols(self, which_data_ycols),
helper_prediction = helper_predict_with_model(self, helper_data[5], plot_raw,
apply_link, np.linspace(2.5, 97.5, levels*2) if plot_density else (lower,upper),
get_which_data_ycols(self, which_data_ycols),
predict_kw, samples)
if plot_raw and not apply_link:
# It does not make sense to plot the data (which lives not in the latent function space) into latent function space.
# It does not make sense to plot the data (which lives not in the latent function space) into latent function space.
plot_data = False
plots = {}
if plot_data:
@ -314,33 +314,33 @@ def plot(self, plot_limits=None, fixed_inputs=None,
plots.update(_plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, "Data Error"))
plots.update(_plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing, plot_density, projection))
if plot_raw and (samples_likelihood > 0):
helper_prediction = helper_predict_with_model(self, helper_data[5], False,
apply_link, None,
get_which_data_ycols(self, which_data_ycols),
helper_prediction = helper_predict_with_model(self, helper_data[5], False,
apply_link, None,
get_which_data_ycols(self, which_data_ycols),
predict_kw, samples_likelihood)
plots.update(_plot_samples(canvas, helper_data, helper_prediction, projection, "Lik Samples"))
if hasattr(self, 'Z') and plot_inducing:
plots.update(_plot_inducing(self, canvas, visible_dims, projection, 'Inducing'))
return pl.add_to_canvas(canvas, plots, legend=legend)
return pl().add_to_canvas(canvas, plots, legend=legend)
def plot_f(self, plot_limits=None, fixed_inputs=None,
resolution=None,
apply_link=False,
resolution=None,
apply_link=False,
which_data_ycols='all', which_data_rows='all',
visible_dims=None,
levels=20, samples=0, lower=2.5, upper=97.5,
visible_dims=None,
levels=20, samples=0, lower=2.5, upper=97.5,
plot_density=False,
plot_data=True, plot_inducing=True,
projection='2d', legend=False,
predict_kw=None,
**kwargs):
predict_kw=None,
**kwargs):
"""
Convinience function for plotting the fit of a GP.
This is the same as plot, except it plots the latent function fit of the GP!
If you want fine graned control use the specific plotting functions supplied in the model.
Give the Y_metadata in the predict_kw if you need it.
:param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits
@ -354,7 +354,7 @@ def plot_f(self, plot_limits=None, fixed_inputs=None,
:param which_data_rows: which of the training data to plot (default all)
:type which_data_rows: 'all' or a slice object to slice self.X, self.Y
:param array-like visible_dims: an array specifying the input dimensions to plot (maximum two)
:param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you.
:param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you.
:param int samples: the number of samples to draw from the GP and plot into the plot. This will allways be samples from the latent function.
:param float lower: the lower percentile to plot
:param float upper: the upper percentile to plot
@ -365,17 +365,17 @@ def plot_f(self, plot_limits=None, fixed_inputs=None,
:param dict error_kwargs: kwargs for the error plot for the plotting library you are using
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
"""
plot(self, plot_limits, fixed_inputs, resolution, True,
apply_link, which_data_ycols, which_data_rows,
plot(self, plot_limits, fixed_inputs, resolution, True,
apply_link, which_data_ycols, which_data_rows,
visible_dims, levels, samples, 0,
lower, upper, plot_data, plot_inducing,
lower, upper, plot_data, plot_inducing,
plot_density, predict_kw, projection, legend)
def _plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing=True, plot_density=False, projection='2d'):
def _plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing=True, plot_density=False, projection='2d'):
plots.update(_plot_mean(self, canvas, helper_data, helper_prediction, levels, projection, 'Mean'))
try:
if projection=='2d':
if not plot_density:
@ -385,7 +385,7 @@ def _plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_indu
except RuntimeError:
#plotting in 2d
pass
if helper_prediction[2] is not None:
plots.update(_plot_samples(self, canvas, helper_data, helper_prediction, projection, "Samples"))
plots.update(_plot_samples(self, canvas, helper_data, helper_prediction, projection, "Samples"))
return plots

View file

@ -5,24 +5,23 @@
#import Tango
#from base_plots import gpplot, x_frame1D, x_frame2D
from . import pl
from . import plotting_library as pl
def plot_optimizer(optimizer, **kwargs):
if optimizer.trace == None:
print("No trace present so I can't plot it. Please check that the optimizer actually supplies a trace.")
else:
canvas, kwargs = pl.new_canvas(**kwargs)
plots = dict(trace=pl.plot(range(len(optimizer.trace)), optimizer.trace))
return pl.add_to_canvas(canvas, plots, xlabel='Iteration', ylabel='f(x)')
canvas, kwargs = pl().new_canvas(**kwargs)
plots = dict(trace=pl().plot(range(len(optimizer.trace)), optimizer.trace))
return pl().add_to_canvas(canvas, plots, xlabel='Iteration', ylabel='f(x)')
def plot_sgd_traces(optimizer):
figure = pl.figure(2,1)
canvas, _ = pl.new_canvas(figure, 1, 1, title="Parameters")
figure = pl().figure(2,1)
canvas, _ = pl().new_canvas(figure, 1, 1, title="Parameters")
plots = dict(lines=[])
for k in optimizer.param_traces.keys():
plots['lines'].append(pl.plot(canvas, range(len(optimizer.param_traces[k])), optimizer.param_traces[k], label=k))
pl.add_to_canvas(canvas, legend=True)
canvas, _ = pl.new_canvas(figure, 1, 2, title="Objective function")
pl.plot(canvas, range(len(optimizer.fopt_trace)), optimizer.fopt_trace)
return pl.add_to_canvas(canvas, plots, legend=True)
plots['lines'].append(pl().plot(canvas, range(len(optimizer.param_traces[k])), optimizer.param_traces[k], label=k))
pl().add_to_canvas(canvas, legend=True)
canvas, _ = pl().new_canvas(figure, 1, 2, title="Objective function")
pl().plot(canvas, range(len(optimizer.fopt_trace)), optimizer.fopt_trace)
return pl().add_to_canvas(canvas, plots, legend=True)

View file

@ -1,21 +1,21 @@
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#
# * Neither the name of GPy.plotting.gpy_plot.kernel_plots nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ -28,14 +28,14 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from . import pl
from . import plotting_library as pl
from .. import Tango
from .plot_util import get_x_y_var,\
update_not_existing_kwargs, \
helper_for_plot_data, scatter_label_generator, subsample_X,\
find_best_layout_for_subplots
def plot_ARD(kernel, filtering=None, **kwargs):
def plot_ARD(kernel, filtering=None, legend=False, **kwargs):
"""
If an ARD kernel is present, plot a bar representation using matplotlib
@ -45,11 +45,8 @@ def plot_ARD(kernel, filtering=None, **kwargs):
will be used for plotting.
:type filtering: list of names to use for ARD plot
"""
canvas, kwargs = pl.new_canvas(kwargs)
Tango.reset()
bars = []
ard_params = np.atleast_2d(kernel.input_sensitivity(summarize=False))
bottom = 0
last_bottom = bottom
@ -59,20 +56,24 @@ def plot_ARD(kernel, filtering=None, **kwargs):
if filtering is None:
filtering = kernel.parameter_names(recursive=False)
bars = []
kwargs = update_not_existing_kwargs(kwargs, pl().defaults.ard)
canvas, kwargs = pl().new_canvas(xlim=(-.5, kernel.input_dim-.5), **kwargs)
for i in range(ard_params.shape[0]):
if kernel.parameters[i].name in filtering:
c = Tango.nextMedium()
bars.append(pl.barplot(canvas, x, ard_params[i,:], color=c, label=kernel.parameters[i].name, bottom=bottom))
bars.append(pl().barplot(canvas, x,
ard_params[i,:], color=c,
label=kernel.parameters[i].name,
bottom=bottom, **kwargs))
last_bottom = ard_params[i,:]
bottom += last_bottom
else:
print("filtering out {}".format(kernel.parameters[i].name))
plt.add_to_canvas()
ax.set_xlim(-.5, kernel.input_dim - .5)
add_bar_labels(fig, ax, [bars[-1]], bottom=bottom-last_bottom)
#add_bar_labels(fig, ax, [bars[-1]], bottom=bottom-last_bottom)
return dict(barplots=bars)
return pl().add_to_canvas(canvas, bars, legend=legend)
def plot_covariance(kernel, x=None, label=None, plot_limits=None, visible_dims=None, resolution=None, projection=None, levels=20, **mpl_kwargs):
"""
@ -85,7 +86,7 @@ def plot_covariance(kernel, x=None, label=None, plot_limits=None, visible_dims=N
:resolution: the resolution of the lines used in plotting
:mpl_kwargs avalid keyword arguments to pass through to matplotlib (e.g. lw=7)
"""
canvas, error_kwargs = pl.new_canvas(projection=projection, **error_kwargs)
canvas, error_kwargs = pl().new_canvas(projection=projection, **error_kwargs)
_, _, _, _, free_dims, Xgrid, x, y, _, _, resolution = helper_for_plot_data(kernel, plot_limits, visible_dims, None, resolution)
if len(free_dims)<=2:
@ -96,22 +97,22 @@ def plot_covariance(kernel, x=None, label=None, plot_limits=None, visible_dims=N
assert x.size == 1, "The size of the fixed variable x is not 1"
x = x.reshape((1, 1))
# 1D plotting:
update_not_existing_kwargs(kwargs, pl.defaults.meanplot_1d) # @UndefinedVariable
plots = dict(covariance=[pl.plot(canvas, Xgrid[:, free_dims], mu, label=label, **kwargs)])
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_1d) # @UndefinedVariable
plots = dict(covariance=[pl().plot(canvas, Xgrid[:, free_dims], mu, label=label, **kwargs)])
else:
if projection == '2d':
update_not_existing_kwargs(kwargs, pl.defaults.meanplot_2d) # @UndefinedVariable
plots = dict(covariance=[pl.contour(canvas, x, y,
mu.reshape(resolution, resolution).T,
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_2d) # @UndefinedVariable
plots = dict(covariance=[pl().contour(canvas, x, y,
mu.reshape(resolution, resolution).T,
levels=levels, label=label, **kwargs)])
elif projection == '3d':
update_not_existing_kwargs(kwargs, pl.defaults.meanplot_3d) # @UndefinedVariable
plots = dict(covariance=[pl.surface(canvas, x, y,
mu.reshape(resolution, resolution).T,
label=label,
update_not_existing_kwargs(kwargs, pl().defaults.meanplot_3d) # @UndefinedVariable
plots = dict(covariance=[pl().surface(canvas, x, y,
mu.reshape(resolution, resolution).T,
label=label,
**kwargs)])
return pl.add_to_canvas(canvas, plots)
return pl().add_to_canvas(canvas, plots)
if kernel.input_dim == 1:
@ -158,5 +159,5 @@ def plot_covariance(kernel, x=None, label=None, plot_limits=None, visible_dims=N
ax.set_title("k(x1,x2 ; %0.1f,%0.1f)" % (x[0, 0], x[0, 1]))
else:
raise NotImplementedError("Cannot plot a kernel with more than two input dimensions")
pass

View file

@ -1,21 +1,21 @@
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#
# * Neither the name of GPy.plotting.gpy_plot.latent_plots nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ -28,7 +28,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from . import pl
from . import plotting_library as pl
from .plot_util import get_x_y_var,\
update_not_existing_kwargs, \
helper_for_plot_data, scatter_label_generator, subsample_X,\
@ -49,34 +49,34 @@ def _wait_for_updates(view, updates):
except TypeError:
# No updateable view:
pass
def _plot_latent_scatter(canvas, X, visible_dims, labels, marker, num_samples, projection='2d', **kwargs):
from .. import Tango
Tango.reset()
X, labels = subsample_X(X, labels, num_samples)
scatters = []
scatters = []
generate_colors = 'color' not in kwargs
for x, y, z, this_label, _, m in scatter_label_generator(labels, X, visible_dims, marker):
update_not_existing_kwargs(kwargs, pl.defaults.latent_scatter)
update_not_existing_kwargs(kwargs, pl().defaults.latent_scatter)
if generate_colors:
kwargs['color'] = Tango.nextMedium()
if projection == '3d':
scatters.append(pl.scatter(canvas, x, y, Z=z, marker=m, label=this_label, **kwargs))
else: scatters.append(pl.scatter(canvas, x, y, marker=m, label=this_label, **kwargs))
scatters.append(pl().scatter(canvas, x, y, Z=z, marker=m, label=this_label, **kwargs))
else: scatters.append(pl().scatter(canvas, x, y, marker=m, label=this_label, **kwargs))
return scatters
def plot_latent_scatter(self, labels=None,
def plot_latent_scatter(self, labels=None,
which_indices=None,
legend=True,
plot_limits=None,
marker='<>^vsd',
marker='<>^vsd',
num_samples=1000,
projection='2d',
**kwargs):
"""
Plot a scatter plot of the latent space.
:param array-like labels: a label for each data point (row) of the inputs
:param (int, int) which_indices: which input dimensions to plot against each other
:param bool legend: whether to plot the legend on the figure
@ -84,11 +84,11 @@ def plot_latent_scatter(self, labels=None,
:type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))
:param str marker: markers to use - cycle if more labels then markers are given
:param kwargs: the kwargs for the scatter plots
"""
"""
input_1, input_2, input_3 = sig_dims = self.get_most_significant_input_dimensions(which_indices)
canvas, kwargs = pl.new_canvas(projection=projection,
xlabel='latent dimension %i' % input_1,
canvas, kwargs = pl().new_canvas(projection=projection,
xlabel='latent dimension %i' % input_1,
ylabel='latent dimension %i' % input_2,
zlabel='latent dimension %i' % input_3, **kwargs)
X, _, _ = get_x_y_var(self)
@ -98,22 +98,22 @@ def plot_latent_scatter(self, labels=None,
else:
legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]
scatters = _plot_latent_scatter(canvas, X, sig_dims, labels, marker, num_samples, projection=projection, **kwargs)
return pl.add_to_canvas(canvas, dict(scatter=scatters), legend=legend)
return pl().add_to_canvas(canvas, dict(scatter=scatters), legend=legend)
def plot_latent_inducing(self,
def plot_latent_inducing(self,
which_indices=None,
legend=False,
plot_limits=None,
marker='^',
marker='^',
num_samples=1000,
projection='2d',
**kwargs):
"""
Plot a scatter plot of the inducing inputs.
:param array-like labels: a label for each data point (row) of the inputs
:param (int, int) which_indices: which input dimensions to plot against each other
:param bool legend: whether to plot the legend on the figure
@ -121,54 +121,54 @@ def plot_latent_inducing(self,
:type plot_limits: (xmin, xmax, ymin, ymax) or ((xmin, xmax), (ymin, ymax))
:param str marker: markers to use - cycle if more labels then markers are given
:param kwargs: the kwargs for the scatter plots
"""
"""
input_1, input_2, input_3 = sig_dims = self.get_most_significant_input_dimensions(which_indices)
if 'color' not in kwargs:
kwargs['color'] = 'white'
canvas, kwargs = pl.new_canvas(projection=projection,
xlabel='latent dimension %i' % input_1,
canvas, kwargs = pl().new_canvas(projection=projection,
xlabel='latent dimension %i' % input_1,
ylabel='latent dimension %i' % input_2,
zlabel='latent dimension %i' % input_3, **kwargs)
Z = self.Z.values
labels = np.array(['inducing'] * Z.shape[0])
scatters = _plot_latent_scatter(canvas, Z, sig_dims, labels, marker, num_samples, projection=projection, **kwargs)
return pl.add_to_canvas(canvas, dict(scatter=scatters), legend=legend)
return pl().add_to_canvas(canvas, dict(scatter=scatters), legend=legend)
def _plot_magnification(self, canvas, which_indices, Xgrid,
def _plot_magnification(self, canvas, which_indices, Xgrid,
xmin, xmax, resolution, updates,
mean=True, covariance=True,
kern=None,
mean=True, covariance=True,
kern=None,
**imshow_kwargs):
def plot_function(x):
Xtest_full = np.zeros((x.shape[0], Xgrid.shape[1]))
Xtest_full[:, which_indices] = x
mf = self.predict_magnification(Xtest_full, kern=kern, mean=mean, covariance=covariance)
return mf.reshape(resolution, resolution).T
imshow_kwargs = update_not_existing_kwargs(imshow_kwargs, pl.defaults.magnification)
imshow_kwargs = update_not_existing_kwargs(imshow_kwargs, pl().defaults.magnification)
try:
if updates:
return pl.imshow_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, **imshow_kwargs)
return pl().imshow_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, **imshow_kwargs)
else: raise NotImplementedError
except NotImplementedError:
return pl.imshow(canvas, plot_function(Xgrid[:, which_indices]), (xmin[0], xmax[0], xmin[1], xmax[1]), **imshow_kwargs)
return pl().imshow(canvas, plot_function(Xgrid[:, which_indices]), (xmin[0], xmax[0], xmin[1], xmax[1]), **imshow_kwargs)
def plot_magnification(self, labels=None, which_indices=None,
resolution=60, marker='<>^vsd', legend=True,
plot_limits=None,
updates=False,
mean=True, covariance=True,
updates=False,
mean=True, covariance=True,
kern=None, num_samples=1000,
scatter_kwargs=None, **imshow_kwargs):
"""
Plot the magnification factor of the GP on the inputs. This is the
Plot the magnification factor of the GP on the inputs. This is the
density of the GP as a gray scale.
:param array-like labels: a label for each data point (row) of the inputs
:param (int, int) which_indices: which input dimensions to plot against each other
:param int resolution: the resolution at which we predict the magnification factor
@ -180,13 +180,13 @@ def plot_magnification(self, labels=None, which_indices=None,
:param bool mean: use the mean of the Wishart embedding for the magnification factor
:param bool covariance: use the covariance of the Wishart embedding for the magnification factor
:param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction
:param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.
:param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.
:param imshow_kwargs: the kwargs for the imshow (magnification factor)
:param kwargs: the kwargs for the scatter plots
"""
input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]
X, _, _, _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, plot_limits, which_indices, None, resolution)
canvas, imshow_kwargs = pl.new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),
X, _, _, _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, plot_limits, which_indices, None, resolution)
canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),
xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)
if (labels is not None):
legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]
@ -195,18 +195,18 @@ def plot_magnification(self, labels=None, which_indices=None,
legend = False
scatters = _plot_latent_scatter(canvas, X, which_indices, labels, marker, num_samples, projection='2d', **scatter_kwargs or {})
view = _plot_magnification(self, canvas, which_indices[:2], Xgrid, xmin, xmax, resolution, updates, mean, covariance, kern, **imshow_kwargs)
retval = pl.add_to_canvas(canvas, dict(scatter=scatters, imshow=view),
legend=legend,
retval = pl().add_to_canvas(canvas, dict(scatter=scatters, imshow=view),
legend=legend,
)
_wait_for_updates(view, updates)
return retval
def _plot_latent(self, canvas, which_indices, Xgrid,
def _plot_latent(self, canvas, which_indices, Xgrid,
xmin, xmax, resolution, updates,
kern=None,
kern=None,
**imshow_kwargs):
def plot_function(x):
Xtest_full = np.zeros((x.shape[0], Xgrid.shape[1]))
@ -214,26 +214,26 @@ def _plot_latent(self, canvas, which_indices, Xgrid,
mf = np.log(self.predict(Xtest_full, kern=kern)[1])
return mf.reshape(resolution, resolution).T
imshow_kwargs = update_not_existing_kwargs(imshow_kwargs, pl.defaults.latent)
imshow_kwargs = update_not_existing_kwargs(imshow_kwargs, pl().defaults.latent)
try:
if updates:
return pl.imshow_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, **imshow_kwargs)
return pl().imshow_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, **imshow_kwargs)
else: raise NotImplementedError
except NotImplementedError:
return pl.imshow(canvas, plot_function(Xgrid[:, which_indices]), (xmin[0], xmax[0], xmin[1], xmax[1]), **imshow_kwargs)
return pl().imshow(canvas, plot_function(Xgrid[:, which_indices]), (xmin[0], xmax[0], xmin[1], xmax[1]), **imshow_kwargs)
def plot_latent(self, labels=None, which_indices=None,
resolution=60, legend=True,
plot_limits=None,
updates=False,
kern=None, marker='<>^vsd',
updates=False,
kern=None, marker='<>^vsd',
num_samples=1000,
scatter_kwargs=None, **imshow_kwargs):
"""
Plot the latent space of the GP on the inputs. This is the
density of the GP posterior as a grey scale and the
Plot the latent space of the GP on the inputs. This is the
density of the GP posterior as a grey scale and the
scatter plot of the input dimemsions selected by which_indices.
:param array-like labels: a label for each data point (row) of the inputs
:param (int, int) which_indices: which input dimensions to plot against each other
:param int resolution: the resolution at which we predict the magnification factor
@ -243,13 +243,13 @@ def plot_latent(self, labels=None, which_indices=None,
:param bool updates: if possible, make interactive updates using the specific library you are using
:param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction
:param str marker: markers to use - cycle if more labels then markers are given
:param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.
:param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.
:param imshow_kwargs: the kwargs for the imshow (magnification factor)
:param scatter_kwargs: the kwargs for the scatter plots
"""
input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]
X, _, _, _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, plot_limits, which_indices, None, resolution)
canvas, imshow_kwargs = pl.new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),
X, _, _, _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, plot_limits, which_indices, None, resolution)
canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),
xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)
if (labels is not None):
legend = find_best_layout_for_subplots(len(np.unique(labels)))[1]
@ -258,13 +258,13 @@ def plot_latent(self, labels=None, which_indices=None,
legend = False
scatters = _plot_latent_scatter(canvas, X, which_indices, labels, marker, num_samples, projection='2d', **scatter_kwargs or {})
view = _plot_latent(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, updates, kern, **imshow_kwargs)
retval = pl.add_to_canvas(canvas, dict(scatter=scatters, imshow=view), legend=legend)
retval = pl().add_to_canvas(canvas, dict(scatter=scatters, imshow=view), legend=legend)
_wait_for_updates(view, updates)
return retval
def _plot_steepest_gradient_map(self, canvas, which_indices, Xgrid,
def _plot_steepest_gradient_map(self, canvas, which_indices, Xgrid,
xmin, xmax, resolution, output_labels, updates,
kern=None, annotation_kwargs=None,
kern=None, annotation_kwargs=None,
**imshow_kwargs):
if output_labels is None:
output_labels = range(self.output_dim)
@ -274,30 +274,30 @@ def _plot_steepest_gradient_map(self, canvas, which_indices, Xgrid,
#dmu_dX = self.predictive_gradients(Xgrid, kern=kern)[0].sum(1)
argmax = np.argmax(dmu_dX, 1).astype(int)
return dmu_dX.max(1).reshape(resolution, resolution).T, np.array(output_labels)[argmax].reshape(resolution, resolution).T
annotation_kwargs = update_not_existing_kwargs(annotation_kwargs or {}, pl.defaults.annotation)
imshow_kwargs = update_not_existing_kwargs(imshow_kwargs or {}, pl.defaults.gradient)
annotation_kwargs = update_not_existing_kwargs(annotation_kwargs or {}, pl().defaults.annotation)
imshow_kwargs = update_not_existing_kwargs(imshow_kwargs or {}, pl().defaults.gradient)
try:
if updates:
return dict(annotation=pl.annotation_heatmap_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, imshow_kwargs=imshow_kwargs, **annotation_kwargs))
return dict(annotation=pl().annotation_heatmap_interact(canvas, plot_function, (xmin[0], xmax[0], xmin[1], xmax[1]), resolution=resolution, imshow_kwargs=imshow_kwargs, **annotation_kwargs))
else:
raise NotImplementedError
except NotImplementedError:
imshow, annotation = pl.annotation_heatmap(canvas, *plot_function(Xgrid[:, which_indices]), extent=(xmin[0], xmax[0], xmin[1], xmax[1]), imshow_kwargs=imshow_kwargs, **annotation_kwargs)
imshow, annotation = pl().annotation_heatmap(canvas, *plot_function(Xgrid[:, which_indices]), extent=(xmin[0], xmax[0], xmin[1], xmax[1]), imshow_kwargs=imshow_kwargs, **annotation_kwargs)
return dict(heatmap=imshow, annotation=annotation)
def plot_steepest_gradient_map(self, output_labels=None, data_labels=None, which_indices=None,
resolution=15, legend=True,
plot_limits=None,
updates=False,
kern=None, marker='<>^vsd',
updates=False,
kern=None, marker='<>^vsd',
num_samples=1000,
annotation_kwargs=None, scatter_kwargs=None, **imshow_kwargs):
"""
Plot the latent space of the GP on the inputs. This is the
density of the GP posterior as a grey scale and the
Plot the latent space of the GP on the inputs. This is the
density of the GP posterior as a grey scale and the
scatter plot of the input dimemsions selected by which_indices.
:param array-like labels: a label for each data point (row) of the inputs
:param (int, int) which_indices: which input dimensions to plot against each other
:param int resolution: the resolution at which we predict the magnification factor
@ -307,14 +307,14 @@ def plot_steepest_gradient_map(self, output_labels=None, data_labels=None, which
:param bool updates: if possible, make interactive updates using the specific library you are using
:param :py:class:`~GPy.kern.Kern` kern: the kernel to use for prediction
:param str marker: markers to use - cycle if more labels then markers are given
:param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.
:param int num_samples: the number of samples to plot maximally. We do a stratified subsample from the labels, if the number of samples (in X) is higher then num_samples.
:param imshow_kwargs: the kwargs for the imshow (magnification factor)
:param annotation_kwargs: the kwargs for the annotation plot
:param scatter_kwargs: the kwargs for the scatter plots
"""
input_1, input_2 = which_indices = self.get_most_significant_input_dimensions(which_indices)[:2]
X, _, _, _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, plot_limits, which_indices, None, resolution)
canvas, imshow_kwargs = pl.new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),
X, _, _, _, _, Xgrid, _, _, xmin, xmax, resolution = helper_for_plot_data(self, plot_limits, which_indices, None, resolution)
canvas, imshow_kwargs = pl().new_canvas(xlim=(xmin[0], xmax[0]), ylim=(xmin[1], xmax[1]),
xlabel='latent dimension %i' % input_1, ylabel='latent dimension %i' % input_2, **imshow_kwargs)
if (data_labels is not None):
legend = find_best_layout_for_subplots(len(np.unique(data_labels)))[1]
@ -323,7 +323,7 @@ def plot_steepest_gradient_map(self, output_labels=None, data_labels=None, which
legend = False
plots = dict(scatter=_plot_latent_scatter(canvas, X, which_indices, data_labels, marker, num_samples, **scatter_kwargs or {}))
plots.update(_plot_steepest_gradient_map(self, canvas, which_indices, Xgrid, xmin, xmax, resolution, output_labels, updates, kern, annotation_kwargs=annotation_kwargs, **imshow_kwargs))
retval = pl.add_to_canvas(canvas, plots, legend=legend)
retval = pl().add_to_canvas(canvas, plots, legend=legend)
_wait_for_updates(plots['annotation'], updates)
return retval

View file

@ -34,8 +34,8 @@ import itertools
def in_ipynb():
try:
get_ipython()
return True
cfg = get_ipython().config
return 'TerminalIPythonApp' not in cfg
except NameError:
return False

View file

@ -1,21 +1,21 @@
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ -34,11 +34,11 @@ from plotly.graph_objs import Line
'''
This file is for defaults for the gpy plot, specific to the plotting library.
Create a kwargs dictionary with the right name for the plotting function
Create a kwargs dictionary with the right name for the plotting function
you are implementing. If you do not provide defaults, the default behaviour of
the plotting library will be used.
the plotting library will be used.
In the code, always use plotting.gpy_plots.defaults to get the defaults, as
In the code, always use plotting.gpy_plots.defaults to get the defaults, as
it gives back an empty default, when defaults are not defined.
'''
@ -50,24 +50,24 @@ inducing_2d = dict(marker_kwargs=dict(size='8', opacity=.7, line=Line(width=.5,
inducing_3d = dict(marker_kwargs=dict(size='8', opacity=.7, line=Line(width=.5, color='black')), opacity=.7, color='white', marker='star-triangle-up')
xerrorbar = dict(color='black', error_kwargs=dict(thickness=.5), opacity=.5)
yerrorbar = dict(color=Tango.colorsHex['darkRed'], error_kwargs=dict(thickness=.5), opacity=.5)
#
#
# # GP plots:
meanplot_1d = dict(color=Tango.colorsHex['mediumBlue'], line_kwargs=dict(width=2))
meanplot_2d = dict(colorscale='Hot')
meanplot_3d = dict(colorscale='Hot', opacity=.8)
samples_1d = dict(color=Tango.colorsHex['mediumBlue'], line_kwargs=dict(width=.3))
samples_3d = dict(cmap='Hot', opacity=.5)
confidence_interval = dict(mode='lines', line_kwargs=dict(color=Tango.colorsHex['darkBlue'], width=.4),
confidence_interval = dict(mode='lines', line_kwargs=dict(color=Tango.colorsHex['darkBlue'], width=.4),
color=Tango.colorsHex['lightBlue'], opacity=.3)
# density = dict(alpha=.5, color=Tango.colorsHex['lightBlue'])
#
#
# # GPLVM plots:
# data_y_1d = dict(linewidth=0, cmap='RdBu', s=40)
# data_y_1d_plot = dict(color='k', linewidth=1.5)
#
#
# # Kernel plots:
# ard = dict(edgecolor='k', linewidth=1.2)
#
ard = dict(linewidth=1.2, barmode='stack')
#
# # Input plots:
latent = dict(colorscale='Greys', reversescale=True, zsmooth='best')
gradient = dict(colorscale='RdBu', opacity=.7)

View file

@ -64,7 +64,10 @@ class PlotlyPlots(AbstractPlottingLibrary):
figure = tools.make_subplots(rows, cols, specs=specs)
return figure
def new_canvas(self, canvas=None, row=1, col=1, projection='2d', xlabel=None, ylabel=None, zlabel=None, title=None, xlim=None, ylim=None, zlim=None, **kwargs):
def new_canvas(self, canvas=None, row=1, col=1, projection='2d',
xlabel=None, ylabel=None, zlabel=None,
title=None, xlim=None,
ylim=None, zlim=None, **kwargs):
#if 'filename' not in kwargs:
# print('PlotlyWarning: filename was not given, this may clutter your plotly workspace')
# filename = None

View file

@ -1,21 +1,21 @@
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ -32,7 +32,9 @@ import GPy, os
from nose import SkipTest
from ..util.config import config
from ..plotting import change_plotting_library
change_plotting_library('matplotlib')
if config.get('plotting', 'library') != 'matplotlib':
raise SkipTest("Matplotlib not installed, not testing plots")
@ -64,7 +66,7 @@ def _image_directories():
def _sequenceEqual(a, b):
assert len(a) == len(b), "Sequences not same length"
for i, [x, y], in enumerate(zip(a, b)):
assert x == y, "element not matching {}".format(i)
assert x == y, "element not matching {}".format(i)
def _notFound(path):
raise IOError('File {} not in baseline')
@ -89,7 +91,17 @@ def _image_comparison(baseline_images, extensions=['pdf','svg','ong'], tol=11):
raise ImageComparisonFailure("Error between {} and {} is {:.5f}, which is bigger then the tolerance of {:.5f}".format(actual, expected, err['rms'], tol))
yield do_test
plt.close('all')
def test_kernel():
    """Image-comparison test for the new kern.plot_ARD figure."""
    # Fix the seed so randomize() below is reproducible and the rendered
    # figure matches the stored baseline image.
    np.random.seed(1239847)
    # Composite kernel mixing ARD parts (RBF over 5 dims, Linear over a
    # subset of active dims) with a non-ARD Bias part, exercising
    # plot_ARD's per-part bar handling.
    k = GPy.kern.RBF(5, ARD=True) + GPy.kern.Linear(3, active_dims=[0,2,4], ARD=True) + GPy.kern.Bias(2)
    k.randomize()
    k.plot_ARD(legend=True)
    # Nose test generator: yield one comparison per baseline image/extension
    # against the stored 'kern_ARD' baseline.
    for do_test in _image_comparison(
            baseline_images=['kern_{}'.format(sub) for sub in ["ARD",]],
            extensions=extensions):
        yield (do_test, )
def test_plot():
np.random.seed(11111)
X = np.random.uniform(-2, 2, (40, 1))
@ -162,7 +174,7 @@ def test_classification():
for do_test in _image_comparison(baseline_images=['gp_class_{}'.format(sub) for sub in ["likelihood", "raw", 'raw_link']], extensions=extensions):
yield (do_test, )
def test_sparse_classification():
np.random.seed(11111)
X = np.random.uniform(-2, 2, (40, 1))
@ -218,7 +230,7 @@ def test_bayesian_gplvm():
m.plot_steepest_gradient_map(resolution=7)
for do_test in _image_comparison(baseline_images=['bayesian_gplvm_{}'.format(sub) for sub in ["inducing", "inducing_3d", "latent_3d", "magnification", 'gradient']], extensions=extensions):
yield (do_test, )
if __name__ == '__main__':
import nose
nose.main()

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.8 KiB

View file

@ -2,21 +2,21 @@
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@ -31,12 +31,12 @@
#!/usr/bin/env python
import matplotlib
import matplotlib
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
matplotlib.use('agg')
matplotlib.rcParams[u'figure.figsize'] = (4,3)
matplotlib.rcParams[u'text.usetex'] = False
import nose
nose.main('GPy', defaultTest='GPy/testing/plotting_tests.py')
import nose
nose.main('GPy', defaultTest='GPy/testing/plotting_tests.py')