[plotly] todos: fill_gradient

This commit is contained in:
mzwiessele 2015-10-08 14:05:20 +01:00
parent 7f84bec6fb
commit b3154e43b4
10 changed files with 196 additions and 149 deletions

View file

@ -1,3 +1,3 @@
from .. import plotting_library as pl
from .. import plotting_library
pl = plotting_library
from . import data_plots, gp_plots, latent_plots, kernel_plots, plot_util, inference_plots

View file

@ -123,7 +123,7 @@ def plot_data_error(self, which_data_rows='all',
def _plot_data_error(self, canvas, which_data_rows='all',
which_data_ycols='all', visible_dims=None,
projection='2d', **error_kwargs):
projection='2d', label=None, **error_kwargs):
ycols = get_which_data_ycols(self, which_data_ycols)
rows = get_which_data_rows(self, which_data_rows)
@ -139,17 +139,17 @@ def _plot_data_error(self, canvas, which_data_rows='all',
for d in ycols:
update_not_existing_kwargs(error_kwargs, pl.defaults.xerrorbar)
plots['xerrorplot'].append(pl.xerrorbar(canvas, X[rows, free_dims].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims].flatten()),
2 * np.sqrt(X_variance[rows, free_dims].flatten()), label=label,
**error_kwargs))
#2D plotting
elif len(free_dims) == 2:
update_not_existing_kwargs(error_kwargs, pl.defaults.xerrorbar) # @UndefinedVariable
for d in ycols:
plots['xerrorplot'].append(pl.xerrorbar(canvas, X[rows, free_dims[0]].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims[0]].flatten()),
2 * np.sqrt(X_variance[rows, free_dims[0]].flatten()), label=label,
**error_kwargs))
plots['yerrorplot'].append(pl.xerrorbar(canvas, X[rows, free_dims[1]].flatten(), Y[rows, d].flatten(),
2 * np.sqrt(X_variance[rows, free_dims[1]].flatten()),
2 * np.sqrt(X_variance[rows, free_dims[1]].flatten()), label=label,
**error_kwargs))
elif len(free_dims) == 0:
pass #Nothing to plot!

View file

@ -40,7 +40,7 @@ def plot_mean(self, plot_limits=None, fixed_inputs=None,
apply_link=False, visible_dims=None,
which_data_ycols='all',
levels=20, projection='2d',
label=None,
label='gp mean',
predict_kw=None,
**kwargs):
"""
@ -70,8 +70,7 @@ def plot_mean(self, plot_limits=None, fixed_inputs=None,
predict_kw)
plots = _plot_mean(self, canvas, helper_data, helper_prediction,
levels, projection, label, **kwargs)
pl.add_to_canvas(canvas, plots)
return pl.show_canvas(canvas)
return pl.add_to_canvas(canvas, plots)
def _plot_mean(self, canvas, helper_data, helper_prediction,
levels=20, projection='2d', label=None,
@ -87,7 +86,7 @@ def _plot_mean(self, canvas, helper_data, helper_prediction,
else:
if projection == '2d':
update_not_existing_kwargs(kwargs, pl.defaults.meanplot_2d) # @UndefinedVariable
plots = dict(gpmean=[pl.contour(canvas, x, y,
plots = dict(gpmean=[pl.contour(canvas, x[:,0], y[0,:],
mu.reshape(resolution, resolution),
levels=levels, label=label, **kwargs)])
elif projection == '3d':
@ -105,7 +104,7 @@ def _plot_mean(self, canvas, helper_data, helper_prediction,
def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=False,
apply_link=False, visible_dims=None,
which_data_ycols='all', label=None,
which_data_ycols='all', label='gp confidence',
predict_kw=None,
**kwargs):
"""
@ -157,7 +156,7 @@ def plot_samples(self, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=True,
apply_link=False, visible_dims=None,
which_data_ycols='all',
samples=3, projection='2d', label=None,
samples=3, projection='2d', label='gp_samples',
predict_kw=None,
**kwargs):
"""
@ -214,7 +213,7 @@ def plot_density(self, plot_limits=None, fixed_inputs=None,
resolution=None, plot_raw=False,
apply_link=False, visible_dims=None,
which_data_ycols='all',
levels=35, label=None,
levels=35, label='gp density',
predict_kw=None,
**kwargs):
"""
@ -270,7 +269,7 @@ def plot(self, plot_limits=None, fixed_inputs=None,
visible_dims=None,
levels=20, samples=0, samples_likelihood=0, lower=2.5, upper=97.5,
plot_data=True, plot_inducing=True, plot_density=False,
predict_kw=None, projection='2d', **kwargs):
predict_kw=None, projection='2d', legend=False, **kwargs):
"""
Convenience function for plotting the fit of a GP.
@ -311,18 +310,18 @@ def plot(self, plot_limits=None, fixed_inputs=None,
plot_data = False
plots = {}
if plot_data:
plots.update(_plot_data(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection))
plots.update(_plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection))
plots.update(_plot_data(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, "Data"))
plots.update(_plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection, "Data Error"))
plots.update(_plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing, plot_density, projection))
if plot_raw and (samples_likelihood > 0):
helper_prediction = helper_predict_with_model(self, helper_data[5], False,
apply_link, None,
get_which_data_ycols(self, which_data_ycols),
predict_kw, samples_likelihood)
plots.update(_plot_samples(canvas, helper_data, helper_prediction, projection))
plots.update(_plot_samples(canvas, helper_data, helper_prediction, projection, "Lik Samples"))
if hasattr(self, 'Z') and plot_inducing:
plots.update(_plot_inducing(self, canvas, visible_dims, projection, None))
return pl.add_to_canvas(canvas, plots)
plots.update(_plot_inducing(self, canvas, visible_dims, projection, 'Inducing'))
return pl.add_to_canvas(canvas, plots, legend=legend)
def plot_f(self, plot_limits=None, fixed_inputs=None,
@ -333,7 +332,7 @@ def plot_f(self, plot_limits=None, fixed_inputs=None,
levels=20, samples=0, lower=2.5, upper=97.5,
plot_density=False,
plot_data=True, plot_inducing=True,
projection='2d',
projection='2d', legend=False,
predict_kw=None,
**kwargs):
"""
@ -366,35 +365,27 @@ def plot_f(self, plot_limits=None, fixed_inputs=None,
:param dict error_kwargs: kwargs for the error plot for the plotting library you are using
:param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
"""
canvas, _ = pl.new_canvas(projection=projection, **kwargs)
helper_data = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution)
helper_prediction = helper_predict_with_model(self, helper_data[5], True,
apply_link, np.linspace(2.5, 97.5, levels*2) if plot_density else (lower,upper),
get_which_data_ycols(self, which_data_ycols),
predict_kw, samples)
if not apply_link:
# It does not make sense to plot the data (which lives not in the latent function space) into latent function space.
plot_data = False
plots = {}
if plot_data:
plots.update(_plot_data(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection))
plots.update(_plot_data_error(self, canvas, which_data_rows, which_data_ycols, visible_dims, projection))
plots.update(_plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing, plot_density, projection))
if hasattr(self, 'Z') and plot_inducing:
plots.update(_plot_inducing(self, canvas, visible_dims, projection, None))
return pl.add_to_canvas(canvas, plots)
plot(self, plot_limits, fixed_inputs, resolution, True,
apply_link, which_data_ycols, which_data_rows,
visible_dims, levels, samples, 0,
lower, upper, plot_data, plot_inducing,
plot_density, predict_kw, projection, legend)
def _plot(self, canvas, plots, helper_data, helper_prediction, levels, plot_inducing=True, plot_density=False, projection='2d'):
plots.update(_plot_mean(self, canvas, helper_data, helper_prediction, levels, projection, None))
plots.update(_plot_mean(self, canvas, helper_data, helper_prediction, levels, projection, 'Mean'))
if projection=='2d':
if not plot_density:
plots.update(_plot_confidence(self, canvas, helper_data, helper_prediction, None))
else:
plots.update(_plot_density(self, canvas, helper_data, helper_prediction, None))
try:
if projection=='2d':
if not plot_density:
plots.update(_plot_confidence(self, canvas, helper_data, helper_prediction, "Confidence"))
else:
plots.update(_plot_density(self, canvas, helper_data, helper_prediction, "Density"))
except RuntimeError:
#plotting in 2d
pass
if helper_prediction[2] is not None:
plots.update(_plot_samples(self, canvas, helper_data, helper_prediction, projection, None))
plots.update(_plot_samples(self, canvas, helper_data, helper_prediction, projection, "Samples"))
return plots

View file

@ -35,16 +35,20 @@ from .plot_util import get_x_y_var,\
find_best_layout_for_subplots
def _wait_for_updates(view, updates):
try:
if updates:
clear = raw_input('yes or enter to deactivate updates - otherwise still do updates - use plots[imshow].deactivate() to clear')
if clear.lower() in 'yes' or clear == '':
if view is not None:
try:
if updates:
clear = raw_input('yes or enter to deactivate updates - otherwise still do updates - use plots[imshow].deactivate() to clear')
if clear.lower() in 'yes' or clear == '':
view.deactivate()
else:
view.deactivate()
else:
view.deactivate()
except AttributeError:
# No updateable view:
pass
except AttributeError:
# No updateable view:
pass
except TypeError:
# No updateable view:
pass
def _plot_latent_scatter(canvas, X, visible_dims, labels, marker, num_samples, projection='2d', **kwargs):

View file

@ -282,12 +282,12 @@ def get_x_y_var(model):
:returns: (X, X_variance, Y)
"""
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean
X_variance = model.X.variance
X = model.X.mean.values
X_variance = model.X.variance.values
else:
X = model.X
X = model.X.values
X_variance = None
Y = model.Y
Y = model.Y.values
if sparse.issparse(Y): Y = Y.todense().view(np.ndarray)
return X, X_variance, Y