diff --git a/GPy/core/gp.py b/GPy/core/gp.py index ef71ea2c..01e09739 100644 --- a/GPy/core/gp.py +++ b/GPy/core/gp.py @@ -504,6 +504,9 @@ class GP(Model): """ return self.kern.input_sensitivity(summarize=summarize) + def get_most_significant_input_dimensions(self, which_indices=None): + return self.kern.get_most_significant_input_dimensions(which_indices) + def optimize(self, optimizer=None, start=None, **kwargs): """ Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index 0bb7f787..49f2e2fb 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -214,6 +214,36 @@ class Kern(Parameterized): """ return np.zeros(self.input_dim) + def get_most_significant_input_dimensions(self, which_indices=None): + """ + Determine which dimensions should be plotted + + :param which_indices: force the indices to be the given indices. + :type which_indices: int or tuple(int,int) + """ + if which_indices is None: + if self.input_dim == 1: + input_1 = 0 + input_2 = None + if self.input_dim == 2: + input_1, input_2 = 0, 1 + else: + try: + which_indices = np.argsort(self.input_sensitivity())[::-1][:2] + except: + raise ValueError("cannot automatically determine which dimensions to plot, please pass 'which_indices'") + try: + input_1, input_2 = which_indices + except TypeError: + # which_indices was an int + input_1, input_2 = which_indices, None + except ValueError: + # which_indices was a list or array like with only one int + input_1, input_2 = which_indices[0], None + + return input_1, input_2 + + def __add__(self, other): """ Overloading of the '+' operator. 
for more control, see self.add """ return self.add(other) diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py index 5bef5be5..b0034663 100644 --- a/GPy/models/gplvm.py +++ b/GPy/models/gplvm.py @@ -44,31 +44,6 @@ class GPLVM(GP): super(GPLVM, self).parameters_changed() self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None) - #def jacobian(self,X): - # J = np.zeros((X.shape[0],X.shape[1],self.output_dim)) - # for i in range(self.output_dim): - # J[:,:,i] = self.kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1], X, self.X) - # return J - - #def magnification(self,X): - # target=np.zeros(X.shape[0]) - # #J = np.zeros((X.shape[0],X.shape[1],self.output_dim)) - ## J = self.jacobian(X) - # for i in range(X.shape[0]): - # target[i]=np.sqrt(np.linalg.det(np.dot(J[i,:,:],np.transpose(J[i,:,:])))) - # return target - - def plot(self): - assert self.Y.shape[1] == 2, "too high dimensional to plot. Try plot_latent" - from matplotlib import pyplot as plt - plt.scatter(self.Y[:, 0], - self.Y[:, 1], - 40, self.X[:, 0].copy(), - linewidth=0, cmap=plt.cm.jet) - Xnew = np.linspace(self.X.min(), self.X.max(), 200)[:, None] - mu, _ = self.predict(Xnew) - plt.plot(mu[:, 0], mu[:, 1], 'k', linewidth=1.5) - def plot_latent(self, labels=None, which_indices=None, resolution=50, ax=None, marker='o', s=40, fignum=None, legend=True, diff --git a/GPy/plotting/__init__.py b/GPy/plotting/__init__.py index 74fccfb0..beb6bf7e 100644 --- a/GPy/plotting/__init__.py +++ b/GPy/plotting/__init__.py @@ -35,23 +35,24 @@ if config.get('plotting', 'library') is not 'none': GP.plot_density = gpy_plot.gp_plots.plot_density GP.plot_samples = gpy_plot.gp_plots.plot_samples GP.plot = gpy_plot.gp_plots.plot - GP.plot_magnificaion = gpy_plot.latent_plots.plot_magnification + #GP.plot_magnificaion = gpy_plot.latent_plots.plot_magnification from ..core import SparseGP SparseGP.plot_inducing = gpy_plot.data_plots.plot_inducing - from ..core import GPLVM - GPLVM.plot_latent = 
gpy_plot.latent_plots.plot_latent + from ..models import GPLVM + GPLVM.plot_prediction_fit = gpy_plot.latent_plots.plot_prediction_fit + #GPLVM.plot_latent = gpy_plot.latent_plots.plot_latent from ..kern import Kern - Kern.plot_covariance = gpy_plot.kern_plots.plot_kern + #Kern.plot_covariance = gpy_plot.kern_plots.plot_kern # Variational plot! from . import matplot_dep # Still to convert to new style: - GP.plot = matplot_dep.models_plots.plot_fit - GP.plot_f = matplot_dep.models_plots.plot_fit_f + #GP.plot = matplot_dep.models_plots.plot_fit + #GP.plot_f = matplot_dep.models_plots.plot_fit_f GP.plot_magnification = matplot_dep.dim_reduction_plots.plot_magnification diff --git a/GPy/plotting/abstract_plotting_library.py b/GPy/plotting/abstract_plotting_library.py index 8911e8de..15470786 100644 --- a/GPy/plotting/abstract_plotting_library.py +++ b/GPy/plotting/abstract_plotting_library.py @@ -57,7 +57,7 @@ class AbstractPlottingLibrary(object): return self.__defaults #=============================================================================== - def get_new_canvas(self, **kwargs): + def get_new_canvas(self, plot_3d=False, **kwargs): """ Return a canvas, kwargupdate for your plotting library. @@ -65,38 +65,56 @@ class AbstractPlottingLibrary(object): and updates the kwargs (deletes the unnecessary kwargs) for further usage in normal plotting. + the kwargs are plotting library specific kwargs! + + :param bool plot_3d: whether to plot in 3d. + E.g. in matplotlib this means it deletes references to ax, as plotting is done on the axis itself and is not a kwarg. """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def show_canvas(self, canvas, plots): + def show_canvas(self, canvas, plots, xlabel=None, ylabel=None, zlabel=None, title=None, xlim=None, ylim=None, zlim=None, legend=True, **kwargs): """ Show the canvas given. 
- plots is either a list of plots or a dictionary with the plots + plots is a dictionary with the plots as the items. + the kwargs are plotting library specific kwargs! + + :param xlabel: the label to put on the xaxis + :param ylabel: the label to put on the yaxis + :param zlabel: the label to put on the zaxis (if plotting in 3d) + :param title: the title of the plot + :param (float, float) xlim: the limits for the xaxis + :param (float, float) ylim: the limits for the yaxis + :param (float, float) zlim: the limits for the zaxis (if plotting in 3d) + :param legend: whether to put a legend on + E.g. in matplotlib this does not have to do anything, we make the tight plot, though. """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def plot(self, cavas, X, Y, **kwargs): + def plot(self, cavas, X, Y, Z=None, color=None, label=None, **kwargs): """ Make a line plot from for Y on X (Y = f(X)) on the canvas. + If Z is not None, plot in 3d! the kwargs are plotting library specific kwargs! """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def plot_axis_lines(self, ax, X, **kwargs): + def plot_axis_lines(self, ax, X, color=None, label=None, **kwargs): """ - Plot lines at the bottom of the axis at input location X. + Plot lines at the bottom (lower boundary of yaxis) of the axis at input location X. + + If X is two dimensional, plot in 3d and connect the axis lines to the bottom of the Z axis. the kwargs are plotting library specific kwargs! """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def scatter(self, canvas, X, Y, c=None, vmin=None, vmax=None, **kwargs): + def scatter(self, canvas, X, Y, Z=None, c=None, vmin=None, vmax=None, label=None, **kwargs): """ Make a scatter plot between X and Y on the canvas given. 
@@ -105,13 +123,30 @@ class AbstractPlottingLibrary(object): :param canvas: the plotting librarys specific canvas to plot on. :param array-like X: the inputs to plot. :param array-like Y: the outputs to plot. + :param array-like Z: the Z level to plot (if plotting 3d). :param array-like c: the colorlevel for each point. :param float vmin: minimum colorscale :param float vmax: maximum colorscale + :param kwargs: the specific kwargs for your plotting library """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def xerrorbar(self, canvas, X, Y, error, **kwargs): + def barplot(self, canvas, x, height, width=0.8, bottom=0, color=None, label=None, **kwargs): + """ + Plot vertical bar plot centered at x with height + and width of bars. The y level is at bottom. + + the kwargs are plotting library specific kwargs! + + :param array-like x: the center points of the bars + :param array-like height: the height of the bars + :param array-like width: the width of the bars + :param array-like bottom: the start y level of the bars + :param kwargs: kwargs for the specific library you are using. + """ + raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") + + def xerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs): """ Make an errorbar along the xaxis for points at (X,Y) on the canvas. if error is two dimensional, the lower error is error[:,0] and @@ -121,7 +156,7 @@ class AbstractPlottingLibrary(object): """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def yerrorbar(self, canvas, X, Y, error, **kwargs): + def yerrorbar(self, canvas, X, Y, error, color=None, label=None, **kwargs): """ Make errorbars along the yaxis on the canvas given. 
if error is two dimensional, the lower error is error[:,0] and @@ -131,7 +166,7 @@ class AbstractPlottingLibrary(object): """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def imshow(self, canvas, X, **kwargs): + def imshow(self, canvas, X, label=None, color=None, **kwargs): """ Show the image stored in X on the canvas/ @@ -139,7 +174,7 @@ class AbstractPlottingLibrary(object): """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def contour(self, canvas, X, Y, C, **kwargs): + def contour(self, canvas, X, Y, C, color=None, label=None, **kwargs): """ Make a contour plot at (X, Y) with heights stored in C on the canvas. @@ -147,7 +182,7 @@ class AbstractPlottingLibrary(object): """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def fill_between(self, canvas, X, lower, upper, **kwargs): + def fill_between(self, canvas, X, lower, upper, color=None, label=None, **kwargs): """ Fill along the xaxis between lower and upper. @@ -155,7 +190,7 @@ class AbstractPlottingLibrary(object): """ raise NotImplementedError("Implement all plot functions in AbstractPlottingLibrary in order to use your own plotting library") - def fill_gradient(self, canvas, X, percentiles, **kwargs): + def fill_gradient(self, canvas, X, percentiles, color=None, label=None, **kwargs): """ Plot a gradient (in alpha values) for the given percentiles. diff --git a/GPy/plotting/matplot_dep/Tango.py b/GPy/plotting/gpy_plot/Tango.py similarity index 69% rename from GPy/plotting/matplot_dep/Tango.py rename to GPy/plotting/gpy_plot/Tango.py index 5c004519..1fa1f35d 100644 --- a/GPy/plotting/matplot_dep/Tango.py +++ b/GPy/plotting/gpy_plot/Tango.py @@ -1,29 +1,7 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) - -import matplotlib as mpl -from matplotlib import pyplot as pb import sys -#sys.path.append('/home/james/mlprojects/sitran_cluster/') -#from switch_pylab_backend import * - - -#this stuff isn;t really Tango related: maybe it could be moved out? TODO -def removeRightTicks(ax=None): - ax = ax or pb.gca() - for i, line in enumerate(ax.get_yticklines()): - if i%2 == 1: # odd indices - line.set_visible(False) -def removeUpperTicks(ax=None): - ax = ax or pb.gca() - for i, line in enumerate(ax.get_xticklines()): - if i%2 == 1: # odd indices - line.set_visible(False) -def fewerXticks(ax=None,divideby=2): - ax = ax or pb.gca() - ax.set_xticks(ax.get_xticks()[::divideby]) - colorsHex = {\ "Aluminium6":"#2e3436",\ @@ -83,32 +61,6 @@ def reset(): while not lightList[0]==colorsHex['lightBlue']: lightList.append(lightList.pop(0)) -def setLightFigures(): - mpl.rcParams['axes.edgecolor']=colorsHex['Aluminium6'] - mpl.rcParams['axes.facecolor']=colorsHex['Aluminium2'] - mpl.rcParams['axes.labelcolor']=colorsHex['Aluminium6'] - mpl.rcParams['figure.edgecolor']=colorsHex['Aluminium6'] - mpl.rcParams['figure.facecolor']=colorsHex['Aluminium2'] - mpl.rcParams['grid.color']=colorsHex['Aluminium6'] - mpl.rcParams['savefig.edgecolor']=colorsHex['Aluminium2'] - mpl.rcParams['savefig.facecolor']=colorsHex['Aluminium2'] - mpl.rcParams['text.color']=colorsHex['Aluminium6'] - mpl.rcParams['xtick.color']=colorsHex['Aluminium6'] - mpl.rcParams['ytick.color']=colorsHex['Aluminium6'] - -def setDarkFigures(): - mpl.rcParams['axes.edgecolor']=colorsHex['Aluminium2'] - mpl.rcParams['axes.facecolor']=colorsHex['Aluminium6'] - mpl.rcParams['axes.labelcolor']=colorsHex['Aluminium2'] - mpl.rcParams['figure.edgecolor']=colorsHex['Aluminium2'] - mpl.rcParams['figure.facecolor']=colorsHex['Aluminium6'] - mpl.rcParams['grid.color']=colorsHex['Aluminium2'] - mpl.rcParams['savefig.edgecolor']=colorsHex['Aluminium6'] - 
mpl.rcParams['savefig.facecolor']=colorsHex['Aluminium6'] - mpl.rcParams['text.color']=colorsHex['Aluminium2'] - mpl.rcParams['xtick.color']=colorsHex['Aluminium2'] - mpl.rcParams['ytick.color']=colorsHex['Aluminium2'] - def hex2rgb(hexcolor): hexcolor = [hexcolor[1+2*i:1+2*(i+1)] for i in range(3)] r,g,b = [int(n,16) for n in hexcolor] @@ -154,13 +106,4 @@ cdict_Alu = {'red' :((0./5,colorsRGB['Aluminium1'][0]/256.,colorsRGB['Aluminium1 (2./5,colorsRGB['Aluminium3'][2]/256.,colorsRGB['Aluminium3'][2]/256.), (3./5,colorsRGB['Aluminium4'][2]/256.,colorsRGB['Aluminium4'][2]/256.), (4./5,colorsRGB['Aluminium5'][2]/256.,colorsRGB['Aluminium5'][2]/256.), - (5./5,colorsRGB['Aluminium6'][2]/256.,colorsRGB['Aluminium6'][2]/256.))} -# cmap_Alu = mpl.colors.LinearSegmentedColormap('TangoAluminium',cdict_Alu,256) -# cmap_BGR = mpl.colors.LinearSegmentedColormap('TangoRedBlue',cdict_BGR,256) -# cmap_RB = mpl.colors.LinearSegmentedColormap('TangoRedBlue',cdict_RB,256) -if __name__=='__main__': - from matplotlib import pyplot as pb - pb.figure() - pb.pcolor(pb.rand(10,10),cmap=cmap_RB) - pb.colorbar() - pb.show() + (5./5,colorsRGB['Aluminium6'][2]/256.,colorsRGB['Aluminium6'][2]/256.))} \ No newline at end of file diff --git a/GPy/plotting/gpy_plot/__init__.py b/GPy/plotting/gpy_plot/__init__.py index aa4f7d92..cec60bcf 100644 --- a/GPy/plotting/gpy_plot/__init__.py +++ b/GPy/plotting/gpy_plot/__init__.py @@ -1,3 +1,3 @@ from .. import plotting_library as pl -from . import data_plots, gp_plots +from . import data_plots, gp_plots, latent_plots diff --git a/GPy/plotting/gpy_plot/data_plots.py b/GPy/plotting/gpy_plot/data_plots.py index f63aa742..0bcb66ef 100644 --- a/GPy/plotting/gpy_plot/data_plots.py +++ b/GPy/plotting/gpy_plot/data_plots.py @@ -27,10 +27,8 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#=============================================================================== - -from . import pl - import numpy as np +from . import pl from .plot_util import get_x_y_var, get_free_dims, get_which_data_ycols,\ get_which_data_rows, update_not_existing_kwargs, helper_predict_with_model @@ -47,12 +45,14 @@ def _plot_data(self, canvas, which_data_rows='all', plots = {} plots['dataplot'] = [] - plots['xerrorplot'] = [] + + if X_variance is not None: plots['xerrorplot'] = [] + #one dimensional plotting if len(free_dims) == 1: for d in ycols: - update_not_existing_kwargs(plot_kwargs, pl.defaults.data_1d) + update_not_existing_kwargs(plot_kwargs, pl.defaults.data_1d) # @UndefinedVariable plots['dataplot'].append(pl.scatter(canvas, X[rows, free_dims], Y[rows, d], **plot_kwargs)) if X_variance is not None: update_not_existing_kwargs(error_kwargs, pl.defaults.xerrorbar) @@ -62,7 +62,7 @@ def _plot_data(self, canvas, which_data_rows='all', #2D plotting elif len(free_dims) == 2: for d in ycols: - update_not_existing_kwargs(plot_kwargs, pl.defaults.data_2d) + update_not_existing_kwargs(plot_kwargs, pl.defaults.data_2d) # @UndefinedVariable plots['dataplot'].append(pl.scatter(canvas, X[rows, free_dims[0]], X[rows, free_dims[1]], c=Y[rows, d], vmin=Y.min(), vmax=Y.max(), **plot_kwargs)) elif len(free_dims) == 0: @@ -84,7 +84,7 @@ def plot_data(self, which_data_rows='all', :param which_data_rows: which of the training data to plot (default all) :type which_data_rows: 'all' or a slice object to slice self.X, self.Y :param which_data_ycols: when the data has several columns (independant outputs), only plot these - :type which_data_rows: 'all' or a list of integers + :type which_data_ycols: 'all' or a list of integers :param visible_dims: an array specifying the input dimensions to plot (maximum two) :type visible_dims: a numpy array :param dict error_kwargs: kwargs for the error plot for the plotting library you are using @@ -94,7 +94,7 @@ def plot_data(self, 
which_data_rows='all', """ canvas, kwargs = pl.get_new_canvas(plot_kwargs) plots = _plot_data(self, canvas, which_data_rows, which_data_ycols, visible_dims, error_kwargs, **kwargs) - return pl.show_canvas(canvas, plots) + return pl.show_canvas(canvas, plots, xlabel='x', ylabel='y', legend='dataplot') def plot_inducing(self, visible_dims=None, **plot_kwargs): @@ -116,11 +116,11 @@ def _plot_inducing(self, canvas, visible_dims, **plot_kwargs): #one dimensional plotting if len(free_dims) == 1: - update_not_existing_kwargs(plot_kwargs, pl.defaults.inducing_1d) + update_not_existing_kwargs(plot_kwargs, pl.defaults.inducing_1d) # @UndefinedVariable plots['inducing'] = pl.plot_axis_lines(canvas, Z[:, free_dims], **plot_kwargs) #2D plotting elif len(free_dims) == 2: - update_not_existing_kwargs(plot_kwargs, pl.defaults.inducing_2d) + update_not_existing_kwargs(plot_kwargs, pl.defaults.inducing_2d) # @UndefinedVariable plots['inducing'] = pl.scatter(canvas, Z[:, free_dims[0]], Z[:, free_dims[1]], **plot_kwargs) elif len(free_dims) == 0: @@ -184,15 +184,16 @@ def _plot_errorbars_trainset(self, canvas, if 'Y_metadata' not in predict_kw: predict_kw['Y_metadata'] = self.Y_metadata or {} _, percs, _ = helper_predict_with_model(self, Xgrid, plot_raw, - apply_link, (0, 100), + apply_link, (2.5, 97.5), ycols, predict_kw) for d in ycols: plots.append(pl.yerrorbar(canvas, X[rows,free_dims[0]], Y[rows,d], np.vstack([Y[rows,d]-percs[0][rows,d], percs[1][rows,d]-Y[rows,d]]), **plot_kwargs)) - return dict(yerrorbars=plots) else: pass #Nothing to plot! 
else: raise NotImplementedError("Cannot plot in more then one dimension.") - return plots \ No newline at end of file + return dict(yerrorbars=plots) + + diff --git a/GPy/plotting/gpy_plot/gp_plots.py b/GPy/plotting/gpy_plot/gp_plots.py index a69eaaa9..69b521aa 100644 --- a/GPy/plotting/gpy_plot/gp_plots.py +++ b/GPy/plotting/gpy_plot/gp_plots.py @@ -29,15 +29,15 @@ #=============================================================================== import numpy as np -from functools import wraps from . import pl from .plot_util import helper_for_plot_data, update_not_existing_kwargs, \ helper_predict_with_model, get_which_data_ycols +from .data_plots import _plot_data, _plot_inducing def plot_mean(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, - apply_link=False, + apply_link=False, visible_dims=None, which_data_ycols='all', levels=20, predict_kw=None, @@ -54,7 +54,6 @@ def plot_mean(self, plot_limits=None, fixed_inputs=None, :type fixed_inputs: a list of tuples :param int resolution: The resolution of the prediction [defaults are 1D:200, 2D:50] :param bool plot_raw: plot the latent function (usually denoted f) only? - :param dict Y_metadata: the Y_metadata (for e.g. heteroscedastic GPs) :param bool apply_link: whether to apply the link function of the GP to the raw prediction. :param array-like which_data_ycols: which columns of y to plot (array-like or list of ints) :param dict predict_kw: the keyword arguments for the prediction. 
If you want to plot a specific kernel give dict(kern=) in here @@ -63,17 +62,17 @@ def plot_mean(self, plot_limits=None, fixed_inputs=None, canvas, kwargs = pl.get_new_canvas(kwargs) plots = _plot_mean(self, canvas, plot_limits, fixed_inputs, resolution, plot_raw, - apply_link, which_data_ycols, levels, + apply_link, visible_dims, which_data_ycols, levels, predict_kw, **kwargs) return pl.show_canvas(canvas, plots) def _plot_mean(self, canvas, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, - apply_link=False, + apply_link=False, visible_dims=None, which_data_ycols=None, levels=20, predict_kw=None, **kwargs): - _, _, _, _, free_dims, Xgrid, x, y, _, _, resolution = helper_for_plot_data(self, plot_limits, fixed_inputs, resolution) + _, _, _, _, free_dims, Xgrid, x, y, _, _, resolution = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution) if len(free_dims)<=2: mu, _, _ = helper_predict_with_model(self, Xgrid, plot_raw, @@ -82,10 +81,10 @@ def _plot_mean(self, canvas, plot_limits=None, fixed_inputs=None, predict_kw) if len(free_dims)==1: # 1D plotting: - update_not_existing_kwargs(kwargs, pl.defaults.meanplot_1d) + update_not_existing_kwargs(kwargs, pl.defaults.meanplot_1d) # @UndefinedVariable return dict(gpmean=[pl.plot(canvas, Xgrid[:, free_dims], mu, **kwargs)]) else: - update_not_existing_kwargs(kwargs, pl.defaults.meanplot_2d) + update_not_existing_kwargs(kwargs, pl.defaults.meanplot_2d) # @UndefinedVariable return dict(gpmean=[pl.contour(canvas, x, y, mu.reshape(resolution, resolution), levels=levels, **kwargs)]) @@ -96,7 +95,7 @@ def _plot_mean(self, canvas, plot_limits=None, fixed_inputs=None, def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, - apply_link=False, + apply_link=False, visible_dims=None, which_data_ycols='all', predict_kw=None, **kwargs): @@ -107,36 +106,37 @@ def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, 
fixed_inputs= Give the Y_metadata in the predict_kw if you need it. - + :param float lower: the lower percentile to plot + :param float upper: the upper percentile to plot :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits :type plot_limits: np.array :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v. :type fixed_inputs: a list of tuples :param int resolution: The resolution of the prediction [default:200] :param bool plot_raw: plot the latent function (usually denoted f) only? - :param dict Y_metadata: the Y_metadata (for e.g. heteroscedastic GPs) :param bool apply_link: whether to apply the link function of the GP to the raw prediction. - :param array-like which_data_ycols: which columns of y to plot (array-like or list of ints) + :param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints) + :param array-like which_data_ycols: which columns of the output y (!) to plot (array-like or list of ints) :param dict predict_kw: the keyword arguments for the prediction. 
If you want to plot a specific kernel give dict(kern=) in here """ canvas, kwargs = pl.get_new_canvas(kwargs) plots = _plot_confidence(self, canvas, lower, upper, plot_limits, fixed_inputs, resolution, plot_raw, - apply_link, which_data_ycols, + apply_link, visible_dims, which_data_ycols, predict_kw, **kwargs) return pl.show_canvas(canvas, plots) def _plot_confidence(self, canvas, lower, upper, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, - apply_link=False, + apply_link=False, visible_dims=None, which_data_ycols=None, predict_kw=None, **kwargs): - _, _, _, _, free_dims, Xgrid, _, _, _, _, _ = helper_for_plot_data(self, plot_limits, fixed_inputs, resolution) + _, _, _, _, free_dims, Xgrid, _, _, _, _, _ = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution) ycols = get_which_data_ycols(self, which_data_ycols) - update_not_existing_kwargs(kwargs, pl.defaults.confidence_interval) + update_not_existing_kwargs(kwargs, pl.defaults.confidence_interval) # @UndefinedVariable if len(free_dims)<=1: if len(free_dims)==1: @@ -156,7 +156,7 @@ def _plot_confidence(self, canvas, lower, upper, plot_limits=None, fixed_inputs= def plot_samples(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=True, - apply_link=False, + apply_link=False, visible_dims=None, which_data_ycols='all', samples=3, predict_kw=None, **kwargs): @@ -173,6 +173,7 @@ def plot_samples(self, plot_limits=None, fixed_inputs=None, :param int resolution: The resolution of the prediction [defaults are 1D:200, 2D:50] :param bool plot_raw: plot the latent function (usually denoted f) only? This is usually what you want! :param bool apply_link: whether to apply the link function of the GP to the raw prediction. + :param array-like visible_dims: which columns of the input X (!) 
to plot (array-like or list of ints) :param array-like which_data_ycols: which columns of y to plot (array-like or list of ints) :param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=) in here :param int levels: for 2D plotting, the number of contour levels to use is @@ -180,17 +181,17 @@ def plot_samples(self, plot_limits=None, fixed_inputs=None, canvas, kwargs = pl.get_new_canvas(kwargs) plots = _plot_samples(self, canvas, plot_limits, fixed_inputs, resolution, plot_raw, - apply_link, which_data_ycols, samples, + apply_link, visible_dims, which_data_ycols, samples, predict_kw, **kwargs) return pl.show_canvas(canvas, plots) def _plot_samples(self, canvas, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, - apply_link=False, + apply_link=False, visible_dims=None, which_data_ycols=None, samples=3, predict_kw=None, **kwargs): - _, _, _, _, free_dims, Xgrid, x, y, _, _, resolution = helper_for_plot_data(self, plot_limits, fixed_inputs, resolution) + _, _, _, _, free_dims, Xgrid, _, _, _, _, resolution = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution) if len(free_dims)<2: @@ -198,7 +199,7 @@ def _plot_samples(self, canvas, plot_limits=None, fixed_inputs=None, # 1D plotting: _, _, samples = helper_predict_with_model(self, Xgrid, plot_raw, apply_link, None, get_which_data_ycols(self, which_data_ycols), predict_kw, samples) - update_not_existing_kwargs(kwargs, pl.defaults.samples_1d) + update_not_existing_kwargs(kwargs, pl.defaults.samples_1d) # @UndefinedVariable return dict(gpmean=[pl.plot(canvas, Xgrid[:, free_dims], samples, **kwargs)]) else: pass # Nothing to plot! 
@@ -208,7 +209,7 @@ def _plot_samples(self, canvas, plot_limits=None, fixed_inputs=None, def plot_density(self, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, - apply_link=False, + apply_link=False, visible_dims=None, which_data_ycols='all', levels=35, predict_kw=None, @@ -226,8 +227,8 @@ def plot_density(self, plot_limits=None, fixed_inputs=None, :type fixed_inputs: a list of tuples :param int resolution: The resolution of the prediction [default:200] :param bool plot_raw: plot the latent function (usually denoted f) only? - :param dict Y_metadata: the Y_metadata (for e.g. heteroscedastic GPs) :param bool apply_link: whether to apply the link function of the GP to the raw prediction. + :param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints) :param array-like which_data_ycols: which columns of y to plot (array-like or list of ints) :param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you. :param dict predict_kw: the keyword arguments for the prediction. 
If you want to plot a specific kernel give dict(kern=) in here @@ -235,22 +236,22 @@ def plot_density(self, plot_limits=None, fixed_inputs=None, canvas, kwargs = pl.get_new_canvas(kwargs) plots = _plot_density(self, canvas, plot_limits, fixed_inputs, resolution, plot_raw, - apply_link, which_data_ycols, + apply_link, visible_dims, which_data_ycols, levels, predict_kw, **kwargs) return pl.show_canvas(canvas, plots) def _plot_density(self, canvas, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, - apply_link=False, + apply_link=False, visible_dims=None, which_data_ycols=None, levels=35, predict_kw=None, **kwargs): - _, _, _, _, free_dims, Xgrid, x, y, _, _, resolution = helper_for_plot_data(self, plot_limits, fixed_inputs, resolution) + _, _, _, _, free_dims, Xgrid, x, y, _, _, resolution = helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution) ycols = get_which_data_ycols(self, which_data_ycols) - update_not_existing_kwargs(kwargs, pl.defaults.density) + update_not_existing_kwargs(kwargs, pl.defaults.density) # @UndefinedVariable if len(free_dims)<=1: if len(free_dims)==1: @@ -269,11 +270,126 @@ def _plot_density(self, canvas, plot_limits=None, fixed_inputs=None, raise RuntimeError('Can only plot density in one input dimension') def plot(self, plot_limits=None, fixed_inputs=None, - resolution=None, plot_inducing=True, + resolution=None, plot_raw=False, apply_link=False, which_data_ycols='all', which_data_rows='all', - levels=20, samples=0, - predict_kw=None, + visible_dims=None, + levels=20, samples=0, samples_likelihood=0, lower=2.5, upper=97.5, + plot_data=True, plot_inducing=True, plot_density=False, + predict_kw=None, error_kwargs=None, **kwargs): - #maybe get the prediction to be only done once here - pass #for now + """ + Convinience function for plotting the fit of a GP. + + Give the Y_metadata in the predict_kw if you need it. + + :param plot_limits: The limits of the plot. 
If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits + :type plot_limits: np.array + :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v. + :type fixed_inputs: a list of tuples + :param int resolution: The resolution of the prediction [default:200] + :param bool plot_raw: plot the latent function (usually denoted f) only? + :param bool apply_link: whether to apply the link function of the GP to the raw prediction. + :param which_data_ycols: when the data has several columns (independant outputs), only plot these + :type which_data_ycols: 'all' or a list of integers + :param which_data_rows: which of the training data to plot (default all) + :type which_data_rows: 'all' or a slice object to slice self.X, self.Y + :param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints) + :param int levels: the number of levels in the density (number bigger then 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher then 50 if the result is not smooth enough for you. + :param int samples: the number of samples to draw from the GP and plot into the plot. This will allways be samples from the latent function. + :param int samples_likelihood: the number of samples to draw from the GP and apply the likelihood noise. This is usually not what you want! + :param float lower: the lower percentile to plot + :param float upper: the upper percentile to plot + :param bool plot_data: plot the data into the plot? + :param bool plot_inducing: plot inducing inputs? + :param bool plot_density: plot density instead of the confidence interval? + :param dict predict_kw: the keyword arguments for the prediction. 
If you want to plot a specific kernel give dict(kern=) in here + :param dict error_kwargs: kwargs for the error plot for the plotting library you are using + :param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using + """ + canvas, kwargs = pl.get_new_canvas(kwargs) + plots = _plot(self, canvas, plot_limits, fixed_inputs, resolution, plot_raw, + apply_link, which_data_ycols, which_data_rows, visible_dims, + levels, samples, samples_likelihood, lower, upper, plot_data, + plot_inducing, plot_density, predict_kw, error_kwargs) + return pl.show_canvas(canvas, plots) + + +def plot_f(self, plot_limits=None, fixed_inputs=None, + resolution=None, + apply_link=False, + which_data_ycols='all', which_data_rows='all', + visible_dims=None, + levels=20, samples=0, lower=2.5, upper=97.5, + plot_density=False, + plot_data=True, plot_inducing=True, + predict_kw=None, error_kwargs=None, + **kwargs): + """ + Convinience function for plotting the fit of a GP. + + This is the same as plot, except it plots the latent function fit of the GP! + + Give the Y_metadata in the predict_kw if you need it. + + :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits + :type plot_limits: np.array + :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v. + :type fixed_inputs: a list of tuples + :param int resolution: The resolution of the prediction [default:200] + :param bool apply_link: whether to apply the link function of the GP to the raw prediction. 
+ :param which_data_ycols: when the data has several columns (independent outputs), only plot these + :type which_data_ycols: 'all' or a list of integers + :param which_data_rows: which of the training data to plot (default all) + :type which_data_rows: 'all' or a slice object to slice self.X, self.Y + :param array-like visible_dims: an array specifying the input dimensions to plot (maximum two) + :param int levels: the number of levels in the density (number bigger than 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher than 50 if the result is not smooth enough for you. + :param int samples: the number of samples to draw from the GP and plot into the plot. This will always be samples from the latent function. + :param float lower: the lower percentile to plot + :param float upper: the upper percentile to plot + :param bool plot_data: plot the data into the plot? + :param bool plot_inducing: plot inducing inputs? + :param bool plot_density: plot density instead of the confidence interval? + :param dict predict_kw: the keyword arguments for the prediction.
If you want to plot a specific kernel give dict(kern=) in here + :param dict error_kwargs: kwargs for the error plot for the plotting library you are using + :param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using + """ + canvas, kwargs = pl.get_new_canvas(kwargs) + plots = _plot(self, canvas, plot_limits, fixed_inputs, resolution, + True, apply_link, which_data_ycols, which_data_rows, + visible_dims, levels, samples, 0, lower, upper, + plot_data, plot_inducing, plot_density, + predict_kw, error_kwargs) + return pl.show_canvas(canvas, plots) + + + +def _plot(self, canvas, plot_limits=None, fixed_inputs=None, + resolution=None, + plot_raw=False, apply_link=False, + which_data_ycols='all', which_data_rows='all', + visible_dims=None, + levels=20, samples=0, samples_likelihood=0, lower=2.5, upper=97.5, + plot_data=True, plot_inducing=True, plot_density=False, + predict_kw=None, error_kwargs=None, + **kwargs): + + plots = {} + if plot_data: + plots.update(_plot_data(self, canvas, which_data_rows, which_data_ycols, visible_dims, error_kwargs)) + + plots.update(_plot_mean(self, canvas, plot_limits, fixed_inputs, resolution, plot_raw, apply_link, visible_dims, which_data_ycols, levels, predict_kw)) + if not plot_density: + plots.update(_plot_confidence(self, canvas, lower, upper, plot_limits, fixed_inputs, resolution, plot_raw, apply_link, visible_dims, which_data_ycols, predict_kw)) + else: + plots.update(_plot_density(self, canvas, plot_limits, fixed_inputs, resolution, plot_raw, apply_link, visible_dims, which_data_ycols, levels, predict_kw)) + + if samples > 0: + plots.update(_plot_samples(self, canvas, plot_limits, fixed_inputs, resolution, True, apply_link, visible_dims, which_data_ycols, samples, predict_kw)) + if samples_likelihood > 0: + plots.update(_plot_samples(self, canvas, plot_limits, fixed_inputs, resolution, False, apply_link, visible_dims, which_data_ycols, samples_likelihood, predict_kw)) + + if hasattr(self, 'Z') and
plot_inducing: + plots.update(_plot_inducing(self, canvas, visible_dims)) + + return plots \ No newline at end of file diff --git a/GPy/plotting/gpy_plot/plot_util.py b/GPy/plotting/gpy_plot/plot_util.py index 8177ea63..1a9b2a92 100644 --- a/GPy/plotting/gpy_plot/plot_util.py +++ b/GPy/plotting/gpy_plot/plot_util.py @@ -84,7 +84,7 @@ def helper_predict_with_model(self, Xgrid, plot_raw, apply_link, percentiles, wh fsamples[:, s] = self.likelihood.gp_link.transf(fsamples[:, s]) return retmu, percs, fsamples -def helper_for_plot_data(self, plot_limits, fixed_inputs, resolution): +def helper_for_plot_data(self, plot_limits, visible_dims, fixed_inputs, resolution): """ Figure out the data, free_dims and create an Xgrid for the prediction. @@ -95,7 +95,7 @@ def helper_for_plot_data(self, plot_limits, fixed_inputs, resolution): if fixed_inputs is None: fixed_inputs = [] fixed_dims = get_fixed_dims(self, fixed_inputs) - free_dims = get_free_dims(self, None, fixed_dims) + free_dims = get_free_dims(self, visible_dims, fixed_dims) if len(free_dims) == 1: #define the frame on which to plot @@ -157,11 +157,10 @@ def get_free_dims(model, visible_dims, fixed_dims): """ if visible_dims is None: visible_dims = np.arange(model.input_dim) - assert visible_dims.size <= 2, "Visible inputs cannot be larger than two" - if fixed_dims is None: - return visible_dims - else: + visible_dims = np.asanyarray(visible_dims) + if fixed_dims is not None: return np.setdiff1d(visible_dims, fixed_dims) + return visible_dims def get_fixed_dims(model, fixed_inputs): """ diff --git a/GPy/plotting/matplot_dep/__init__.py b/GPy/plotting/matplot_dep/__init__.py index 017396a6..693e2081 100644 --- a/GPy/plotting/matplot_dep/__init__.py +++ b/GPy/plotting/matplot_dep/__init__.py @@ -8,7 +8,7 @@ from . import variational_plots from . import kernel_plots from . import dim_reduction_plots from . import mapping_plots -from . import Tango +from GPy.plotting.gpy_plot import Tango from . import visualize from . 
import latent_space_visualizations from . import inference_plots diff --git a/GPy/plotting/matplot_dep/defaults.py b/GPy/plotting/matplot_dep/defaults.py index 00c1b839..174a55e4 100644 --- a/GPy/plotting/matplot_dep/defaults.py +++ b/GPy/plotting/matplot_dep/defaults.py @@ -30,7 +30,7 @@ from matplotlib.colors import LinearSegmentedColormap from matplotlib import cm -from . import Tango +from GPy.plotting.gpy_plot import Tango ''' This file is for defaults for the gpy plot, specific to the plotting library. @@ -43,17 +43,24 @@ In the code, always ise plotting.gpy_plots.defaults to get the defaults, as it gives back an empty default, when defaults are not defined. ''' -# Data: +# Data plots: data_1d = dict(lw=1.5, marker='x', edgecolor='k') data_2d = dict(s=35, edgecolors='none', linewidth=0., cmap=cm.get_cmap('hot'), alpha=.5) inducing_1d = dict(lw=0, s=500, facecolors=Tango.colorsHex['darkRed']) inducing_2d = dict(s=14, edgecolors='k', linewidth=.4, facecolors='white', alpha=.5) -xerrorbar = dict(ecolor='k', fmt='none', elinewidth=.5, alpha=.5) -yerrorbar = dict(ecolor=Tango.colorsHex['darkRed'], fmt='none', elinewidth=.5, alpha=.5) +xerrorbar = dict(color='k', fmt='none', elinewidth=.5, alpha=.5) +yerrorbar = dict(color=Tango.colorsHex['darkRed'], fmt='none', elinewidth=.5, alpha=.5) -# GP plots +# GP plots: meanplot_1d = dict(color=Tango.colorsHex['mediumBlue'], linewidth=2) meanplot_2d = dict(cmap='hot', linewidth=.5) samples_1d = dict(color=Tango.colorsHex['mediumBlue'], linewidth=.3) -confidence_interval = dict(edgecolor=Tango.colorsHex['darkBlue'],linewidth=.5,facecolor=Tango.colorsHex['lightBlue'],alpha=.2) -density = dict(alpha=.5, facecolor=Tango.colorsHex['mediumBlue'], edgecolors='none') \ No newline at end of file +confidence_interval = dict(edgecolor=Tango.colorsHex['darkBlue'], linewidth=.5, color=Tango.colorsHex['lightBlue'],alpha=.2) +density = dict(alpha=.5, color=Tango.colorsHex['mediumBlue']) + +# GPLVM plots: +data_y_1d = dict(linewidth=0, 
cmap='RdBu', s=40) +data_y_1d_plot = dict(color='k', linewidth=1.5) + +# Kernel plots: +ard = dict(edgecolor='k', linewidth=1.2) \ No newline at end of file diff --git a/GPy/plotting/matplot_dep/dim_reduction_plots.py b/GPy/plotting/matplot_dep/dim_reduction_plots.py index 40da7cfd..fd6c50b0 100644 --- a/GPy/plotting/matplot_dep/dim_reduction_plots.py +++ b/GPy/plotting/matplot_dep/dim_reduction_plots.py @@ -7,31 +7,13 @@ from ...core.parameterization.variational import VariationalPosterior from .base_plots import x_frame2D import itertools try: - from . import Tango +    from GPy.plotting.gpy_plot import Tango from matplotlib.cm import get_cmap from matplotlib import pyplot as pb from matplotlib import cm except: pass -def most_significant_input_dimensions(model, which_indices): - """ - Determine which dimensions should be plotted - """ - if which_indices is None: - if model.input_dim == 1: - input_1 = 0 - input_2 = None - if model.input_dim == 2: - input_1, input_2 = 0, 1 - else: - try: - input_1, input_2 = np.argsort(model.input_sensitivity())[::-1][:2] - except: - raise ValueError("cannot automatically determine which dimensions to plot, please pass 'which_indices'") - else: - input_1, input_2 = which_indices - return input_1, input_2 def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, marker='o', s=40, @@ -52,7 +34,7 @@ def plot_latent(model, labels=None, which_indices=None, if labels is None: labels = np.ones(model.num_data) - input_1, input_2 = most_significant_input_dimensions(model, which_indices) + input_1, input_2 = model.get_most_significant_input_dimensions(which_indices) #fethch the data points X that we'd like to plot X = model.X @@ -219,7 +201,7 @@ def plot_magnification(model, labels=None, which_indices=None, if labels is None: labels = np.ones(model.num_data) - input_1, input_2 = most_significant_input_dimensions(model, which_indices) + input_1, input_2 = model.get_most_significant_input_dimensions(which_indices) #fethch
the data points X that we'd like to plot X = model.X @@ -366,7 +348,7 @@ def plot_magnification(model, labels=None, which_indices=None, def plot_steepest_gradient_map(model, fignum=None, ax=None, which_indices=None, labels=None, data_labels=None, data_marker='o', data_s=40, resolution=20, aspect='auto', updates=False, ** kwargs): - input_1, input_2 = significant_dims = most_significant_input_dimensions(model, which_indices) + input_1, input_2 = significant_dims = model.get_most_significant_input_dimensions(which_indices) X = np.zeros((resolution ** 2, model.input_dim)) indices = np.r_[:X.shape[0]] diff --git a/GPy/plotting/matplot_dep/kernel_plots.py b/GPy/plotting/matplot_dep/kernel_plots.py index a7026e4f..8e86513e 100644 --- a/GPy/plotting/matplot_dep/kernel_plots.py +++ b/GPy/plotting/matplot_dep/kernel_plots.py @@ -3,13 +3,10 @@ import numpy as np from matplotlib import pyplot as pb -from . import Tango from matplotlib.textpath import TextPath from matplotlib.transforms import offset_copy from .base_plots import ax_default - - def add_bar_labels(fig, ax, bars, bottom=0): transOffset = offset_copy(ax.transData, fig=fig, x=0., y= -2., units='points') @@ -40,63 +37,6 @@ def plot_bars(fig, ax, x, ard_params, color, name, bottom=0): color=color, edgecolor='k', linewidth=1.2, label=name.replace("_"," ")) -def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False, filtering=None): - """ - If an ARD kernel is present, plot a bar representation using matplotlib - - :param fignum: figure number of the plot - :param ax: matplotlib axis to plot on - :param title: - title of the plot, - pass '' to not print a title - pass None for a generic title - :param filtering: list of names, which to use for plotting ARD parameters. - Only kernels which match names in the list of names in filtering - will be used for plotting. 
- :type filtering: list of names to use for ARD plot - """ - fig, ax = ax_default(fignum,ax) - - if title is None: - ax.set_title('ARD parameters, %s kernel' % kernel.name) - else: - ax.set_title(title) - - Tango.reset() - bars = [] - - ard_params = np.atleast_2d(kernel.input_sensitivity(summarize=False)) - bottom = 0 - last_bottom = bottom - - x = np.arange(kernel.input_dim) - - if filtering is None: - filtering = kernel.parameter_names(recursive=False) - - for i in range(ard_params.shape[0]): - if kernel.parameters[i].name in filtering: - c = Tango.nextMedium() - bars.append(plot_bars(fig, ax, x, ard_params[i,:], c, kernel.parameters[i].name, bottom=bottom)) - last_bottom = ard_params[i,:] - bottom += last_bottom - else: - print("filtering out {}".format(kernel.parameters[i].name)) - - ax.set_xlim(-.5, kernel.input_dim - .5) - add_bar_labels(fig, ax, [bars[-1]], bottom=bottom-last_bottom) - - if legend: - if title is '': - mode = 'expand' - if len(bars) > 1: - mode = 'expand' - ax.legend(bbox_to_anchor=(0., 1.02, 1., 1.02), loc=3, - ncol=len(bars), mode=mode, borderaxespad=0.) - fig.tight_layout(rect=(0, 0, 1, .9)) - else: - ax.legend() - return ax @@ -111,7 +51,7 @@ def plot(kernel,x=None, fignum=None, ax=None, title=None, plot_limits=None, reso :resolution: the resolution of the lines used in plotting :mpl_kwargs avalid keyword arguments to pass through to matplotlib (e.g. lw=7) """ - fig, ax = ax_default(fignum,ax) + _, ax = ax_default(fignum,ax) if title is None: ax.set_title('%s kernel' % kernel.name) diff --git a/GPy/plotting/matplot_dep/mapping_plots.py b/GPy/plotting/matplot_dep/mapping_plots.py index fd964a93..f1857b45 100644 --- a/GPy/plotting/matplot_dep/mapping_plots.py +++ b/GPy/plotting/matplot_dep/mapping_plots.py @@ -3,7 +3,7 @@ import numpy as np try: - from . 
import Tango +    from GPy.plotting.gpy_plot import Tango from matplotlib import pyplot as pb except: pass diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index 640b8960..1fc49a23 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -2,7 +2,6 @@ # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np -from . import Tango from .base_plots import gpplot, x_frame1D, x_frame2D,gperrors from ...models.gp_coregionalized_regression import GPCoregionalizedRegression from ...models.sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression @@ -11,7 +10,7 @@ from ...core.parameterization.variational import VariationalPosterior from matplotlib import pyplot as plt from .base_plots import gradient_fill from functools import wraps - +from ..gpy_plot import Tango def plot_data(self, which_data_rows='all', which_data_ycols='all', visible_dims=None, diff --git a/GPy/plotting/matplot_dep/plot_definitions.py b/GPy/plotting/matplot_dep/plot_definitions.py index 919f26e4..af0955fb 100644 --- a/GPy/plotting/matplot_dep/plot_definitions.py +++ b/GPy/plotting/matplot_dep/plot_definitions.py @@ -38,22 +38,36 @@ class MatplotlibPlots(AbstractPlottingLibrary): super(MatplotlibPlots, self).__init__() self._defaults = defaults.__dict__ - def get_new_canvas(self, kwargs): + def get_new_canvas(self, kwargs, plot_3d=False): + if plot_3d: + from mpl_toolkits.mplot3d import Axes3D # @UnusedImport + pr = '3d' + else: pr=None if 'ax' in kwargs: ax = kwargs.pop('ax') elif 'num' in kwargs and 'figsize' in kwargs: - ax = plt.figure(num=kwargs.pop('num'), figsize=kwargs.pop('figsize')).add_subplot(111) + ax = plt.figure(num=kwargs.pop('num'), figsize=kwargs.pop('figsize')).add_subplot(111, projection=pr) elif 'num' in kwargs: - ax = plt.figure(num=kwargs.pop('num')).add_subplot(111) + ax = plt.figure(num=kwargs.pop('num')).add_subplot(111, projection=pr) elif 'figsize' in
kwargs: - ax = plt.figure(figsize=kwargs.pop('figsize')).add_subplot(111) + ax = plt.figure(figsize=kwargs.pop('figsize')).add_subplot(111, projection=pr) else: - ax = plt.figure().add_subplot(111) + ax = plt.figure().add_subplot(111, projection=pr) # Add ax to kwargs to add all subsequent plots to this axis: #kwargs['ax'] = ax return ax, kwargs - def show_canvas(self, ax, plots): + def show_canvas(self, ax, plots, xlabel=None, ylabel=None, + zlabel=None, title=None, xlim=None, ylim=None, + zlim=None, legend=True, **kwargs): + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + + if zlabel is not None: + ax.set_zlabel(zlabel) + + ax.set_title(title) + try: ax.autoscale_view() ax.figure.canvas.draw() @@ -62,13 +76,13 @@ class MatplotlibPlots(AbstractPlottingLibrary): pass return plots - def scatter(self, ax, X, Y, **kwargs): - return ax.scatter(X, Y, **kwargs) + def scatter(self, ax, X, Y, color=None, label=None, **kwargs): + return ax.scatter(X, Y, c=color, label=label, **kwargs) - def plot(self, ax, X, Y, **kwargs): - return ax.plot(X, Y, **kwargs) + def plot(self, ax, X, Y, color=None, label=None, **kwargs): + return ax.plot(X, Y, color=color, label=label, **kwargs) - def plot_axis_lines(self, ax, X, **kwargs): + def plot_axis_lines(self, ax, X, color=None, label=None, **kwargs): from matplotlib import transforms from matplotlib.path import Path if 'transform' not in kwargs: @@ -76,31 +90,44 @@ class MatplotlibPlots(AbstractPlottingLibrary): if 'marker' not in kwargs: kwargs['marker'] = Path([[-.2,0.], [-.2,.5], [0.,1.], [.2,.5], [.2,0.], [-.2,0.]], [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]) - return ax.scatter(X, np.zeros_like(X), **kwargs) + return ax.scatter(X, np.zeros_like(X), c=color, label=label, **kwargs) - def xerrorbar(self, ax, X, Y, error, **kwargs): + def barplot(self, ax, x, height, width=0.8, bottom=0, color=None, label=None, **kwargs): + if 'align' not in kwargs: + kwargs['align'] = 'center' + return 
ax.bar(left=x, height=height, width=width, + bottom=bottom, label=label, color=color, + **kwargs) + + def xerrorbar(self, ax, X, Y, error, color=None, label=None, **kwargs): if not('linestyle' in kwargs or 'ls' in kwargs): kwargs['ls'] = 'none' - return ax.errorbar(X, Y, xerr=error, **kwargs) + return ax.errorbar(X, Y, xerr=error, ecolor=color, label=label, **kwargs) - def yerrorbar(self, ax, X, Y, error, **kwargs): + def yerrorbar(self, ax, X, Y, error, color=None, label=None, **kwargs): if not('linestyle' in kwargs or 'ls' in kwargs): kwargs['ls'] = 'none' - return ax.errorbar(X, Y, yerr=error, **kwargs) + return ax.errorbar(X, Y, yerr=error, ecolor=color, label=label, **kwargs) - def imshow(self, ax, X, **kwargs): - return ax.imshow(**kwargs) + def imshow(self, ax, X, label=None, **kwargs): + return ax.imshow(X, label=label, **kwargs) - def contour(self, ax, X, Y, C, levels=20, **kwargs): - return ax.contour(X, Y, C, levels=np.linspace(C.min(), C.max(), levels), **kwargs) + def contour(self, ax, X, Y, C, levels=20, label=None, **kwargs): + return ax.contour(X, Y, C, levels=np.linspace(C.min(), C.max(), levels), label=label, **kwargs) - def fill_between(self, ax, X, lower, upper, **kwargs): - return ax.fill_between(X, lower, upper, **kwargs) + def fill_between(self, ax, X, lower, upper, color=None, label=None, **kwargs): + return ax.fill_between(X, lower, upper, facecolor=color, label=label, **kwargs) - def fill_gradient(self, canvas, X, percentiles, **kwargs): + def fill_gradient(self, canvas, X, percentiles, color=None, label=None, **kwargs): ax = canvas plots = [] + if 'edgecolors' not in kwargs: + kwargs['edgecolors'] = 'none' + + if 'facecolors' not in kwargs: + kwargs['facecolors'] = color + if 'facecolors' in kwargs: kwargs['facecolor'] = kwargs.pop('facecolors') diff --git a/GPy/util/pca.py b/GPy/util/pca.py index 7168a28f..ed61049c 100644 --- a/GPy/util/pca.py +++ b/GPy/util/pca.py @@ -81,7 +81,7 @@ class PCA(object): """ Plot fractions of Eigenvalues 
sorted in descending order. """ - from GPy.plotting.matplot_dep import Tango + from GPy.plotting.gpy_plot import Tango Tango.reset() col = Tango.nextMedium() if ax is None: