Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-04 01:02:39 +02:00

Commit f4e270ae53: Merge branch 'params' of github.com:SheffieldML/GPy into params

11 changed files with 113 additions and 87 deletions

@@ -340,6 +340,10 @@ class Parameterizable(Constrainable):
         if add_self: names = map(lambda x: adjust(self.name) + "." + x, names)
         return names
 
+    @property
+    def num_params(self):
+        return len(self._parameters_)
+
     def _add_parameter_name(self, param):
         pname = adjust_name_for_printing(param.name)
         # and makes sure to not delete programmatically added parameters
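
For context, the new num_params property just reports how many parameter objects are registered on the object. A minimal stand-in showing the same pattern (plain Python, not GPy's Parameterizable; the Holder class and its contents are made up for illustration):

class Holder(object):
    """Illustrative stand-in for a parameterized object."""
    def __init__(self, parameters):
        self._parameters_ = list(parameters)

    @property
    def num_params(self):
        # number of registered parameter objects, exposed read-only
        return len(self._parameters_)

h = Holder(["variance", "lengthscale"])
print(h.num_params)  # -> 2
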
@@ -63,7 +63,7 @@ class NormalPosterior(VariationalPosterior):
         from ...plotting.matplot_dep import variational_plots
         return variational_plots.plot(self,*args)
 
-class SpikeAndSlabPosterior(VariationalPosterior):
+class SpikeAndSlab(VariationalPosterior):
     '''
     The SpikeAndSlab distribution for variational approximations.
     '''
@@ -71,7 +71,7 @@ class SpikeAndSlabPosterior(VariationalPosterior):
         """
         binary_prob : the probability of the distribution on the slab part.
         """
-        super(SpikeAndSlabPosterior, self).__init__(means, variances, name)
+        super(SpikeAndSlab, self).__init__(means, variances, name)
         self.gamma = Param("binary_prob",binary_prob,)
         self.add_parameter(self.gamma)
 
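
As background on the distribution named in the docstring (not code from this commit): a spike-and-slab variational factor draws from a Gaussian slab N(mu, S) with inclusion probability gamma and is exactly zero otherwise, so its marginal moments have a simple closed form. A small numpy sketch, assuming that standard form:

import numpy as np

# spike-and-slab factor: with probability gamma draw from N(mu, S), else exactly 0
gamma, mu, S = 0.3, 1.5, 0.25

mean = gamma * mu                      # E[x]
second_moment = gamma * (S + mu**2)    # E[x^2]
var = second_moment - mean**2          # Var[x]

# Monte Carlo check of the closed-form moments
z = np.random.rand(200000) < gamma
x = np.where(z, np.random.randn(200000) * np.sqrt(S) + mu, 0.0)
print(mean, x.mean())
print(var, x.var())
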
@@ -164,11 +164,10 @@ def bgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40,
     _np.random.seed(0)
     data = GPy.util.datasets.oil()
 
-    kernel = GPy.kern.RBF(Q, 1., [.1] * Q, ARD=True)# + GPy.kern.Bias(Q, _np.exp(-2))
+    kernel = GPy.kern.RBF(Q, 1., _np.random.uniform(0,1,(Q,)), ARD=True)# + GPy.kern.Bias(Q, _np.exp(-2))
     Y = data['X'][:N]
     m = GPy.models.BayesianGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing, **k)
     m.data_labels = data['Y'][:N].argmax(axis=1)
-    m['.*noise.var'] = Y.var() / 100.
 
     if optimize:
         m.optimize('scg', messages=verbose, max_iters=max_iters, gtol=.05)
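
Put together, the demo now draws the ARD lengthscales uniformly at random instead of fixing them to 0.1, and no longer sets the noise variance explicitly. A hedged usage sketch of the same call sequence (argument names follow the hunk above; they may differ in other GPy versions, and the iteration budget here is an arbitrary choice):

import numpy as np
import GPy

np.random.seed(0)
data = GPy.util.datasets.oil()          # loads the oil-flow data set
N, Q, num_inducing = 200, 7, 40

# ARD RBF with randomly initialised lengthscales, as in the updated demo
kernel = GPy.kern.RBF(Q, 1., np.random.uniform(0, 1, (Q,)), ARD=True)

Y = data['X'][:N]
m = GPy.models.BayesianGPLVM(Y, Q, kernel=kernel, num_inducing=num_inducing)
m.data_labels = data['Y'][:N].argmax(axis=1)
m.optimize('scg', messages=1, max_iters=100, gtol=.05)
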
@@ -83,7 +83,7 @@ class Add(Kern):
         from white import White
         from rbf import RBF
         #from rbf_inv import RBFInv
-        #from bias import Bias
+        from bias import Bias
         from linear import Linear
         #ffrom fixed import Fixed
 
@@ -131,11 +131,11 @@ class Add(Kern):
 
 
     def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
-        from white import white
-        from rbf import rbf
+        from white import White
+        from rbf import RBF
         #from rbf_inv import rbfinv
-        #from bias import bias
-        from linear import linear
+        from bias import Bias
+        from linear import Linear
         #ffrom fixed import fixed
 
         target = np.zeros(Z.shape)
@@ -146,15 +146,15 @@ class Add(Kern):
             for p2, is2 in zip(self._parameters_, self.input_slices):
                 if p2 is p1:
                     continue
-                if isinstance(p2, white):
+                if isinstance(p2, White):
                     continue
-                elif isinstance(p2, bias):
+                elif isinstance(p2, Bias):
                     eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.variance * 2.
                 else:
-                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(z[:,is2], mu[:,is2], s[:,is2]) * 2.
+                    eff_dL_dpsi1 += dL_dpsi2.sum(1) * p2.psi1(Z[:,is2], mu[:,is2], S[:,is2]) * 2.
 
 
-            target += p1.gradients_z_variational(dL_dkmm, dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], s[:,is1], z[:,is1])
+            target += p1.gradients_z_variational(dL_dKmm, dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, mu[:,is1], S[:,is1], Z[:,is1])
         return target
 
     def gradients_muS_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
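
The eff_dL_dpsi1 accumulation in that loop reflects how the second psi-statistic of a sum kernel decomposes. A hedged sketch of the identity being relied on, assuming the parts k_i act on disjoint input slices and the variational posterior q(x_n) factorises over those slices:

    \Psi^{(2)}_{n,m,m'} = \mathbb{E}_{q(x_n)}\big[k(x_n,z_m)\,k(x_n,z_{m'})\big]
                        = \sum_i \Psi^{(2),i}_{n,m,m'}
                          + \sum_{i \neq j} \Psi^{(1),i}_{n,m}\,\Psi^{(1),j}_{n,m'}

In the cross terms, White contributes nothing, Bias contributes its constant variance, and every other part contributes its Psi1; contracting a symmetric dL_dpsi2 with the (i, j) and (j, i) terms gives the same value twice, which appears as the `* 2.` factor in the loop.
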
@@ -195,6 +195,12 @@ class Add(Kern):
         from ..plotting.matplot_dep import kernel_plots
         kernel_plots.plot(self,*args)
 
+    def input_sensitivity(self):
+        in_sen = np.zeros((self.input_dim, self.num_params))
+        for i, [p, i_s] in enumerate(zip(self._parameters_, self.input_slices)):
+            in_sen[i_s, i] = p.input_sensitivity()
+        return in_sen
+
     def _getstate(self):
         """
         Get the current state of the class,
@@ -61,15 +61,19 @@ class Kern(Parameterized):
     def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
         raise NotImplementedError
 
-    def plot_ARD(self, *args):
-        """If an ARD kernel is present, plot a bar representation using matplotlib
-        See GPy.plotting.matplot_dep.plot_ARD
-        """
+    def plot_ARD(self, *args, **kw):
+        if "matplotlib" in sys.modules:
+            from ...plotting.matplot_dep import kernel_plots
+            self.plot_ARD.__doc__ += kernel_plots.plot_ARD.__doc__
         assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
         from ...plotting.matplot_dep import kernel_plots
-        return kernel_plots.plot_ARD(self,*args)
+        return kernel_plots.plot_ARD(self,*args,**kw)
+
+    def input_sensitivity(self):
+        """
+        Returns the sensitivity for each dimension of this kernel.
+        """
+        return np.zeros(self.input_dim)
 
     def __add__(self, other):
         """ Overloading of the '+' operator. for more control, see self.add """
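
The new input_sensitivity hook gives every kernel a common way to report per-dimension relevance: the base class returns zeros, Linear reports its ARD variances, and RBF reports inverse lengthscales (both in the hunks below). A minimal stand-in illustrating the convention (plain numpy classes made up for illustration, not GPy's kernels):

import numpy as np

class BaseKern(object):
    """Stand-in for Kern: no relevance information by default."""
    def __init__(self, input_dim):
        self.input_dim = input_dim
    def input_sensitivity(self):
        return np.zeros(self.input_dim)

class RBFLike(BaseKern):
    """Stand-in for RBF: shorter lengthscale -> more relevant dimension."""
    def __init__(self, input_dim, lengthscale):
        super(RBFLike, self).__init__(input_dim)
        self.lengthscale = np.asarray(lengthscale, dtype=float)
    def input_sensitivity(self):
        return 1. / self.lengthscale

class LinearLike(BaseKern):
    """Stand-in for Linear: larger ARD variance -> more relevant dimension."""
    def __init__(self, input_dim, variances):
        super(LinearLike, self).__init__(input_dim)
        self.variances = np.asarray(variances, dtype=float)
    def input_sensitivity(self):
        return self.variances

print(RBFLike(3, [0.5, 2.0, 10.0]).input_sensitivity())    # [2.  0.5 0.1]
print(LinearLike(3, [1.0, 0.1, 0.0]).input_sensitivity())  # [1.  0.1 0. ]
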
@@ -252,3 +252,6 @@ class Linear(Kern):
 
         return np.dot(ZA, inner).swapaxes(0, 1) # NOTE: self.ZAinner \in [num_inducing x N x input_dim]!
 
+    def input_sensitivity(self):
+        if self.ARD: return self.variances
+        else: return self.variances.repeat(self.input_dim)
@@ -182,7 +182,7 @@ class RBF(Kern):
 
         return grad
 
-    def update_gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
+    def gradients_q_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, posterior_variational):
         mu = posterior_variational.mean
         S = posterior_variational.variance
         self._psi_computations(Z, mu, S)
@@ -195,8 +195,7 @@ class RBF(Kern):
         grad_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * self._psi2_mudist).sum(1).sum(1)
         grad_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*self._psi2_mudist_sq - 1)).sum(1).sum(1)
 
-        posterior_variational.mean.gradient = grad_mu
-        posterior_variational.variance.gradient = grad_S
+        return grad_mu, grad_S
 
     def gradients_X(self, dL_dK, X, X2=None):
         #if self._X is None or X.base is not self._X.base or X2 is not None:
@@ -383,3 +382,7 @@ class RBF(Kern):
                              type_converters=weave.converters.blitz, **self.weave_options)
 
         return mudist, mudist_sq, psi2_exponent, psi2
+
+    def input_sensitivity(self):
+        if self.ARD: return 1./self.lengthscale
+        else: return (1./self.lengthscale).repeat(self.input_dim)
@@ -10,7 +10,7 @@ from ..inference.optimization import SCG
 from ..util import linalg
 from ..core.parameterization.variational import NormalPosterior, NormalPrior
 
-class BayesianGPLVM(SparseGP, GPLVM):
+class BayesianGPLVM(SparseGP):
     """
     Bayesian Gaussian Process Latent Variable Model
 
@@ -25,7 +25,8 @@ class BayesianGPLVM(SparseGP, GPLVM):
     def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
                  Z=None, kernel=None, inference_method=None, likelihood=None, name='bayesian gplvm', **kwargs):
         if X == None:
-            X = self.initialise_latent(init, input_dim, Y)
+            from ..util.initialization import initialize_latent
+            X = initialize_latent(init, input_dim, Y)
         self.init = init
 
         if X_variance is None:
@@ -63,7 +64,9 @@ class BayesianGPLVM(SparseGP, GPLVM):
         super(BayesianGPLVM, self).parameters_changed()
         self._log_marginal_likelihood -= self.variational_prior.KL_divergence(self.q)
 
-        self.kern.update_gradients_q_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
+        # TODO: This has to go into kern
+        # maybe a update_gradients_q_variational?
+        self.q.mean.gradient, self.q.variance.gradient = self.kern.gradients_q_variational(posterior_variational=self.q, Z=self.Z, **self.grad_dict)
 
         # update for the KL divergence
         self.variational_prior.update_gradients_KL(self.q)
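
Taken together with the RBF change above, the data flow becomes explicit: gradients_q_variational now returns (grad_mu, grad_S) instead of writing into the posterior, and the model assigns them to q.mean.gradient and q.variance.gradient here. A minimal stand-in of that return-then-assign pattern (illustrative names and placeholder gradient expressions, not GPy's API):

import numpy as np

class Q(object):
    """Stand-in variational posterior holding mean/variance and their gradients."""
    def __init__(self, mean, variance):
        self.mean, self.variance = mean, variance
        self.mean_gradient = None
        self.variance_gradient = None

def kernel_gradients(q):
    # the 'kernel' only computes and returns gradients ...
    grad_mu = 2.0 * q.mean        # placeholder expression
    grad_S = -0.5 / q.variance    # placeholder expression
    return grad_mu, grad_S

q = Q(np.ones(3), 0.1 * np.ones(3))
# ... and the caller (the model) decides where they are stored
q.mean_gradient, q.variance_gradient = kernel_gradients(q)
print(q.mean_gradient, q.variance_gradient)
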
@@ -28,28 +28,20 @@ class GPLVM(GP):
         :type init: 'PCA'|'random'
         """
         if X is None:
-            X = self.initialise_latent(init, input_dim, Y)
+            from ..util.initialization import initialize_latent
+            X = initialize_latent(init, input_dim, Y)
         if kernel is None:
-            kernel = kern.rbf(input_dim, ARD=input_dim > 1) + kern.bias(input_dim, np.exp(-2))
+            kernel = kern.RBF(input_dim, ARD=input_dim > 1) + kern.Bias(input_dim, np.exp(-2))
 
         likelihood = Gaussian()
 
         super(GPLVM, self).__init__(X, Y, kernel, likelihood, name='GPLVM')
-        self.X = Param('X', X)
+        self.X = Param('latent_mean', X)
         self.add_parameter(self.X, index=0)
 
-    def initialise_latent(self, init, input_dim, Y):
-        Xr = np.random.randn(Y.shape[0], input_dim)
-        if init == 'PCA':
-            PC = PCA(Y, input_dim)[0]
-            Xr[:PC.shape[0], :PC.shape[1]] = PC
-        else:
-            pass
-        return Xr
-
     def parameters_changed(self):
-        GP.parameters_changed(self)
-        self.X.gradient = self.kern.gradients_X(self.posterior.dL_dK, self.X)
+        super(GPLVM, self).parameters_changed()
+        self.X.gradient = self.kern.gradients_X(self._dL_dK, self.X, None)
 
     def _getstate(self):
         return GP._getstate(self)
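
Both GPLVM and BayesianGPLVM now delegate latent initialisation to GPy.util.initialization.initialize_latent instead of the removed initialise_latent method above, which filled a random matrix and, for init='PCA', overwrote the leading block with principal components. A self-contained numpy sketch of that behaviour (an illustrative equivalent, not the actual initialize_latent implementation):

import numpy as np

def pca_latent_init(Y, input_dim, init='PCA'):
    """Initialise a latent matrix X of shape (N, input_dim) from data Y."""
    Xr = np.random.randn(Y.shape[0], input_dim)
    if init == 'PCA':
        # top `input_dim` principal-component projections of the centred data
        Yc = Y - Y.mean(0)
        U, s, Vt = np.linalg.svd(Yc, full_matrices=False)
        PC = Yc.dot(Vt[:input_dim].T)
        Xr[:PC.shape[0], :PC.shape[1]] = PC
    return Xr

Y = np.random.randn(100, 12)
X0 = pca_latent_init(Y, 2)
print(X0.shape)  # (100, 2)
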
@@ -79,7 +71,8 @@ class GPLVM(GP):
         pb.plot(mu[:, 0], mu[:, 1], 'k', linewidth=1.5)
 
     def plot_latent(self, *args, **kwargs):
-        return util.plot_latent.plot_latent(self, *args, **kwargs)
+        from ..plotting.matplot_dep import dim_reduction_plots
 
+        return dim_reduction_plots.plot_latent(self, *args, **kwargs)
     def plot_magnification(self, *args, **kwargs):
         return util.plot_latent.plot_magnification(self, *args, **kwargs)
@@ -9,8 +9,41 @@ from matplotlib.transforms import offset_copy
 from ...kern import Linear
 
 
+def add_bar_labels(fig, ax, bars, bottom=0):
+    transOffset = offset_copy(ax.transData, fig=fig,
+                              x=0., y= -2., units='points')
+    transOffsetUp = offset_copy(ax.transData, fig=fig,
+                                x=0., y=1., units='points')
+    for bar in bars:
+        for i, [patch, num] in enumerate(zip(bar.patches, np.arange(len(bar.patches)))):
+            if len(bottom) == len(bar): b = bottom[i]
+            else: b = bottom
+            height = patch.get_height() + b
+            xi = patch.get_x() + patch.get_width() / 2.
+            va = 'top'
+            c = 'w'
+            t = TextPath((0, 0), "${xi}$".format(xi=xi), rotation=0, usetex=True, ha='center')
+            transform = transOffset
+            if patch.get_extents().height <= t.get_extents().height + 3:
+                va = 'bottom'
+                c = 'k'
+                transform = transOffsetUp
+            ax.text(xi, height, "${xi}$".format(xi=int(num)), color=c, rotation=0, ha='center', va=va, transform=transform)
+
+    ax.set_xticks([])
+
+
+def plot_bars(fig, ax, x, ard_params, color, name, bottom=0):
+    from ...util.misc import param_to_array
+    return ax.bar(left=x, height=param_to_array(ard_params), width=.8,
+                  bottom=bottom, align='center',
+                  color=color, edgecolor='k', linewidth=1.2,
+                  label=name.replace("_"," "))
+
 def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False):
-    """If an ARD kernel is present, plot a bar representation using matplotlib
+    """
+    If an ARD kernel is present, plot a bar representation using matplotlib
 
     :param fignum: figure number of the plot
     :param ax: matplotlib axis to plot on
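
The two new helpers split the ARD plot into drawing one bar series per kernel part (plot_bars) and writing the input index onto each bar (add_bar_labels). A rough, self-contained matplotlib sketch of the same idea, using made-up sensitivity values and the current matplotlib API rather than the left=/TextPath details used above:

import numpy as np
import matplotlib
matplotlib.use("Agg")            # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

sens = np.array([[0.8, 0.2, 1.5],    # one row per kernel part,
                 [0.3, 0.9, 0.1]])   # columns are input dimensions (made up)
x = np.arange(sens.shape[1])

fig, ax = plt.subplots()
bottom = np.zeros(sens.shape[1])
for row in sens:                     # stack one bar series per part
    ax.bar(x, row, width=.8, bottom=bottom, edgecolor='k', linewidth=1.2)
    bottom += row

for xi, h in zip(x, bottom):         # label each bar with its input index
    ax.text(xi, h, "%d" % xi, ha='center', va='bottom')

ax.set_xticks([])
ax.set_xlim(-.5, sens.shape[1] - .5)
fig.savefig("ard_bars.png")
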
@@ -24,50 +57,27 @@ def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False):
         ax = fig.add_subplot(111)
     else:
         fig = ax.figure
-    Tango.reset()
-    xticklabels = []
-    bars = []
-    x0 = 0
-    #for p in kernel._parameters_:
-    p = kernel
-    c = Tango.nextMedium()
-    if hasattr(p, 'ARD') and p.ARD:
     if title is None:
-        ax.set_title('ARD parameters, %s kernel' % p.name)
+        ax.set_title('ARD parameters, %s kernel' % kernel.name)
     else:
         ax.set_title(title)
-    if isinstance(p, Linear):
-        ard_params = p.variances
-    else:
-        ard_params = 1. / p.lengthscale
-    x = np.arange(x0, x0 + len(ard_params))
-    from ...util.misc import param_to_array
-    bars.append(ax.bar(x, param_to_array(ard_params), align='center', color=c, edgecolor='k', linewidth=1.2, label=p.name.replace("_"," ")))
-    xticklabels.extend([r"$\mathrm{{{name}}}\ {x}$".format(name=p.name, x=i) for i in np.arange(len(ard_params))])
-    x0 += len(ard_params)
-    x = np.arange(x0)
-    transOffset = offset_copy(ax.transData, fig=fig,
-                              x=0., y= -2., units='points')
-    transOffsetUp = offset_copy(ax.transData, fig=fig,
-                                x=0., y=1., units='points')
-    for bar in bars:
-        for patch, num in zip(bar.patches, np.arange(len(bar.patches))):
-            height = patch.get_height()
-            xi = patch.get_x() + patch.get_width() / 2.
-            va = 'top'
-            c = 'w'
-            t = TextPath((0, 0), "${xi}$".format(xi=xi), rotation=0, usetex=True, ha='center')
-            transform = transOffset
-            if patch.get_extents().height <= t.get_extents().height + 3:
-                va = 'bottom'
-                c = 'k'
-                transform = transOffsetUp
-            ax.text(xi, height, "${xi}$".format(xi=int(num)), color=c, rotation=0, ha='center', va=va, transform=transform)
-    # for xi, t in zip(x, xticklabels):
-    #     ax.text(xi, maxi / 2, t, rotation=90, ha='center', va='center')
-    # ax.set_xticklabels(xticklabels, rotation=17)
-    ax.set_xticks([])
-    ax.set_xlim(-.5, x0 - .5)
+    Tango.reset()
+    bars = []
+    ard_params = np.atleast_2d(kernel.input_sensitivity())
+    bottom = 0
+    x = np.arange(kernel.input_dim)
+    for i in range(ard_params.shape[-1]):
+        c = Tango.nextMedium()
+        bars.append(plot_bars(fig, ax, x, ard_params[:,i], c, kernel._parameters_[i].name, bottom=bottom))
+        bottom += ard_params[:,i]
+    ax.set_xlim(-.5, kernel.input_dim - .5)
+    add_bar_labels(fig, ax, [bars[-1]], bottom=bottom-ard_params[:,i])
     if legend:
         if title is '':
             mode = 'expand'
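
With this rewrite, plot_ARD no longer special-cases Linear versus lengthscale kernels; it asks the kernel for input_sensitivity() and stacks one bar series per part. A hedged usage sketch (call names as in this branch; exact behaviour and defaults may differ between GPy versions):

import numpy as np
import GPy

np.random.seed(1)
X = np.random.randn(80, 3)
Y = np.sin(X[:, :1]) + 0.05 * np.random.randn(80, 1)    # only dimension 0 matters

k = GPy.kern.RBF(3, ARD=True) + GPy.kern.Linear(3, ARD=True)
m = GPy.models.GPRegression(X, Y, k)
m.optimize()

# relevance of each input dimension for each part (see Add.input_sensitivity above)
print(m.kern.input_sensitivity())
m.kern.plot_ARD()    # stacked bar plot, one colour per summand
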
@@ -13,6 +13,7 @@ import classification
 import subarray_and_sorting
 import caching
 import diag
+import initialization
 
 try:
     import sympy