Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-11 21:12:38 +02:00)

Merge branch 'devel' of github.com:SheffieldML/GPy into devel

Commit 7f632cfc2d: 7 changed files with 130 additions and 211 deletions

@@ -251,7 +251,18 @@ class parameterised(object):
    def _set_params_transformed(self, x):
        """Takes the vector x, modifies it (by untying, reparameterising or inserting fixed values), and then calls self._set_params."""
        self._set_params(self._untransform_params(x))

    def _untransform_params(self, x):
        """
        The transformation required for _set_params_transformed.

        This moves the vector x seen by the optimiser (unconstrained) to the
        valid parameter vector seen by the model.

        Note:
        - This function is separate from _set_params_transformed for downstream flexibility.
        """
        # work out how many places are fixed, and where they are. tricky logic!
        fix_places = self.fixed_indices + [t[1:] for t in self.tied_indices]
        if len(fix_places):

@@ -272,7 +283,8 @@ class parameterised(object):
        [np.put(xx,i,t.f(xx[i])) for i,t in zip(self.constrained_indices, self.constraints)]
        if hasattr(self,'debug'):
            stop
        self._set_params(xx)

        return xx

    def _get_param_names_transformed(self):
        """

@@ -118,9 +118,9 @@ def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
    return m

def BGPLVM_oil(optimize=True, N=100, Q=5, M=25, max_f_eval=4e3, plot=False, **k):
    np.random.seed(0)
    data = GPy.util.datasets.oil()
    from GPy.core.transformations import logexp_clipped
    np.random.seed(0)

    # create simple GP model
    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))

@@ -131,8 +131,7 @@ def BGPLVM_oil(optimize=True, N=100, Q=5, M=25, max_f_eval=4e3, plot=False, **k)
    m = GPy.models.Bayesian_GPLVM(Yn, Q, kernel=kernel, M=M, **k)
    m.data_labels = data['Y'][:N].argmax(axis=1)

    # m.constrain('variance', logexp_clipped())
    # m.constrain('length', logexp_clipped())
    m.constrain('variance|leng', logexp_clipped())
    m['lengt'] = m.X.var(0).max() / m.X.var(0)
    m['noise'] = Yn.var() / 100.
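(Editor's note, not part of the diff.) In GPy of this vintage, constrain() matches parameter names with a regular expression, so 'variance|leng' above constrains every variance and lengthscale parameter in one call. A hedged sketch of the pattern, continuing from the model m built above (the specific matched names, e.g. rbf_variance or rbf_lengthscale, are assumptions for illustration):

from GPy.core.transformations import logexp_clipped

# 'variance|leng' is a regex over parameter names: it hits e.g.
# rbf_variance, bias_variance and rbf_lengthscale in one pass
m.constrain('variance|leng', logexp_clipped())
m.ensure_default_constraints()   # default positivity for anything left over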

@@ -140,10 +139,6 @@ def BGPLVM_oil(optimize=True, N=100, Q=5, M=25, max_f_eval=4e3, plot=False, **k)

    # optimize
    if optimize:
        # m.unconstrain('noise'); m.constrain_fixed('noise')
        # m.optimize('scg', messages=1, max_f_eval=200)
        # m.unconstrain('noise')
        # m.constrain('noise', logexp_clipped())
        m.optimize('scg', messages=1, max_f_eval=max_f_eval)

    if plot:

@@ -155,11 +150,6 @@ def BGPLVM_oil(optimize=True, N=100, Q=5, M=25, max_f_eval=4e3, plot=False, **k)
        lvm_visualizer = GPy.util.visualize.lvm_dimselect(m.X[0, :].copy(), m, data_show, latent_axes=latent_axes) # , sense_axes=sense_axes)
        raw_input('Press enter to finish')
        plt.close('all')
        # # plot
        # print(m)
        # m.plot_latent(labels=m.data_labels)
        # pb.figure()
        # pb.bar(np.arange(m.kern.D), 1. / m.input_sensitivity())
    return m

def oil_100():

@@ -189,15 +179,6 @@ def _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim=False):
    s3 = s3(x)
    sS = sS(x)

    # s1 -= s1.mean()
    # s2 -= s2.mean()
    # s3 -= s3.mean()
    # sS -= sS.mean()
    # s1 /= .5 * (np.abs(s1).max() - np.abs(s1).min())
    # s2 /= .5 * (np.abs(s2).max() - np.abs(s2).min())
    # s3 /= .5 * (np.abs(s3).max() - np.abs(s3).min())
    # sS /= .5 * (np.abs(sS).max() - np.abs(sS).min())

    S1 = np.hstack([s1, sS])
    S2 = np.hstack([s2, sS])
    S3 = np.hstack([s3, sS])

@@ -217,16 +198,17 @@ def _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim=False):
    Y2 /= Y2.std(0)
    Y3 /= Y3.std(0)

    slist = [s1, s2, s3, sS]
    slist = [sS, s1, s2, s3]
    slist_names = ["sS", "s1", "s2", "s3"]
    Ylist = [Y1, Y2, Y3]

    if plot_sim:
        import pylab
        import itertools
        fig = pylab.figure("MRD Simulation", figsize=(8, 6))
        fig = pylab.figure("MRD Simulation Data", figsize=(8, 6))
        fig.clf()
        ax = fig.add_subplot(2, 1, 1)
        labls = sorted(filter(lambda x: x.startswith("s"), locals()))
        labls = slist_names
        for S, lab in itertools.izip(slist, labls):
            ax.plot(S, label=lab)
        ax.legend()
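(Editor's note, not part of the diff.) The simulation couples one private signal per view with a signal sS shared across all views; separating shared from private latent dimensions is exactly what the MRD model downstream is meant to recover. A toy sketch under assumed signal shapes:

import numpy as np

x = np.linspace(0, 4 * np.pi, 100)[:, None]
s1, s2, s3 = np.sin(x), np.cos(x), np.sin(2 * x)   # private signals (assumed forms)
sS = np.cos(3 * x)                                  # signal shared by all views (assumed form)
S1 = np.hstack([s1, sS])   # view 1 sees its private signal plus the shared one
S2 = np.hstack([s2, sS])
S3 = np.hstack([s3, sS])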

@@ -250,7 +232,6 @@ def bgplvm_simulation_matlab_compare():
    from GPy.models import mrd
    from GPy import kern
    reload(mrd); reload(kern)
    # k = kern.rbf(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
    k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
    m = Bayesian_GPLVM(Y, Q, init="PCA", M=M, kernel=k,
                       # X=mu,

@@ -260,26 +241,14 @@ def bgplvm_simulation_matlab_compare():
    m.auto_scale_factor = True
    m['noise'] = Y.var() / 100.
    m['linear_variance'] = .01

    # lscstr = 'X_variance'
    # m[lscstr] = .01
    # m.unconstrain(lscstr); m.constrain_fixed(lscstr, .1)

    # cstr = 'white'
    # m.unconstrain(cstr); m.constrain_bounded(cstr, .01, 1.)

    # cstr = 'noise'
    # m.unconstrain(cstr); m.constrain_bounded(cstr, .01, 1.)
    return m

def bgplvm_simulation(burnin='scg', plot_sim=False,
                      max_burnin=100, true_X=False,
                      do_opt=True,
                      max_f_eval=1000):
def bgplvm_simulation(optimize='scg',
                      plot=True,
                      max_f_eval=2e4):
    from GPy.core.transformations import logexp_clipped

    D1, D2, D3, N, M, Q = 15, 8, 8, 350, 3, 6
    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim)
    D1, D2, D3, N, M, Q = 15, 8, 8, 100, 3, 5
    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot)

    from GPy.models import mrd
    from GPy import kern

@@ -289,94 +258,22 @@ def bgplvm_simulation(burnin='scg', plot_sim=False,
    Y = Ylist[0]

    k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) # + kern.bias(Q)
    # k = kern.white(Q, .00001) + kern.bias(Q)
    m = Bayesian_GPLVM(Y, Q, init="PCA", M=M, kernel=k, _debug=True)
    # m.set('noise',)
    m.constrain('variance', logexp_clipped())

    m.ensure_default_constraints()
    m.constrain('variance|noise', logexp_clipped())
    # m.ensure_default_constraints()
    m['noise'] = Y.var() / 100.
    m['linear_variance'] = .001
    # m.auto_scale_factor = True
    # m.scale_factor = 1.
    m['linear_variance'] = .01


    if burnin:
        print "initializing beta"
        cstr = "noise"
        m.unconstrain(cstr); m.constrain_fixed(cstr, Y.var() / 70.)
        m.optimize(burnin, messages=1, max_f_eval=max_burnin)

        print "releasing beta"
        cstr = "noise"
        m.unconstrain(cstr); m.constrain_positive(cstr)

    if true_X:
        true_X = np.hstack((slist[0], slist[3], 0. * np.ones((N, Q - 2))))
        m.set('X_\d', true_X)
        m.constrain_fixed("X_\d")

    cstr = 'X_variance'
    # m.unconstrain(cstr), m.constrain_fixed(cstr, .0001)
    m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-7, .1)

    # cstr = 'X_variance'
    # m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-3, 1.)

    # m['X_var'] = np.ones(N * Q) * .5 + np.random.randn(N * Q) * .01

    # cstr = "iip"
    # m.unconstrain(cstr); m.constrain_fixed(cstr)

    # cstr = 'variance'
    # m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-10, 1.)
    # cstr = 'X_\d'
    # m.unconstrain(cstr), m.constrain_bounded(cstr, -10., 10.)
    #
    # cstr = 'noise'
    # m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-5, 1.)
    #
    # cstr = 'white'
    # m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-6, 1.)
    #
    # cstr = 'linear_variance'
    # m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-10, 10.)

    # cstr = 'variance'
    # m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-10, 10.)

    # np.seterr(all='call')
    # def ipdbonerr(errtype, flags):
    #     import ipdb; ipdb.set_trace()
    # np.seterrcall(ipdbonerr)

    if do_opt and burnin:
        try:
            m.optimize(burnin, messages=1, max_f_eval=max_f_eval)
        except:
            pass
        finally:
            return m
    if optimize:
        print "Optimizing model:"
        m.optimize('scg', max_iters=max_f_eval, max_f_eval=max_f_eval, messages=True)
    if plot:
        import pylab
        m.plot_X_1d()
        pylab.figure(); pylab.axis(); m.kern.plot_ARD()
    return m

def mrd_simulation(plot_sim=False):
    # num = 2
    # ard1 = np.array([1., 1, 0, 0], dtype=float)
    # ard2 = np.array([0., 1, 1, 0], dtype=float)
    # ard1[ard1 == 0] = 1E-10
    # ard2[ard2 == 0] = 1E-10

    # ard1i = 1. / ard1
    # ard2i = 1. / ard2

    # k = GPy.kern.rbf(Q, ARD=True, lengthscale=ard1i) + GPy.kern.bias(Q, 0) + GPy.kern.white(Q, 0.0001)
    # Y1 = np.random.multivariate_normal(np.zeros(N), k.K(X), D1).T
    # Y1 -= Y1.mean(0)
    #
    # k = GPy.kern.rbf(Q, ARD=True, lengthscale=ard2i) + GPy.kern.bias(Q, 0) + GPy.kern.white(Q, 0.0001)
    # Y2 = np.random.multivariate_normal(np.zeros(N), k.K(X), D2).T
    # Y2 -= Y2.mean(0)
    # make_params = lambda ard: np.hstack([[1], ard, [1, .3]])
    D1, D2, D3, N, M, Q = 150, 250, 300, 700, 3, 7
    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim)
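(Editor's note, not part of the diff.) The burnin branch deleted above implemented a two-stage schedule: clamp the noise variance while the latent space settles, then release it and optimize everything. A hedged reconstruction of that schedule, with parameter regexes and values taken from the removed lines:

m.unconstrain('noise'); m.constrain_fixed('noise', Y.var() / 70.)   # stage 1: clamp noise
m.optimize('scg', messages=1, max_f_eval=100)                       # short burn-in
m.unconstrain('noise'); m.constrain_positive('noise')               # stage 2: release noise
m.optimize('scg', messages=1, max_f_eval=1000)                      # full optimization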

@@ -386,59 +283,35 @@ def mrd_simulation(plot_sim=False):

    reload(mrd); reload(kern)

    # k = kern.rbf(2, ARD=True) + kern.bias(2) + kern.white(2)
    # Y1 = np.random.multivariate_normal(np.zeros(N), k.K(S1), D1).T
    # Y2 = np.random.multivariate_normal(np.zeros(N), k.K(S2), D2).T
    # Y3 = np.random.multivariate_normal(np.zeros(N), k.K(S3), D3).T

    # Ylist = Ylist[0:2]

    # k = kern.rbf(Q, ARD=True) + kern.bias(Q) + kern.white(Q)

    k = kern.linear(Q, [0.01] * Q, True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
    m = mrd.MRD(*Ylist, Q=Q, M=M, kernel=k, initx="concat", initz='permute', _debug=False)
    m = mrd.MRD(*Ylist, Q=Q, M=M, kernel=k, initx="concat", initz='permute')

    for i, Y in enumerate(Ylist):
        m['{}_noise'.format(i + 1)] = Y.var() / 100.

    m.constrain('variance', logexp_clipped())
    m.constrain('variance|noise', logexp_clipped())
    m.ensure_default_constraints()
    # m.auto_scale_factor = True

    # cstr = 'variance'
    # m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-12, 1.)
    #
    # cstr = 'linear_variance'
    # m.unconstrain(cstr), m.constrain_positive(cstr)

    print "initializing beta"
    cstr = "noise"
    m.unconstrain(cstr); m.constrain_fixed(cstr)
    m.optimize('scg', messages=1, max_f_eval=2e3, gtol=100)

    print "releasing beta"
    cstr = "noise"
    m.unconstrain(cstr); m.constrain(cstr, logexp_clipped())

    # np.seterr(all='call')
    # def ipdbonerr(errtype, flags):
    #     import ipdb; ipdb.set_trace()
    # np.seterrcall(ipdbonerr)

    return m # , mtest

def mrd_silhouette():

    pass
    return m

def brendan_faces():
    from GPy import kern
    data = GPy.util.datasets.brendan_faces()
    Y = data['Y'][0:-1:10, :]
    m = GPy.models.GPLVM(data['Y'], 2)
    Q = 2
    # Y = data['Y'][0:-1:2, :]
    Y = data['Y']
    Yn = Y - Y.mean()
    Yn /= Yn.std()

    m = GPy.models.GPLVM(Yn, Q)#, M=Y.shape[0]/4)

    # optimize
    # m.constrain_fixed('white', 1e-2)
    # m.constrain_bounded('noise', 1e-6, 10)
    m.constrain('rbf', GPy.core.transformations.logexp_clipped())

    m.ensure_default_constraints()
    m.optimize(messages=1, max_f_eval=10000)
    m.optimize('scg', messages=1, max_f_eval=10000)

    ax = m.plot_latent()
    y = m.likelihood.Y[0, :]

@@ -36,7 +36,8 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
    Returns
    x               the optimal value for x
    flog :          a list of all the objective values

    function_eval   number of fn evaluations
    status:         string describing convergence status
    """
    if xtol is None:
        xtol = 1e-6

@@ -153,5 +154,6 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
        # iterations.
        status = "maxiter exceeded"

    print ""
    if display:
        print ""
    return x, flog, function_eval, status
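(Editor's note, not part of the diff.) Given the Returns block documented above, a toy invocation of SCG might read as follows; the quadratic objective and its gradient are assumptions for illustration:

import numpy as np

f = lambda x: float(np.sum(x ** 2))   # toy objective
gradf = lambda x: 2. * x              # its gradient
x_opt, flog, n_evals, status = SCG(f, gradf, np.ones(3), maxiters=100)
print status                          # one possible value is "maxiter exceeded" (see above)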

@@ -69,6 +69,7 @@ class Gaussian(likelihood):
        # Note. for D>1, we need to re-normalise all the outputs independently.
        # This will mess up computations of diag(true_var), below.
        # note that the upper, lower quantiles should be the same shape as mean
        # Augment the output variance with the likelihood variance and rescale.
        true_var = (var + np.eye(var.shape[0]) * self._variance) * self._scale ** 2
        _5pc = mean - 2.*np.sqrt(np.diag(true_var))
        _95pc = mean + 2.*np.sqrt(np.diag(true_var))
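(Editor's note, not part of the diff.) true_var augments the GP predictive variance with the likelihood noise and undoes the output normalisation via scale squared. Strictly, mean plus or minus two standard deviations brackets roughly the 2.5th and 97.5th percentiles of a Gaussian, so the _5pc/_95pc names are approximate. A self-contained toy check with assumed values:

import numpy as np

mean = np.zeros(3)
var = 0.5 * np.eye(3)              # toy GP predictive covariance
_variance, _scale = 0.1, 2.0       # likelihood noise and normalisation scale (assumed)
true_var = (var + np.eye(var.shape[0]) * _variance) * _scale ** 2
_5pc = mean - 2. * np.sqrt(np.diag(true_var))    # ~2.5th percentile, despite the name
_95pc = mean + 2. * np.sqrt(np.diag(true_var))   # ~97.5th percentile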

@@ -208,23 +208,25 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        else:
            colors = iter(colors)
        plots = []
        x = np.arange(self.X.shape[0])
        for i in range(self.X.shape[1]):
            if axes is None:
                ax = fig.add_subplot(self.X.shape[1], 1, i + 1)
            else:
                ax = axes[i]
            ax.plot(self.X, c='k', alpha=.3)
            plots.extend(ax.plot(self.X.T[i], c=colors.next(), label=r"$\mathbf{{X_{{{}}}}}$".format(i)))
            ax.fill_between(np.arange(self.X.shape[0]),
            plots.extend(ax.plot(x, self.X.T[i], c=colors.next(), label=r"$\mathbf{{X_{{{}}}}}$".format(i)))
            ax.fill_between(x,
                            self.X.T[i] - 2 * np.sqrt(self.X_variance.T[i]),
                            self.X.T[i] + 2 * np.sqrt(self.X_variance.T[i]),
                            facecolor=plots[-1].get_color(),
                            alpha=.3)
            ax.legend(borderaxespad=0.)
            ax.set_xlim(x.min(), x.max())
            if i < self.X.shape[1] - 1:
                ax.set_xticklabels('')
        pylab.draw()
        fig.tight_layout(h_pad=.01) # , rect=(0, 0, 1, .95))
        fig.tight_layout(h_pad=.01) # , rect=(0, 0, 1, .95))
        return fig
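(Editor's note, not part of the diff.) The plot built here draws each latent dimension with a band of plus or minus two standard deviations taken from X_variance. A self-contained matplotlib sketch of the same pattern with stand-in data:

import numpy as np
import pylab

x = np.arange(100)
mu = np.sin(x / 10.)                 # stand-in for one latent dimension self.X.T[i]
sd = 0.1 * np.ones_like(mu)          # stand-in for np.sqrt(self.X_variance.T[i])
line, = pylab.plot(x, mu, label=r"$\mathbf{X_{0}}$")
pylab.fill_between(x, mu - 2 * sd, mu + 2 * sd,
                   facecolor=line.get_color(), alpha=.3)
pylab.legend(borderaxespad=0.)
pylab.xlim(x.min(), x.max())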

    def _debug_filter_params(self, x):

@@ -263,7 +265,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        kllls = np.array(self._savedklll)
        LL, = ax1.plot(kllls[:, 0], kllls[:, 1] - kllls[:, 2], '-', label=r'$\log p(\mathbf{Y})$', mew=1.5)
        KL, = ax1.plot(kllls[:, 0], kllls[:, 2], '-', label=r'$\mathcal{KL}(p||q)$', mew=1.5)
        L, = ax1.plot(kllls[:, 0], kllls[:, 1], '-', label=r'$L$', mew=1.5) # \mathds{E}_{q(\mathbf{X})}[p(\mathbf{Y|X})\frac{p(\mathbf{X})}{q(\mathbf{X})}]
        L, = ax1.plot(kllls[:, 0], kllls[:, 1], '-', label=r'$L$', mew=1.5) # \mathds{E}_{q(\mathbf{X})}[p(\mathbf{Y|X})\frac{p(\mathbf{X})}{q(\mathbf{X})}]

        param_dict = dict(self._savedparams)
        gradient_dict = dict(self._savedgradients)

@@ -411,7 +413,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):

        # parameter changes
        # ax2 = pylab.subplot2grid((4, 1), (1, 0), 3, 1, projection='3d')
        button_options = [0, 0] # [0]: clicked -- [1]: dragged
        button_options = [0, 0] # [0]: clicked -- [1]: dragged

        def update_plots(event):
            if button_options[0] and not button_options[1]:

@@ -483,4 +485,4 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
        cidp = figs[0].canvas.mpl_connect('button_press_event', onclick)
        cidd = figs[0].canvas.mpl_connect('motion_notify_event', motion)

        return ax1, ax2, ax3, ax4, ax5 # , ax6, ax7
        return ax1, ax2, ax3, ax4, ax5 # , ax6, ax7

@@ -28,7 +28,7 @@ class GPLVM(GP):
        if X is None:
            X = self.initialise_latent(init, Q, Y)
        if kernel is None:
            kernel = kern.rbf(Q) + kern.bias(Q)
            kernel = kern.rbf(Q, ARD=Q>1) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
        likelihood = Gaussian(Y, normalize=normalize_Y)
        GP.__init__(self, X, likelihood, kernel, **kwargs)
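(Editor's note, not part of the diff.) The new default kernel swaps a plain RBF for an ARD RBF plus small bias and white terms; a small white term is a common way to keep the kernel matrix well conditioned. Written out for an assumed latent dimensionality:

import numpy as np
from GPy import kern

Q = 3  # latent dimensionality (example value, an assumption)
k = kern.rbf(Q, ARD=Q > 1) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))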

@@ -44,7 +44,7 @@ class vector_show(data_show):


class lvm(data_show):
    def __init__(self, vals, model, data_visualize, latent_axes=None, latent_index=[0,1]):
    def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]):
        """Visualize a latent variable model

        :param model: the latent variable model to visualize.

@@ -71,7 +71,7 @@ class lvm(data_show):
        self.data_visualize = data_visualize
        self.model = model
        self.latent_axes = latent_axes

        self.sense_axes = sense_axes
        self.called = False
        self.move_on = False
        self.latent_index = latent_index

@@ -81,10 +81,12 @@ class lvm(data_show):
        self.latent_values = vals
        self.latent_handle = self.latent_axes.plot([0],[0],'rx',mew=2)[0]
        self.modify(vals)
        self.show_sensitivities()

    def modify(self, vals):
        """When latent values are modified, update the latent representation and also update the output visualization."""
        y = self.model.predict(vals)[0]
        print y
        self.data_visualize.modify(y)
        self.latent_handle.set_data(vals[self.latent_index[0]], vals[self.latent_index[1]])
        self.axes.figure.canvas.draw()

@@ -99,6 +101,7 @@ class lvm(data_show):
        if event.inaxes!=self.latent_axes: return
        self.move_on = not self.move_on
        self.called = True

    def on_move(self, event):
        if event.inaxes!=self.latent_axes: return
        if self.called and self.move_on:
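(Editor's note, not part of the diff.) These handlers only fire once connected to a matplotlib figure canvas, mirroring the mpl_connect calls in the Bayesian_GPLVM hunk above. A hedged wiring sketch, where viz is assumed to be an lvm instance and the event names are standard matplotlib ones:

fig = viz.latent_axes.figure
fig.canvas.mpl_connect('button_press_event', viz.on_click)
fig.canvas.mpl_connect('motion_notify_event', viz.on_move)
fig.canvas.mpl_connect('axes_leave_event', viz.on_leave)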

@@ -107,22 +110,54 @@ class lvm(data_show):
            self.latent_values[self.latent_index[1]]=event.ydata
            self.modify(self.latent_values)

    def show_sensitivities(self):
        # A click in the bar chart axis selects a dimension.
        if self.sense_axes != None:
            self.sense_axes.cla()
            self.sense_axes.bar(np.arange(self.model.Q),1./self.model.input_sensitivity(),color='b')

            if self.latent_index[1] == self.latent_index[0]:
                self.sense_axes.bar(np.array(self.latent_index[0]),1./self.model.input_sensitivity()[self.latent_index[0]],color='y')
                self.sense_axes.bar(np.array(self.latent_index[1]),1./self.model.input_sensitivity()[self.latent_index[1]],color='y')

            else:
                self.sense_axes.bar(np.array(self.latent_index[0]),1./self.model.input_sensitivity()[self.latent_index[0]],color='g')
                self.sense_axes.bar(np.array(self.latent_index[1]),1./self.model.input_sensitivity()[self.latent_index[1]],color='r')

            self.sense_axes.figure.canvas.draw()


class lvm_subplots(lvm):
    """
    latent_axes is a np array of dimension np.ceil(Q/2) + 1,
    one for each pair of the axes, and the last one for the sensitivity bar chart
    latent_axes is a np array of dimension np.ceil(Q/2),
    one for each pair of the latent dimensions.
    """
    def __init__(self, vals, model, data_visualize, latent_axes=None, latent_index=[0,1]):
        lvm.__init__(self, vals, model,data_visualize,latent_axes,[0,1])
    def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None):
        self.nplots = int(np.ceil(model.Q/2.))+1
        lvm.__init__(self,model,data_visualize,latent_axes,latent_index)
        self.latent_values = np.zeros(2*np.ceil(self.model.Q/2.)) # possibly an extra dimension on this
        assert latent_axes.size == self.nplots
        assert len(latent_axes)==self.nplots
        if vals==None:
            vals = model.X[0, :]
        self.latent_values = vals

        for i, axis in enumerate(latent_axes):
            if i == self.nplots-1:
                if self.nplots*2!=model.Q:
                    latent_index = [i*2, i*2]
                    lvm.__init__(self, self.latent_vals, model, data_visualize, axis, sense_axes, latent_index=latent_index)
            else:
                latent_index = [i*2, i*2+1]
                lvm.__init__(self, self.latent_vals, model, data_visualize, axis, latent_index=latent_index)



class lvm_dimselect(lvm):
    """
    A visualizer for latent variable models which allows selection of the latent dimensions to use by clicking on a bar chart of their length scales.

    For an example of the visualizer's use try:

        GPy.examples.dimensionality_reduction.BGPLVM_oil()

    """
    def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0, 1]):
        if latent_axes==None and sense_axes==None:

@@ -133,24 +168,9 @@ class lvm_dimselect(lvm):
        else:
            self.sense_axes = sense_axes

        lvm.__init__(self,vals,model,data_visualize,latent_axes,latent_index)
        self.show_sensitivities()
        lvm.__init__(self,vals,model,data_visualize,latent_axes,sense_axes,latent_index)
        print "use left and right mouse buttons to select dimensions"

    def show_sensitivities(self):
        # A click in the bar chart axis selects a dimension.
        self.sense_axes.cla()
        self.sense_axes.bar(np.arange(self.model.Q),1./self.model.input_sensitivity(),color='b')

        if self.latent_index[1] == self.latent_index[0]:
            self.sense_axes.bar(np.array(self.latent_index[0]),1./self.model.input_sensitivity()[self.latent_index[0]],color='y')
            self.sense_axes.bar(np.array(self.latent_index[1]),1./self.model.input_sensitivity()[self.latent_index[1]],color='y')

        else:
            self.sense_axes.bar(np.array(self.latent_index[0]),1./self.model.input_sensitivity()[self.latent_index[0]],color='g')
            self.sense_axes.bar(np.array(self.latent_index[1]),1./self.model.input_sensitivity()[self.latent_index[1]],color='r')

        self.sense_axes.figure.canvas.draw()

    def on_click(self, event):

@@ -177,12 +197,6 @@ class lvm_dimselect(lvm):
        self.called = True


    def on_move(self, event):
        if event.inaxes!=self.latent_axes: return
        if self.called and self.move_on:
            self.latent_values[self.latent_index[0]]=event.xdata
            self.latent_values[self.latent_index[1]]=event.ydata
            self.modify(self.latent_values)

    def on_leave(self,event):
        latent_values = self.latent_values.copy()

@@ -193,7 +207,7 @@

class image_show(data_show):
    """Show a data vector as an image."""
    def __init__(self, vals, axes=None, dimensions=(16,16), transpose=False, invert=False, scale=False, palette=[], presetMean = 0., presetSTD = -1., selectImage = 0):
    def __init__(self, vals, axes=None, dimensions=(16,16), transpose=False, invert=False, scale=False, palette=[], presetMean = 0., presetSTD = -1., selectImage=0):
        data_show.__init__(self, vals, axes)
        self.dimensions = dimensions
        self.transpose = transpose

@@ -214,15 +228,30 @@
    def modify(self, vals):
        self.set_image(vals)
        self.handle.set_array(self.vals)
        self.axes.figure.canvas.draw() # Teo - original line: plt.show()
        self.axes.figure.canvas.draw()

    def set_image(self, vals):
        dim = self.dimensions[0] * self.dimensions[1]
        self.vals = np.reshape(vals[0,dim*self.selectImage+np.array(range(dim))], self.dimensions, order='F')
        nImg = np.sqrt(vals[0,].size/dim)
        if nImg > 1 and nImg.is_integer(): # Show a mosaic of images
            nImg = np.int(nImg)
            self.vals = np.zeros((self.dimensions[0]*nImg, self.dimensions[1]*nImg))
            for iR in range(nImg):
                for iC in range(nImg):
                    currImgId = iR*nImg + iC
                    currImg = np.reshape(vals[0,dim*currImgId+np.array(range(dim))], self.dimensions, order='F')
                    firstRow = iR*self.dimensions[0]
                    lastRow = (iR+1)*self.dimensions[0]
                    firstCol = iC*self.dimensions[1]
                    lastCol = (iC+1)*self.dimensions[1]
                    self.vals[firstRow:lastRow, firstCol:lastCol] = currImg

        else:
            self.vals = np.reshape(vals[0,dim*self.selectImage+np.array(range(dim))], self.dimensions, order='F')
        if self.transpose:
            self.vals = self.vals.T.copy()
        if not self.scale:
            self.vals = self.vals
        # if not self.scale:
        #     self.vals = self.vals
        if self.invert:
            self.vals = -self.vals
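(Editor's note, not part of the diff.) A toy numeric check of the mosaic logic added above: four 2x2 images, flattened Fortran-style into one row vector, tile into a 4x4 mosaic. All values are assumptions for illustration:

import numpy as np

dims = (2, 2)                                   # per-image dimensions (toy)
dim = dims[0] * dims[1]
vals = np.arange(4. * dim)[None, :]             # four flattened images in one row
nImg = int(np.sqrt(vals[0, :].size / dim))      # 2, i.e. a 2x2 grid of images
mosaic = np.zeros((dims[0] * nImg, dims[1] * nImg))
for iR in range(nImg):
    for iC in range(nImg):
        img = np.reshape(vals[0, dim * (iR * nImg + iC) + np.arange(dim)],
                         dims, order='F')
        mosaic[iR * dims[0]:(iR + 1) * dims[0],
               iC * dims[1]:(iC + 1) * dims[1]] = img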

@@ -318,7 +347,7 @@ class stick_show(mocap_data_show):

    def process_values(self, vals):
        self.vals = vals.reshape((3, vals.shape[1]/3)).T.copy()


class skeleton_show(mocap_data_show):
    """data_show class for visualizing motion capture data encoded as a skeleton with angles."""
    def __init__(self, vals, skel, padding=0, axes=None):