Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-03 00:32:39 +02:00
demo changed, bgplvm still broken

commit 10703e4774
parent 865e9df255
3 changed files with 113 additions and 92 deletions
@@ -170,26 +170,30 @@ def bgplvm_simulation(burnin='scg', plot_sim=False, max_f_eval=12):
     from GPy import kern
     reload(mrd); reload(kern)

     Y = Ylist[1]

-    k = kern.linear(Q, ARD=True) + kern.bias(Q, .0001) + kern.white(Q, .1)
+    k = kern.linear(Q, ARD=True) + kern.white(Q, .00001) # + kern.bias(Q)
     m = Bayesian_GPLVM(Y, Q, init="PCA", M=M, kernel=k)
-    m.set('noise', Y.var() / 100.)
+    # m.set('noise',)
     # m.auto_scale_factor = True
     # m.scale_factor = 1.

     m.ensure_default_constraints()

     if burnin:
         print "initializing beta"
         cstr = "noise"
-        m.unconstrain(cstr); m.constrain_fixed(cstr)
+        m.unconstrain(cstr); m.constrain_fixed(cstr, Y.var() / 100.)
         m.optimize(burnin, messages=1, max_f_eval=max_f_eval)

         print "releasing beta"
         cstr = "noise"
         m.unconstrain(cstr); m.constrain_positive(cstr)

+    true_X = np.hstack((slist[1], slist[3], 0. * np.ones((N, Q - 2))))
+    m.set('X_\d', true_X)
+    m.constrain_fixed("X_\d")

     # # cstr = 'variance'
     # # m.unconstrain(cstr), m.constrain_bounded(cstr, 1e-10, 1.)
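Two changes in this hunk deserve a note: the observation noise is no longer set once via `m.set('noise', Y.var() / 100.)` but pinned to that value only for the burn-in run, and afterwards the latent means are clamped to the simulated ground truth for debugging (`'X_\d'` is a regex matching the X parameters by name). A minimal sketch of the fix-then-release idiom, assuming a GPy-style model `m` with the constraint API used above and data `Y`:

    # Burn-in: pin the noise variance so the optimizer fits the latent
    # space first instead of explaining the data away as noise.
    m.unconstrain('noise')
    m.constrain_fixed('noise', Y.var() / 100.)
    m.optimize('scg', messages=1, max_f_eval=12)

    # Release: make the noise variance a free (positive) parameter again.
    m.unconstrain('noise')
    m.constrain_positive('noise')
    m.optimize('scg', messages=1)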
@@ -82,7 +82,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         self._set_params(self.oldps[-1], save_old=False)

     def dKL_dmuS(self):
-        dKL_dS = (1. - (1. / self.X_variance)) * 0.5
+        dKL_dS = (1. - (1. / (self.X_variance))) * 0.5
         dKL_dmu = self.X
         return dKL_dmu, dKL_dS
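For reference, these two gradients are exactly what the standard KL term gives for a diagonal Gaussian variational posterior q(X) = N(mu, S) against a unit Gaussian prior p(X) = N(0, I):

    \mathrm{KL}\big(q \,\|\, p\big)
        = \frac{1}{2} \sum_{n=1}^{N} \sum_{q=1}^{Q}
          \left( \mu_{nq}^2 + S_{nq} - \ln S_{nq} - 1 \right),
    \qquad
    \frac{\partial \mathrm{KL}}{\partial \mu_{nq}} = \mu_{nq},
    \qquad
    \frac{\partial \mathrm{KL}}{\partial S_{nq}}
        = \frac{1}{2} \left( 1 - \frac{1}{S_{nq}} \right)

so dKL_dmu is the matrix of variational means (self.X) and dKL_dS is 0.5 * (1 - 1/S). The extra parentheses added around self.X_variance on the new side do not change the value; this change is a no-op.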
@@ -101,13 +101,26 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         return 0.5 * (var_mean + var_S) - 0.5 * self.Q * self.N

     def log_likelihood(self):
-        return sparse_GP.log_likelihood(self) - self.KL_divergence()
+        ll = sparse_GP.log_likelihood(self)
+        kl = self.KL_divergence()
+        return ll + kl

     def _log_likelihood_gradients(self):
         dKL_dmu, dKL_dS = self.dKL_dmuS()
         dL_dmu, dL_dS = self.dL_dmuS()
         # TODO: find way to make faster
-        dbound_dmuS = np.hstack(((dL_dmu - dKL_dmu).flatten(), (dL_dS - dKL_dS).flatten()))
+        d_dmu = (dL_dmu + dKL_dmu).flatten()
+        d_dS = (dL_dS + dKL_dS).flatten()
+        # TEST KL: ====================
+        # d_dmu = (dKL_dmu).flatten()
+        # d_dS = (dKL_dS).flatten()
+        # ========================
+        # TEST L: ====================
+        # d_dmu = (dL_dmu).flatten()
+        # d_dS = (dL_dS).flatten()
+        # ========================
+        dbound_dmuS = np.hstack((d_dmu, d_dS))
         return np.hstack((dbound_dmuS.flatten(), sparse_GP._log_likelihood_gradients(self)))

     def plot_latent(self, which_indices=None, *args, **kwargs):
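One observation on the sign flip, offered cautiously since the commit message itself says bgplvm is still broken: the usual variational treatment maximizes a lower bound that subtracts the KL term,

    \log p(Y) \;\ge\; \mathcal{L}
        = \mathcal{L}_{\mathrm{sparse}} - \mathrm{KL}\big(q(X) \,\|\, p(X)\big),

which is what the old `return ... - self.KL_divergence()` computed. The new `return ll + kl` (together with the matching switch from `dL - dKL` to `dL + dKL` in the gradients) is internally consistent, but it only yields a lower bound if `KL_divergence()` returns the negative KL; if `var_mean` and `var_S` are the obvious sums of mu^2 and of S - ln S, it returns the positive KL, so this sign convention is a plausible place to look.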
@@ -261,6 +261,11 @@ class GP(model):
             m, var, lower, upper = self.predict(Xnew, slices=which_functions)
             gpplot(Xnew, m, lower, upper)
             pb.plot(Xu[which_data], self.likelihood.data[which_data], 'kx', mew=1.5)
+            if self.has_uncertain_inputs:
+                pb.errorbar(Xu[which_data, 0], self.likelihood.data[which_data, 0],
+                            xerr=2 * np.sqrt(self.X_variance[which_data, 0]),
+                            ecolor='k', fmt=None, elinewidth=.5, alpha=.5)

             ymin, ymax = min(np.append(self.likelihood.data, lower)), max(np.append(self.likelihood.data, upper))
             ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
             pb.xlim(xmin, xmax)
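A side note on the added call: it draws horizontal bars of +/- 2 standard deviations on the uncertain inputs. `fmt=None` was the matplotlib spelling of the time for "no marker"; recent matplotlib requires the string 'none' instead. A self-contained sketch of the same plot with made-up data and the modern spelling:

    import numpy as np
    import matplotlib.pyplot as pb

    # Toy stand-ins for the model quantities used above.
    Xu = np.random.randn(20, 1)            # input means
    X_variance = 0.05 * np.ones((20, 1))   # per-input variances
    y = np.sin(Xu[:, 0])

    pb.plot(Xu[:, 0], y, 'kx', mew=1.5)
    pb.errorbar(Xu[:, 0], y, xerr=2 * np.sqrt(X_variance[:, 0]),
                ecolor='k', fmt='none', elinewidth=.5, alpha=.5)
    pb.show()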
@@ -268,8 +273,7 @@ class GP(model):
             if hasattr(self, 'Z'):
                 Zu = self.Z * self._Xstd + self._Xmean
                 pb.plot(Zu, Zu * 0 + pb.ylim()[0], 'r|', mew=1.5, markersize=12)
-            if self.has_uncertain_inputs:
-                pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten()))
+            # pb.errorbar(self.X[:,0], pb.ylim()[0]+np.zeros(self.N), xerr=2*np.sqrt(self.X_variance.flatten()))

         elif self.X.shape[1] == 2: # FIXME
             resolution = resolution or 50