Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-10 12:32:40 +02:00
Merge branch 'devel' of github.com:SheffieldML/GPy into devel

Conflicts:
    GPy/models/GP.py

Commit 171a25d46d: 4 changed files with 67 additions and 42 deletions
@@ -18,7 +18,7 @@ class opt_SGD(Optimizer):
 
     """
 
-    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, **kwargs):
+    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs):
         self.opt_name = "Stochastic Gradient Descent"
 
         self.model = model
@@ -34,12 +34,14 @@ class opt_SGD(Optimizer):
         self.param_traces = [('noise',[])]
         self.iteration_file = iteration_file
         self.learning_rate_adaptation = learning_rate_adaptation
+        self.actual_iter = actual_iter
         if self.learning_rate_adaptation != None:
             if self.learning_rate_adaptation == 'annealing':
                 self.learning_rate_0 = self.learning_rate
             else:
                 self.learning_rate_0 = self.learning_rate.mean()
 
+        self.schedule = schedule
         # if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1:
         #     self.param_traces.append(('bias',[]))
         # if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1:
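The two new attributes wire up the constructor arguments added above: schedule is read by the annealing branch of adapt_learning_rate as one learning rate per iteration, and actual_iter lets a caller override the loop counter. A hypothetical usage sketch (not from this commit; lr0 and iters are illustrative names) showing how a caller might precompute a decaying schedule matching the old annealing rule:

    import numpy as np

    # Illustrative only: build a per-iteration schedule to pass as
    # opt_SGD(..., learning_rate_adaptation='annealing', schedule=schedule).
    lr0, iters = 1e-4, 100
    schedule = lr0 / (1.0 + np.arange(iters) / 10.0)  # mirrors the old lr_0/(1+t/10) rule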
@@ -224,48 +226,36 @@ class opt_SGD(Optimizer):
 
         return f, step, self.model.N
 
-    def adapt_learning_rate(self, t):
+    def adapt_learning_rate(self, t, D):
         if self.learning_rate_adaptation == 'adagrad':
-            if t > 5:
-                g = np.array(self.grads)
-                l2_g = np.sqrt(np.square(g).sum(0))
-                self.learning_rate = 0.001/l2_g
+            if t > 0:
+                g_k = self.model.grads
+                self.s_k += np.square(g_k)
+                t0 = 100.0
+                self.learning_rate = 0.1/(t0 + np.sqrt(self.s_k))
+
+                import pdb; pdb.set_trace()
             else:
                 self.learning_rate = np.zeros_like(self.learning_rate)
+                self.s_k = np.zeros_like(self.x_opt)
 
         elif self.learning_rate_adaptation == 'annealing':
-            self.learning_rate = self.learning_rate_0/(1+float(t+1)/10)
+            #self.learning_rate = self.learning_rate_0/(1+float(t+1)/10)
+            self.learning_rate = np.ones_like(self.learning_rate) * self.schedule[t]
 
 
         elif self.learning_rate_adaptation == 'semi_pesky':
             if self.model.__class__.__name__ == 'Bayesian_GPLVM':
+                g_t = self.model.grads
                 if t == 0:
-                    N = self.model.N
-                    Q = self.model.Q
-                    M = self.model.M
-
-                    iip_pos = np.arange(2*N*Q,2*N*Q+M*Q)
-                    mu_pos = np.arange(0,N*Q)
-                    S_pos = np.arange(N*Q,2*N*Q)
-                    self.vbparam_dict = {'iip': [iip_pos],
-                                         'mu': [mu_pos],
-                                         'S': [S_pos]}
-
-                    for k in self.vbparam_dict.keys():
-                        hbar_t = 0.0
-                        tau_t = 1000.0
-                        gbar_t = 0.0
-                        self.vbparam_dict[k].append(hbar_t)
-                        self.vbparam_dict[k].append(tau_t)
-                        self.vbparam_dict[k].append(gbar_t)
-
-                g_t = self.model.grads
-
-                for k in self.vbparam_dict.keys():
-                    pos, hbar_t, tau_t, gbar_t = self.vbparam_dict[k]
-
-                    gbar_t = (1-1/tau_t)*gbar_t + 1/tau_t * g_t[pos]
-                    hbar_t = (1-1/tau_t)*hbar_t + 1/tau_t * np.dot(g_t[pos].T, g_t[pos])
-                    self.learning_rate[pos] = np.dot(gbar_t.T, gbar_t) / hbar_t
-                    tau_t = tau_t*(1-self.learning_rate[pos]) + 1
-                    self.vbparam_dict[k] = [pos, hbar_t, tau_t, gbar_t]
+                    self.hbar_t = 0.0
+                    self.tau_t = 100.0
+                    self.gbar_t = 0.0
+
+                self.gbar_t = (1-1/self.tau_t)*self.gbar_t + 1/self.tau_t * g_t
+                self.hbar_t = (1-1/self.tau_t)*self.hbar_t + 1/self.tau_t * np.dot(g_t.T, g_t)
+                self.learning_rate = np.ones_like(self.learning_rate)*(np.dot(self.gbar_t.T, self.gbar_t) / self.hbar_t)
+                tau_t = self.tau_t*(1-self.learning_rate) + 1
 
 
     def opt(self, f_fp=None, f=None, fp=None):
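Two things are worth unpacking in this hunk. The rewritten adagrad branch accumulates squared gradients in s_k and scales a base rate by 1/(t0 + sqrt(s_k)), the standard AdaGrad recipe with an offset (the import pdb; pdb.set_trace() is a leftover breakpoint in the committed code). The semi_pesky branch replaces the per-block bookkeeping with single moving averages of the gradient (gbar_t) and of its squared norm (hbar_t), in the spirit of Schaul et al.'s adaptive learning rates; note that the last line assigns the updated memory size to a local tau_t, so self.tau_t is never actually advanced. A minimal self-contained sketch of both update rules, assuming a flat float gradient vector g (these helpers are illustrative, not the library API):

    import numpy as np

    def adagrad_rate(s_k, g, t0=100.0, base=0.1):
        """AdaGrad-style per-parameter rate as in the 'adagrad' branch above:
        accumulate squared gradients, shrink the step where they have been large."""
        s_k = s_k + np.square(g)
        return base / (t0 + np.sqrt(s_k)), s_k

    def pesky_rate(gbar, hbar, tau, g):
        """Variance-based rate in the spirit of the 'semi_pesky' branch:
        eta = |E[g]|^2 / E[g.g]; tau grows while consecutive gradients agree."""
        gbar = (1.0 - 1.0/tau) * gbar + (1.0/tau) * g
        hbar = (1.0 - 1.0/tau) * hbar + (1.0/tau) * np.dot(g, g)
        eta = np.dot(gbar, gbar) / hbar
        tau = tau * (1.0 - eta) + 1.0
        return eta, gbar, hbar, tau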
@@ -274,8 +264,8 @@ class opt_SGD(Optimizer):
 
         X, Y = self.model.X.copy(), self.model.likelihood.Y.copy()
 
-        self.model.likelihood.YYT = None
-        self.model.likelihood.trYYT = None
+        self.model.likelihood.YYT = 0
+        self.model.likelihood.trYYT = 0
         self.model.likelihood._bias = 0.0
         self.model.likelihood._scale = 1.0
 
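Resetting YYT and trYYT to 0 rather than None keeps the cached target statistics numeric while the full data are swapped out for minibatches; the hunk at -316 below rebuilds them per batch. For orientation, the statistics themselves are just (sketch; Y_batch is an illustrative stand-in):

    import numpy as np

    Y_batch = np.random.randn(32, 5)   # illustrative minibatch of targets
    YYT = np.dot(Y_batch, Y_batch.T)   # cached outer product of the targets
    trYYT = np.trace(YYT)              # matches the trYYT update in the later hunk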
@@ -287,6 +277,9 @@ class opt_SGD(Optimizer):
 
         step = np.zeros_like(num_params)
         for it in range(self.iterations):
+            if self.actual_iter != None:
+                it = self.actual_iter
+
             self.model.grads = np.zeros_like(self.x_opt) # TODO this is ugly
 
             if it == 0 or self.self_paced is False:
@@ -316,6 +309,7 @@ class opt_SGD(Optimizer):
                 self.model.likelihood.trYYT = np.trace(self.model.likelihood.YYT)
                 Nj = N
             f, fp = f_fp(self.x_opt)
+            self.model.grads = fp.copy()
             step = self.momentum * step + self.learning_rate * fp
             self.x_opt -= step
 
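With the gradient now cached on the model, the loop body is a classical momentum update: the velocity step accumulates a fraction of its previous value plus the current scaled gradient, and is subtracted because fp is the gradient of the negative log likelihood. A minimal sketch of one such step, assuming f_fp returns the objective and its gradient as in the diff:

    # One SGD-with-momentum step as in the loop above (sketch, not the library API).
    def momentum_step(x_opt, step, f_fp, learning_rate, momentum=0.9):
        f, fp = f_fp(x_opt)                          # objective and gradient of the NLL
        step = momentum * step + learning_rate * fp  # accumulate velocity
        return x_opt - step, step, f                 # subtract: fp points uphill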
@@ -326,6 +320,7 @@ class opt_SGD(Optimizer):
             sys.stdout.flush()
             self.param_traces['noise'].append(noise)
 
+            self.adapt_learning_rate(it+count, D)
             NLL.append(f)
             self.fopt_trace.append(NLL[-1])
             # fig = plt.figure('traces')
@@ -335,7 +330,6 @@ class opt_SGD(Optimizer):
             # for k in self.param_traces.keys():
             #     self.param_traces[k].append(self.model.get(k)[0])
             self.grads.append(self.model.grads.tolist())
-            self.adapt_learning_rate(it)
         # should really be a sum(), but earlier samples in the iteration will have a very crappy ll
         self.f_opt = np.mean(NLL)
         self.model.N = N
@@ -41,8 +41,40 @@ class MRD(model):
     :param kernel:
         kernel to use
     """
-    #TODO allow different kernels for different outputs
-    #def __init__(self, *Ylist, **kwargs):
+    def __init__(self,likelihood_list,Q,M=10,names=None,kernels=None,initX='PCA',initz='permute',_debug=False, **kwargs):
+        if names is None:
+            self.names = ["{}".format(i + 1) for i in range(len(likelihood_list))]
+
+        #sort out the kernels
+        if kernels is None:
+            kernels = [None]*len(likelihood_list)
+        elif isinstance(kernels,kern.kern):
+            kernels = [kernels.copy() for i in range(len(likelihood_list))]
+        else:
+            assert len(kernels)==len(likelihood_list), "need one kernel per output"
+            assert all([isinstance(k, kern.kern) for k in kernels]), "invalid kernel object detected!"
+
+        self.Q = Q
+        self.M = M
+        self.N = self.gref.N
+        self.NQ = self.N * self.Q
+        self.MQ = self.M * self.Q
+
+        self._init = True
+        X = self._init_X(initx, likelihood_list)
+        Z = self._init_Z(initz, X)
+        self.bgplvms = [Bayesian_GPLVM(l, k, X=X, Z=Z, M=self.M, **kwargs) for l,k in zip(likelihood_list,kernels)]
+
+        del self._init
+
+        self.gref = self.bgplvms[0]
+        nparams = numpy.array([0] + [sparse_GP._get_params(g).size - g.Z.size for g in self.bgplvms])
+        self.nparams = nparams.cumsum()
+
+        model.__init__(self) # @UndefinedVariable
+
+
+
     def __init__(self, *likelihood_list, **kwargs):
         if kwargs.has_key("_debug"):
            self._debug = kwargs['_debug']
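The new constructor accepts kernels in three forms: nothing (each output model picks its default), a single kern.kern that is copied so every output gets a structurally identical but independently parameterised kernel, or an explicit list that must match the number of outputs. Two apparent slips in the committed code are worth flagging: self.N = self.gref.N runs before self.gref is assigned a few lines later, and the parameter is spelled initX but referenced as initx. A standalone sketch of the kernel dispatch, with illustrative names (normalise_kernels and kernel_type are not in the commit):

    def normalise_kernels(kernels, n_outputs, kernel_type):
        """Sketch of the kernel handling in the new MRD.__init__; the commit
        uses kern.kern and likelihood_list where this uses generic names."""
        if kernels is None:
            return [None] * n_outputs                          # per-output defaults
        if isinstance(kernels, kernel_type):
            return [kernels.copy() for _ in range(n_outputs)]  # independent copies
        assert len(kernels) == n_outputs, "need one kernel per output"
        assert all(isinstance(k, kernel_type) for k in kernels), "invalid kernel object detected!"
        return list(kernels)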
@@ -234,7 +234,7 @@ class sparse_GP(GP):
             Kxx = self.kern.Kdiag(Xnew, which_parts=which_parts)
             var = Kxx - np.sum(Kx * np.dot(Kmmi_LmiBLmi, Kx), 0)
         else:
-            assert which_parts=='all', "swithching out parts of variational kernels is not implemented"
+            # assert which_parts=='all', "swithching out parts of variational kernels is not implemented"
             Kx = self.kern.psi1(self.Z, Xnew, X_variance_new)#, which_parts=which_parts) TODO: which_parts
             mu = np.dot(Kx, self.Cpsi1V)
             if full_cov:
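In this uncertain-input branch the exact kernel row is replaced by the psi1 statistic, the expectation of K(Xnew, Z) under the Gaussian input distribution, and the predictive mean is its product with the cached projection Cpsi1V. A shape-level sketch under those assumptions (names as in the diff):

    import numpy as np

    # psi1:   (n_new, M)  expected kernel <K(Xnew, Z)> under the input distribution
    # Cpsi1V: (M, D)      cached projection of the targets through the inducing posterior
    def predictive_mean(psi1, Cpsi1V):
        return np.dot(psi1, Cpsi1V)    # (n_new, D)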
@@ -86,7 +86,6 @@ class lvm(data_show):
     def modify(self, vals):
         """When latent values are modified update the latent representation and also update the output visualization."""
         y = self.model.predict(vals)[0]
-        print y
         self.data_visualize.modify(y)
         self.latent_handle.set_data(vals[self.latent_index[0]], vals[self.latent_index[1]])
         self.axes.figure.canvas.draw()