Nicolo Fusi 2013-05-20 19:22:04 +01:00
parent 3c12f85a28
commit dced085a06


@@ -18,7 +18,7 @@ class opt_SGD(Optimizer):
     """
-    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, **kwargs):
+    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs):
         self.opt_name = "Stochastic Gradient Descent"
         self.model = model
@@ -34,12 +34,14 @@ class opt_SGD(Optimizer):
         self.param_traces = [('noise',[])]
         self.iteration_file = iteration_file
         self.learning_rate_adaptation = learning_rate_adaptation
+        self.actual_iter = actual_iter
         if self.learning_rate_adaptation != None:
             if self.learning_rate_adaptation == 'annealing':
                 self.learning_rate_0 = self.learning_rate
             else:
                 self.learning_rate_0 = self.learning_rate.mean()
+        self.schedule = schedule
         # if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1:
         #     self.param_traces.append(('bias',[]))
         # if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1:
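Note: the two new keyword arguments wire an externally supplied learning-rate schedule into the optimizer. `schedule` is indexed per iteration in the 'annealing' branch of adapt_learning_rate (next hunk), and `actual_iter` pins the loop counter in opt(). A minimal, hypothetical construction call follows; the decay values, `start` (initial parameter vector) and `model` are illustrative assumptions, not part of this commit:

    import numpy as np

    # one learning rate per outer iteration, e.g. a simple 1/t-style decay
    schedule = 1e-4 / (1.0 + np.arange(100) / 10.0)
    opt = opt_SGD(start, iterations=100, model=model,
                  learning_rate_adaptation='annealing',
                  schedule=schedule,   # consumed as self.schedule[t]
                  actual_iter=None)    # or an int to pin the iteration index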
@@ -224,48 +226,67 @@ class opt_SGD(Optimizer):
         return f, step, self.model.N

-    def adapt_learning_rate(self, t):
+    def adapt_learning_rate(self, t, D):
         if self.learning_rate_adaptation == 'adagrad':
-            if t > 5:
-                g = np.array(self.grads)
-                l2_g = np.sqrt(np.square(g).sum(0))
-                self.learning_rate = 0.001/l2_g
+            if t > 0:
+                g_k = self.model.grads
+                self.s_k += np.square(g_k)
+                t0 = 100.0
+                self.learning_rate = 0.1/(t0 + np.sqrt(self.s_k))
+                import pdb; pdb.set_trace()
             else:
                 self.learning_rate = np.zeros_like(self.learning_rate)
+                self.s_k = np.zeros_like(self.x_opt)
         elif self.learning_rate_adaptation == 'annealing':
-            self.learning_rate = self.learning_rate_0/(1+float(t+1)/10)
+            #self.learning_rate = self.learning_rate_0/(1+float(t+1)/10)
+            self.learning_rate = np.ones_like(self.learning_rate) * self.schedule[t]
         elif self.learning_rate_adaptation == 'semi_pesky':
             if self.model.__class__.__name__ == 'Bayesian_GPLVM':
+                g_t = self.model.grads
                 if t == 0:
-                    N = self.model.N
-                    Q = self.model.Q
-                    M = self.model.M
-                    iip_pos = np.arange(2*N*Q,2*N*Q+M*Q)
-                    mu_pos = np.arange(0,N*Q)
-                    S_pos = np.arange(N*Q,2*N*Q)
-                    self.vbparam_dict = {'iip': [iip_pos],
-                                         'mu': [mu_pos],
-                                         'S': [S_pos]}
-                    for k in self.vbparam_dict.keys():
-                        hbar_t = 0.0
-                        tau_t = 1000.0
-                        gbar_t = 0.0
-                        self.vbparam_dict[k].append(hbar_t)
-                        self.vbparam_dict[k].append(tau_t)
-                        self.vbparam_dict[k].append(gbar_t)
-                g_t = self.model.grads
-                for k in self.vbparam_dict.keys():
-                    pos, hbar_t, tau_t, gbar_t = self.vbparam_dict[k]
-                    gbar_t = (1-1/tau_t)*gbar_t + 1/tau_t * g_t[pos]
-                    hbar_t = (1-1/tau_t)*hbar_t + 1/tau_t * np.dot(g_t[pos].T, g_t[pos])
-                    self.learning_rate[pos] = np.dot(gbar_t.T, gbar_t) / hbar_t
-                    tau_t = tau_t*(1-self.learning_rate[pos]) + 1
-                    self.vbparam_dict[k] = [pos, hbar_t, tau_t, gbar_t]
+                    self.hbar_t = 0.0
+                    self.tau_t = 100.0
+                    self.gbar_t = 0.0
+                self.gbar_t = (1-1/self.tau_t)*self.gbar_t + 1/self.tau_t * g_t
+                self.hbar_t = (1-1/self.tau_t)*self.hbar_t + 1/self.tau_t * np.dot(g_t.T, g_t)
+                self.learning_rate = np.ones_like(self.learning_rate)*(np.dot(self.gbar_t.T, self.gbar_t) / self.hbar_t)
+                tau_t = self.tau_t*(1-self.learning_rate) + 1
+                # if t == 0:
+                #     N = self.model.N
+                #     Q = self.model.Q
+                #     M = self.model.M
+                #     iip_pos = np.arange(2*N*Q,2*N*Q+M*Q)
+                #     mu_pos = np.arange(0,N*Q)
+                #     S_pos = np.arange(N*Q,2*N*Q)
+                #     self.vbparam_dict = {'iip': [iip_pos],
+                #                          'mu': [mu_pos],
+                #                          'S': [S_pos]}
+                #     for k in self.vbparam_dict.keys():
+                #         hbar_t = 0.0
+                #         tau_t = 1.0
+                #         gbar_t = 0.0
+                #         self.vbparam_dict[k].append(hbar_t)
+                #         self.vbparam_dict[k].append(tau_t)
+                #         self.vbparam_dict[k].append(gbar_t)
+                # if True:
+                #     g_t = self.model.grads
+                #     for k in self.vbparam_dict.keys():
+                #         pos, hbar_t, tau_t, gbar_t = self.vbparam_dict[k]
+                #         gbar_t = (1-1/tau_t)*gbar_t + 1/tau_t * g_t[pos]
+                #         hbar_t = (1-1/tau_t)*hbar_t + 1/tau_t * np.dot(g_t[pos].T, g_t[pos])
+                #         self.learning_rate[pos] = (np.dot(gbar_t.T, gbar_t) / hbar_t)*1.0
+                #         tau_t = tau_t*(1-self.learning_rate[pos]) + 1
+                #         self.vbparam_dict[k] = [pos, hbar_t, tau_t, gbar_t]
+                #         print k, self.learning_rate[pos].max()

     def opt(self, f_fp=None, f=None, fp=None):
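For reference, the two update rules this rewritten adapt_learning_rate encodes can be stated standalone. The sketch below runs on a toy quadratic objective and is illustrative only, not the repository's API: first the per-coordinate AdaGrad-style rate with the damping constant t0 = 100.0 used above, then the global adaptive rate in the style of Schaul et al.'s "No More Pesky Learning Rates", which the 'semi_pesky' branch appears to follow. One caveat the sketch makes explicit: it writes the memory update back to tau, whereas the hunk's `tau_t = self.tau_t*(1-self.learning_rate) + 1` assigns to a local and leaves self.tau_t untouched.

    import numpy as np

    def grad(x):
        # gradient of the toy objective f(x) = 0.5 * ||x||^2
        return x

    # Per-coordinate AdaGrad-style rate, as in the 'adagrad' branch.
    x = np.random.randn(5)
    s_k = np.zeros_like(x)              # running sum of squared gradients
    t0 = 100.0                          # damping constant from the commit
    for t in range(50):
        g_k = grad(x)
        s_k += np.square(g_k)
        lr = 0.1 / (t0 + np.sqrt(s_k))  # large accumulated gradient -> small step
        x -= lr * g_k

    # Global adaptive rate, as in the 'semi_pesky' branch.
    x = np.random.randn(5)
    gbar, hbar, tau = 0.0, 0.0, 100.0   # gradient mean, mean squared norm, memory
    for t in range(50):
        g_t = grad(x)
        gbar = (1 - 1/tau) * gbar + (1/tau) * g_t
        hbar = (1 - 1/tau) * hbar + (1/tau) * np.dot(g_t, g_t)
        lr = np.dot(gbar, gbar) / hbar  # scalar rate: ||E[g]||^2 / E[g'g]
        tau = tau * (1 - lr) + 1        # grow memory while the rate stays small
        x -= lr * g_t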
@@ -274,8 +295,8 @@ class opt_SGD(Optimizer):
         X, Y = self.model.X.copy(), self.model.likelihood.Y.copy()

-        self.model.likelihood.YYT = None
-        self.model.likelihood.trYYT = None
+        self.model.likelihood.YYT = 0
+        self.model.likelihood.trYYT = 0
         self.model.likelihood._bias = 0.0
         self.model.likelihood._scale = 1.0
@@ -287,6 +308,9 @@ class opt_SGD(Optimizer):
         step = np.zeros_like(num_params)

         for it in range(self.iterations):
+            if self.actual_iter != None:
+                it = self.actual_iter
             self.model.grads = np.zeros_like(self.x_opt) # TODO this is ugly

             if it == 0 or self.self_paced is False:
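A plausible use of the actual_iter override just added: stepping the optimizer one iteration at a time from an outer loop while keeping any schedule indexed correctly. The sketch below is hypothetical; `f_fp` is a callable returning the objective and its gradient at x, matching how opt() uses it later in this diff, and `start`, `model` and `schedule` are assumed to exist:

    for outer in range(len(schedule)):
        opt = opt_SGD(start, iterations=1, model=model,
                      learning_rate_adaptation='annealing',
                      schedule=schedule,
                      actual_iter=outer)  # the inner loop sees it == outer
        opt.opt(f_fp=f_fp)
        start = opt.x_opt                 # resume from the last iterate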
@@ -316,6 +340,7 @@ class opt_SGD(Optimizer):
                     self.model.likelihood.trYYT = np.trace(self.model.likelihood.YYT)
                     Nj = N

             f, fp = f_fp(self.x_opt)
+            self.model.grads = fp.copy()
             step = self.momentum * step + self.learning_rate * fp
             self.x_opt -= step
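The surrounding update is classical momentum (heavy-ball) SGD: the previous step is decayed by the momentum coefficient and the freshly scaled gradient is added in. A minimal standalone sketch with invented numbers:

    import numpy as np

    x = np.array([1.0, -2.0])      # stand-in for x_opt
    step = np.zeros_like(x)
    momentum, learning_rate = 0.9, 1e-2
    for _ in range(3):
        fp = x                     # gradient of 0.5 * ||x||^2 at x
        step = momentum * step + learning_rate * fp
        x -= step                  # same two lines as in the hunk above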
@@ -326,6 +351,7 @@ class opt_SGD(Optimizer):
                 sys.stdout.flush()

             self.param_traces['noise'].append(noise)
+            self.adapt_learning_rate(it+count, D)
             NLL.append(f)
             self.fopt_trace.append(NLL[-1])
             # fig = plt.figure('traces')
@@ -335,7 +361,6 @@ class opt_SGD(Optimizer):
             # for k in self.param_traces.keys():
             #     self.param_traces[k].append(self.model.get(k)[0])
             self.grads.append(self.model.grads.tolist())
-            self.adapt_learning_rate(it)
             # should really be a sum(), but earlier samples in the iteration will have a very crappy ll
             self.f_opt = np.mean(NLL)
         self.model.N = N