Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-05 01:32:40 +02:00
Merge branch 'devel' of github.com:SheffieldML/GPy into devel
Commit 3c12f85a28
6 changed files with 102 additions and 111 deletions
@@ -18,7 +18,7 @@ class opt_SGD(Optimizer):
 
     """
 
-    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, **kwargs):
+    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, **kwargs):
         self.opt_name = "Stochastic Gradient Descent"
         self.model = model
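
For orientation, here is a hypothetical construction of the optimizer with the new keyword. The signature is taken from the diff, but the import path and the start vector are placeholders, not confirmed by this commit:

    import numpy as np
    from GPy.inference.sgd import opt_SGD  # hypothetical import path, not shown in the diff

    start = np.zeros(10)                   # placeholder initial parameter vector
    optimizer = opt_SGD(start,
                        iterations=100,
                        learning_rate=1e-4,
                        momentum=0.9,
                        learning_rate_adaptation='annealing')  # new keyword in this commit
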
@@ -33,6 +33,13 @@ class opt_SGD(Optimizer):
         self.center = center
         self.param_traces = [('noise',[])]
         self.iteration_file = iteration_file
+        self.learning_rate_adaptation = learning_rate_adaptation
+        if self.learning_rate_adaptation != None:
+            if self.learning_rate_adaptation == 'annealing':
+                self.learning_rate_0 = self.learning_rate
+            else:
+                self.learning_rate_0 = self.learning_rate.mean()
+
         # if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1:
         #     self.param_traces.append(('bias',[]))
         # if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1:
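
In the hunks shown, only 'annealing' ever reads learning_rate_0 back: it rescales the whole initial rate over time, while the other schemes overwrite self.learning_rate with per-parameter values and cache just the scalar mean. Since .mean() is called for every scheme except 'annealing', those schemes require learning_rate to be an array rather than a float. A tiny illustration with made-up values:

    import numpy as np

    learning_rate = np.full(6, 1e-4)   # per-parameter rates, as the .mean() call assumes

    # 'annealing': the whole initial rate is kept and shrunk each iteration t
    learning_rate_0 = learning_rate
    t = 0
    print(learning_rate_0 / (1 + float(t + 1) / 10))   # ~9.09e-05 for every parameter

    # any other scheme: only a scalar summary of the initial rate survives
    print(learning_rate.mean())                        # 0.0001
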
@@ -204,6 +211,7 @@ class opt_SGD(Optimizer):
             ci = self.shift_constraints(j)
             f, fp = f_fp(self.x_opt[j])
 
             step[j] = self.momentum * step[j] + self.learning_rate[j] * fp
             self.x_opt[j] -= step[j]
             self.restore_constraints(ci)
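
The update in this hunk is classical momentum SGD applied per parameter block j, with the learning rate now indexed per block. A minimal self-contained sketch of the same recursion, using illustrative names rather than GPy's:

    import numpy as np

    def momentum_step(x, step, grad, learning_rate, momentum=0.9):
        """Classical momentum: step <- momentum*step + eta*grad, then x <- x - step."""
        step = momentum * step + learning_rate * grad
        return x - step, step

    # Toy quadratic f(x) = 0.5*||x||^2, whose gradient is x itself.
    x = np.array([5.0, -3.0])
    step = np.zeros_like(x)
    for _ in range(100):
        x, step = momentum_step(x, step, grad=x, learning_rate=0.1)
    print(x)  # both components driven close to zero
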
@@ -216,9 +224,53 @@ class opt_SGD(Optimizer):
 
         return f, step, self.model.N
 
+    def adapt_learning_rate(self, t):
+        if self.learning_rate_adaptation == 'adagrad':
+            if t > 5:
+                g = np.array(self.grads)
+                l2_g = np.sqrt(np.square(g).sum(0))
+                self.learning_rate = 0.001/l2_g
+            else:
+                self.learning_rate = np.zeros_like(self.learning_rate)
+        elif self.learning_rate_adaptation == 'annealing':
+            self.learning_rate = self.learning_rate_0/(1+float(t+1)/10)
+        elif self.learning_rate_adaptation == 'semi_pesky':
+            if self.model.__class__.__name__ == 'Bayesian_GPLVM':
+                if t == 0:
+                    N = self.model.N
+                    Q = self.model.Q
+                    M = self.model.M
+
+                    iip_pos = np.arange(2*N*Q,2*N*Q+M*Q)
+                    mu_pos = np.arange(0,N*Q)
+                    S_pos = np.arange(N*Q,2*N*Q)
+                    self.vbparam_dict = {'iip': [iip_pos],
+                                         'mu': [mu_pos],
+                                         'S': [S_pos]}
+
+                    for k in self.vbparam_dict.keys():
+                        hbar_t = 0.0
+                        tau_t = 1000.0
+                        gbar_t = 0.0
+                        self.vbparam_dict[k].append(hbar_t)
+                        self.vbparam_dict[k].append(tau_t)
+                        self.vbparam_dict[k].append(gbar_t)
+
+                g_t = self.model.grads
+
+                for k in self.vbparam_dict.keys():
+                    pos, hbar_t, tau_t, gbar_t = self.vbparam_dict[k]
+
+                    gbar_t = (1-1/tau_t)*gbar_t + 1/tau_t * g_t[pos]
+                    hbar_t = (1-1/tau_t)*hbar_t + 1/tau_t * np.dot(g_t[pos].T, g_t[pos])
+                    self.learning_rate[pos] = np.dot(gbar_t.T, gbar_t) / hbar_t
+                    tau_t = tau_t*(1-self.learning_rate[pos]) + 1
+                    self.vbparam_dict[k] = [pos, hbar_t, tau_t, gbar_t]
+
     def opt(self, f_fp=None, f=None, fp=None):
         self.x_opt = self.model._get_params_transformed()
+        self.model.grads = np.zeros_like(self.x_opt)
+        self.grads = []
 
         X, Y = self.model.X.copy(), self.model.likelihood.Y.copy()
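
The three branches are standard schedules: 'adagrad' divides a fixed constant by the L2 norm of the accumulated gradient history (updates stay frozen for the first few iterations while that history builds up), 'annealing' decays the initial rate as eta_0/(1+(t+1)/10), and 'semi_pesky' appears to implement the adaptive learning rate of Ranganath et al. (2013) for stochastic variational inference, tracking running averages of the gradient and its squared norm with an effective memory size tau. A standalone sketch of that last recursion, stripped of the Bayesian_GPLVM index bookkeeping (all names illustrative):

    import numpy as np

    def adaptive_rate(gbar, hbar, tau, g):
        """One step of the adaptive-rate recursion used by 'semi_pesky' above
        (cf. Ranganath et al., 2013): gbar/hbar are running averages of the
        gradient and of its squared norm, tau is the effective memory size."""
        gbar = (1.0 - 1.0 / tau) * gbar + (1.0 / tau) * g
        hbar = (1.0 - 1.0 / tau) * hbar + (1.0 / tau) * np.dot(g, g)
        rate = np.dot(gbar, gbar) / hbar   # large when gradients agree, small when noisy
        tau = tau * (1.0 - rate) + 1.0     # forget faster after a confident step
        return rate, gbar, hbar, tau

    # Noisy gradients of a toy objective: the rate climbs as the average stabilises.
    # tau starts at 10.0 here for a quick demo; the diff initialises it to 1000.0.
    rng = np.random.default_rng(0)
    gbar, hbar, tau = np.zeros(3), 1e-8, 10.0
    for t in range(50):
        g = np.ones(3) + 0.1 * rng.standard_normal(3)
        rate, gbar, hbar, tau = adaptive_rate(gbar, hbar, tau, g)
    print(rate)  # approaches ||E[g]||^2 / E[||g||^2], close to 1 for this low-noise g
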
@@ -235,6 +287,7 @@ class opt_SGD(Optimizer):
 
         step = np.zeros_like(num_params)
         for it in range(self.iterations):
+            self.model.grads = np.zeros_like(self.x_opt) # TODO this is ugly
 
             if it == 0 or self.self_paced is False:
                 features = np.random.permutation(Y.shape[1])
@@ -272,16 +325,17 @@ class opt_SGD(Optimizer):
                 sys.stdout.write(status)
                 sys.stdout.flush()
                 self.param_traces['noise'].append(noise)
-                NLL.append(f)
-
-            self.fopt_trace.append(f)
+                NLL.append(f)
+            self.fopt_trace.append(NLL[-1])
             # fig = plt.figure('traces')
             # plt.clf()
             # plt.plot(self.param_traces['noise'])
 
             # for k in self.param_traces.keys():
             #     self.param_traces[k].append(self.model.get(k)[0])
 
+            self.grads.append(self.model.grads.tolist())
+            self.adapt_learning_rate(it)
             # should really be a sum(), but earlier samples in the iteration will have a very crappy ll
             self.f_opt = np.mean(NLL)
             self.model.N = N
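
Condensed, each outer iteration resets the gradient accumulator, sweeps the mini-batches while updating the parameters, stores the gradient history that adapt_learning_rate consumes, and reports the mean rather than the sum of the per-batch objectives, since batches early in an iteration are evaluated at poor parameter values. A runnable toy version of that control flow (the quadratic objective and all names are stand-ins):

    import numpy as np

    rng = np.random.default_rng(1)
    x_opt = rng.standard_normal(4)          # stand-in parameter vector
    learning_rate = np.full_like(x_opt, 0.1)
    grads, fopt_trace, trace = [], [], []

    def process_batch(x):
        """Stand-in for one mini-batch step on f(x) = 0.5*||x||^2; returns (f, grad)."""
        return 0.5 * float(x @ x), x.copy()

    for it in range(20):
        model_grads = np.zeros_like(x_opt)  # reset the per-iteration accumulator
        NLL = []
        for j in range(5):                  # sweep over mini-batches
            f, fp = process_batch(x_opt)
            x_opt -= learning_rate * fp
            model_grads += fp
            NLL.append(f)
        fopt_trace.append(NLL[-1])
        grads.append(model_grads.tolist())  # gradient history for rate adaptation
        trace.append(np.mean(NLL))          # mean, not sum: early batches score poorly
    print(trace[0], trace[-1])              # the objective falls across iterations
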
@@ -293,7 +347,7 @@ class opt_SGD(Optimizer):
             sigma = self.model.likelihood._variance
             self.model.likelihood._variance = None # invalidate cache
             self.model.likelihood._set_params(sigma)
 
             self.trace.append(self.f_opt)
             if self.iteration_file is not None:
                 f = open(self.iteration_file + "iteration%d.pickle" % it, 'w')
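
The pair of lines around _set_params is a cache-busting idiom: clearing the stored variance first guarantees that re-setting the same value actually triggers the recomputation. A generic runnable sketch of the pattern, with an illustrative class rather than GPy's likelihood:

    class CachedParam:
        """Illustrative cache: re-setting an unchanged value is a no-op
        unless the cached copy is cleared first."""

        def __init__(self, value):
            self._variance = value

        def _set_params(self, value):
            if value == self._variance:   # cache hit: skip the expensive recompute
                return
            self._variance = value
            print("recomputed with variance", value)

    p = CachedParam(0.5)
    p._set_params(0.5)      # nothing happens, value is cached
    p._variance = None      # invalidate the cache
    p._set_params(0.5)      # now the recompute runs
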
@@ -303,6 +357,6 @@ class opt_SGD(Optimizer):
 
         if self.messages != 0:
             sys.stdout.write('\r' + ' '*len(status)*2 + '  \r')
-            status = "SGD Iteration: {0: 3d}/{1: 3d}  f: {2: 2.3f}\n".format(it+1, self.iterations, self.f_opt)
+            status = "SGD Iteration: {0: 3d}/{1: 3d}  f: {2: 2.3f}  max eta: {3: 1.5f}\n".format(it+1, self.iterations, self.f_opt, self.learning_rate.max())
             sys.stdout.write(status)
             sys.stdout.flush()