From 59d866907b12c408f0194defa0887ad6077ec323 Mon Sep 17 00:00:00 2001
From: Nicolo Fusi
Date: Thu, 16 May 2013 13:50:16 +0100
Subject: [PATCH 1/4] SGD: add adaptive learning-rate schemes (adagrad, annealing, semi_pesky)

---
 GPy/inference/SGD.py | 47 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 40 insertions(+), 7 deletions(-)

diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py
index bfc6ee15..3b967466 100644
--- a/GPy/inference/SGD.py
+++ b/GPy/inference/SGD.py
@@ -18,7 +18,7 @@ class opt_SGD(Optimizer):
     """
 
-    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, **kwargs):
+    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, **kwargs):
         self.opt_name = "Stochastic Gradient Descent"
         self.model = model
@@ -33,6 +33,13 @@ class opt_SGD(Optimizer):
         self.center = center
         self.param_traces = [('noise',[])]
         self.iteration_file = iteration_file
+        self.learning_rate_adaptation = learning_rate_adaptation
+        if self.learning_rate_adaptation is not None:
+            if self.learning_rate_adaptation == 'annealing':
+                self.learning_rate_0 = self.learning_rate
+            else:
+                self.learning_rate_0 = self.learning_rate.mean()
+
 #        if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1:
 #            self.param_traces.append(('bias',[]))
 #        if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1:
@@ -204,6 +211,7 @@ class opt_SGD(Optimizer):
             ci = self.shift_constraints(j)
 
             f, fp = f_fp(self.x_opt[j])
+            step[j] = self.momentum * step[j] + self.learning_rate[j] * fp
             self.x_opt[j] -= step[j]
             self.restore_constraints(ci)
@@ -216,9 +224,32 @@ class opt_SGD(Optimizer):
 
         return f, step, self.model.N
 
+    def adapt_learning_rate(self, t):
+        if self.learning_rate_adaptation == 'adagrad':
+            if t > 5:
+                g = np.array(self.grads)
+                l2_g = np.sqrt(np.square(g).sum(0))
+                self.learning_rate = 0.001/l2_g
+            else:
+                self.learning_rate = np.zeros_like(self.learning_rate)
+        elif self.learning_rate_adaptation == 'annealing':
+            self.learning_rate = self.learning_rate_0/(1+float(t+1)/2)
+        elif self.learning_rate_adaptation == 'semi_pesky':
+            if t == 0:
+                self.hbar_t = 0.0
+                self.tau_t = 1000.0
+                self.gbar_t = 0.0
+            g_t = self.model.grads
+            self.gbar_t = (1-1/self.tau_t)*self.gbar_t + 1/self.tau_t * g_t
+            self.hbar_t = (1-1/self.tau_t)*self.hbar_t + 1/self.tau_t * np.dot(g_t.T, g_t)
+            self.learning_rate = np.dot(self.gbar_t.T, self.gbar_t) / self.hbar_t
+            self.tau_t = self.tau_t*(1-self.learning_rate) + 1
+            print self.learning_rate
+            self.learning_rate *= np.ones_like(self.x_opt)
+
     def opt(self, f_fp=None, f=None, fp=None):
         self.x_opt = self.model._get_params_transformed()
-        self.model.grads = np.zeros_like(self.x_opt)
+        self.grads = []
 
         X, Y = self.model.X.copy(), self.model.likelihood.Y.copy()
@@ -235,6 +266,7 @@ class opt_SGD(Optimizer):
         step = np.zeros_like(num_params)
 
         for it in range(self.iterations):
+            self.model.grads = np.zeros_like(self.x_opt) # TODO this is ugly
             if it == 0 or self.self_paced is False:
                 features = np.random.permutation(Y.shape[1])
@@ -272,16 +304,17 @@ class opt_SGD(Optimizer):
                     sys.stdout.write(status)
                     sys.stdout.flush()
                 self.param_traces['noise'].append(noise)
-                NLL.append(f)
-                self.fopt_trace.append(f)
+                NLL.append(f)
+                self.fopt_trace.append(NLL[-1])
 
 #                fig = plt.figure('traces')
 #                plt.clf()
 #                plt.plot(self.param_traces['noise'])
 #                for k in self.param_traces.keys():
 #                    self.param_traces[k].append(self.model.get(k)[0])
-
+            self.grads.append(self.model.grads.tolist())
+            self.adapt_learning_rate(it)
             # should really be a sum(), but earlier samples in the iteration will have a very crappy ll
             self.f_opt = np.mean(NLL)
             self.model.N = N
@@ -293,7 +326,7 @@ class opt_SGD(Optimizer):
             sigma = self.model.likelihood._variance
             self.model.likelihood._variance = None # invalidate cache
             self.model.likelihood._set_params(sigma)
-
+            self.trace.append(self.f_opt)
             if self.iteration_file is not None:
                 f = open(self.iteration_file + "iteration%d.pickle" % it, 'w')
@@ -303,6 +336,6 @@ class opt_SGD(Optimizer):
 
         if self.messages != 0:
             sys.stdout.write('\r' + ' '*len(status)*2 + '  \r')
-            status = "SGD Iteration: {0: 3d}/{1: 3d}  f: {2: 2.3f}\n".format(it+1, self.iterations, self.f_opt)
+            status = "SGD Iteration: {0: 3d}/{1: 3d}  f: {2: 2.3f}  max eta: {3: 1.5f}\n".format(it+1, self.iterations, self.f_opt, self.learning_rate.max())
             sys.stdout.write(status)
             sys.stdout.flush()
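A minimal standalone sketch of the three schedules adapt_learning_rate() implements, for readers of the patch; it is not part of the diff, and the helper names are illustrative. The 'semi_pesky' rule appears to follow the adaptive scheme of Schaul, Zhang and LeCun ("No More Pesky Learning Rates", 2013): keep running averages of the gradient and of its squared norm with memory tau, and set eta to the ratio of the two.

    import numpy as np

    def adagrad_rate(grad_history, c=0.001):
        # one rate per parameter: eta_j = c / ||g_{1:t, j}||_2
        g = np.asarray(grad_history)              # shape (t, n_params)
        return c / np.sqrt(np.square(g).sum(0))

    def annealed_rate(eta0, t, tau=2.0):
        # 1/t-style decay: eta_t = eta_0 / (1 + (t+1)/tau); tau is 2 in this patch
        return eta0 / (1.0 + float(t + 1) / tau)

    def pesky_rate(g_t, gbar, hbar, tau):
        # running averages with memory tau; eta ~ E[g]^2 / E[g^2]
        gbar = (1.0 - 1.0/tau) * gbar + (1.0/tau) * g_t
        hbar = (1.0 - 1.0/tau) * hbar + (1.0/tau) * np.dot(g_t, g_t)
        eta = np.dot(gbar, gbar) / hbar
        tau = tau * (1.0 - eta) + 1.0             # memory grows while eta is small
        return eta, gbar, hbar, tau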
From 5183a18a1f94e3d51506b446575172e004109348 Mon Sep 17 00:00:00 2001
From: Nicolo Fusi
Date: Fri, 17 May 2013 12:26:08 +0100
Subject: [PATCH 2/4] SGD: slow the annealing schedule; give each Bayesian_GPLVM parameter block its own semi_pesky rate

---
 GPy/inference/SGD.py | 43 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 32 insertions(+), 11 deletions(-)

diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py
index 3b967466..701a8c65 100644
--- a/GPy/inference/SGD.py
+++ b/GPy/inference/SGD.py
@@ -233,19 +233,40 @@ class opt_SGD(Optimizer):
             else:
                 self.learning_rate = np.zeros_like(self.learning_rate)
         elif self.learning_rate_adaptation == 'annealing':
-            self.learning_rate = self.learning_rate_0/(1+float(t+1)/2)
+            self.learning_rate = self.learning_rate_0/(1+float(t+1)/10)
         elif self.learning_rate_adaptation == 'semi_pesky':
-            if t == 0:
-                self.hbar_t = 0.0
-                self.tau_t = 1000.0
-                self.gbar_t = 0.0
+            if self.model.__class__.__name__ == 'Bayesian_GPLVM':
+                if t == 0:
+                    N = self.model.N
+                    Q = self.model.Q
+                    M = self.model.M
+
+                    iip_pos = np.arange(2*N*Q, 2*N*Q+M*Q)
+                    mu_pos = np.arange(0, N*Q)
+                    S_pos = np.arange(N*Q, 2*N*Q)
+                    self.vbparam_dict = {'iip': [iip_pos],
+                                         'mu': [mu_pos],
+                                         'S': [S_pos]}
+
+                    for k in self.vbparam_dict.keys():
+                        hbar_t = 0.0
+                        tau_t = 1000.0
+                        gbar_t = 0.0
+                        self.vbparam_dict[k].append(hbar_t)
+                        self.vbparam_dict[k].append(tau_t)
+                        self.vbparam_dict[k].append(gbar_t)
+
             g_t = self.model.grads
-            self.gbar_t = (1-1/self.tau_t)*self.gbar_t + 1/self.tau_t * g_t
-            self.hbar_t = (1-1/self.tau_t)*self.hbar_t + 1/self.tau_t * np.dot(g_t.T, g_t)
-            self.learning_rate = np.dot(self.gbar_t.T, self.gbar_t) / self.hbar_t
-            self.tau_t = self.tau_t*(1-self.learning_rate) + 1
-            print self.learning_rate
-            self.learning_rate *= np.ones_like(self.x_opt)
+
+            for k in self.vbparam_dict.keys():
+                pos, hbar_t, tau_t, gbar_t = self.vbparam_dict[k]
+
+                gbar_t = (1-1/tau_t)*gbar_t + 1/tau_t * g_t[pos]
+                hbar_t = (1-1/tau_t)*hbar_t + 1/tau_t * np.dot(g_t[pos].T, g_t[pos])
+                self.learning_rate[pos] = np.dot(gbar_t.T, gbar_t) / hbar_t
+                tau_t = tau_t*(1-self.learning_rate[pos]) + 1
+                self.vbparam_dict[k] = [pos, hbar_t, tau_t, gbar_t]
+
     def opt(self, f_fp=None, f=None, fp=None):
         self.x_opt = self.model._get_params_transformed()
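The per-block version above keys the same update off index ranges into the flattened parameter vector. A sketch of that bookkeeping, assuming the Bayesian_GPLVM layout implied by the patch's index arithmetic (mu in the first N*Q entries, S in the next N*Q, then M*Q inducing inputs); it is illustrative, not an excerpt:

    import numpy as np

    def make_blocks(N, Q, M):
        return {'mu':  np.arange(0, N*Q),
                'S':   np.arange(N*Q, 2*N*Q),
                'iip': np.arange(2*N*Q, 2*N*Q + M*Q)}

    def update_block_rates(g_t, blocks, state, learning_rate):
        # state[name] = (gbar, hbar, tau); each block gets its own scalar rate
        for name, pos in blocks.items():
            gbar, hbar, tau = state[name]
            gbar = (1.0 - 1.0/tau) * gbar + (1.0/tau) * g_t[pos]
            hbar = (1.0 - 1.0/tau) * hbar + (1.0/tau) * np.dot(g_t[pos], g_t[pos])
            eta = np.dot(gbar, gbar) / hbar
            learning_rate[pos] = eta
            state[name] = (gbar, hbar, tau * (1.0 - eta) + 1.0)
        return learning_rate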
From ddd3ece3ceb225b33f0ac59215d38832fc67ca14 Mon Sep 17 00:00:00 2001
From: Nicolo Fusi
Date: Fri, 17 May 2013 12:29:13 +0100
Subject: [PATCH 3/4] kern: stub out the case-by-case psi2 cross-terms ahead of a generic implementation

---
 GPy/kern/kern.py | 50 +++++++++++++++++++++++++-----------------------
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py
index c682fdcc..0e425e38 100644
--- a/GPy/kern/kern.py
+++ b/GPy/kern/kern.py
@@ -315,31 +315,33 @@ class kern(parameterised):
 
         # compute the "cross" terms
         # TODO: input_slices needed
+        crossterms = 0
         for p1, p2 in itertools.combinations(self.parts, 2):
-            # white doesn;t combine with anything
-            if p1.name == 'white' or p2.name == 'white':
-                pass
-            # rbf X bias
-            elif p1.name == 'bias' and p2.name == 'rbf':
-                target += p1.variance * (p2._psi1[:, :, None] + p2._psi1[:, None, :])
-            elif p2.name == 'bias' and p1.name == 'rbf':
-                target += p2.variance * (p1._psi1[:, :, None] + p1._psi1[:, None, :])
-            # linear X bias
-            elif p1.name == 'bias' and p2.name == 'linear':
-                tmp = np.zeros((mu.shape[0], Z.shape[0]))
-                p2.psi1(Z, mu, S, tmp)
-                target += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
-            elif p2.name == 'bias' and p1.name == 'linear':
-                tmp = np.zeros((mu.shape[0], Z.shape[0]))
-                p1.psi1(Z, mu, S, tmp)
-                target += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
-            # rbf X linear
-            elif p1.name == 'linear' and p2.name == 'rbf':
-                raise NotImplementedError # TODO
-            elif p2.name == 'linear' and p1.name == 'rbf':
-                raise NotImplementedError # TODO
-            else:
-                raise NotImplementedError, "psi2 cannot be computed for this kernel"
+            prod = np.multiply
+            # # white doesn't combine with anything
+            # if p1.name == 'white' or p2.name == 'white':
+            #     pass
+            # # rbf X bias
+            # elif p1.name == 'bias' and p2.name == 'rbf':
+            #     target += p1.variance * (p2._psi1[:, :, None] + p2._psi1[:, None, :])
+            # elif p2.name == 'bias' and p1.name == 'rbf':
+            #     target += p2.variance * (p1._psi1[:, :, None] + p1._psi1[:, None, :])
+            # # linear X bias
+            # elif p1.name == 'bias' and p2.name == 'linear':
+            #     tmp = np.zeros((mu.shape[0], Z.shape[0]))
+            #     p2.psi1(Z, mu, S, tmp)
+            #     target += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
+            # elif p2.name == 'bias' and p1.name == 'linear':
+            #     tmp = np.zeros((mu.shape[0], Z.shape[0]))
+            #     p1.psi1(Z, mu, S, tmp)
+            #     target += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
+            # # rbf X linear
+            # elif p1.name == 'linear' and p2.name == 'rbf':
+            #     raise NotImplementedError # TODO
+            # elif p2.name == 'linear' and p1.name == 'rbf':
+            #     raise NotImplementedError # TODO
+            # else:
+            #     raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return target
 
     def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S):
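For a sum kernel k(x, x') = sum_p k_p(x, x'), the psi2 statistic that the branches above handled case by case decomposes as follows (a sketch in the notation of the surrounding code, not part of the series):

    \psi_2^{(n)}(z_m, z_{m'})
        = \mathbb{E}_{q(x_n)}\Big[ \sum_p k_p(x_n, z_m) \sum_{p'} k_{p'}(x_n, z_{m'}) \Big]
        = \sum_p \psi_2^{(p,n)}(z_m, z_{m'})
          + \sum_{p \neq p'} \mathbb{E}_{q(x_n)}\big[ k_p(x_n, z_m)\, k_{p'}(x_n, z_{m'}) \big]

The next patch evaluates the cross terms from products of the parts' psi1 statistics, which reproduces the bias X rbf and bias X linear branches stubbed out here (psi1 of a bias kernel is a constant equal to its variance, and a white kernel contributes zero to psi1), while the linear X linear pairing stays unimplemented.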
From 19407293dcaf7b642ab4335fea985aa7ee398ec7 Mon Sep 17 00:00:00 2001
From: Nicolo Fusi
Date: Fri, 17 May 2013 16:29:32 +0100
Subject: [PATCH 4/4] kern: compute psi2 cross-terms generically from psi1 products

---
 GPy/examples/dimensionality_reduction.py |   4 +-
 GPy/kern/bias.py                         |   5 +-
 GPy/kern/kern.py                         | 130 ++++++-----------------
 GPy/kern/kernpart.py                     |   2 -
 GPy/kern/white.py                        |   4 +-
 5 files changed, 40 insertions(+), 105 deletions(-)

diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index dcda4f42..9b51947b 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -17,11 +17,11 @@ def BGPLVM(seed=default_seed):
     D = 4
     # generate GPLVM-like data
     X = np.random.rand(N, Q)
-    k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
+    k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
     K = k.K(X)
     Y = np.random.multivariate_normal(np.zeros(N), K, D).T
 
-    k = GPy.kern.linear(Q, ARD=True) + GPy.kern.white(Q)
+    k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q)
     # k = GPy.kern.rbf(Q) + GPy.kern.rbf(Q) + GPy.kern.white(Q)
     # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
     # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
diff --git a/GPy/kern/bias.py b/GPy/kern/bias.py
index b5883f87..09f0afa9 100644
--- a/GPy/kern/bias.py
+++ b/GPy/kern/bias.py
@@ -55,8 +55,9 @@ class bias(kernpart):
         target += self.variance
 
     def psi1(self, Z, mu, S, target):
-        target += self.variance
-
+        self._psi1 = self.variance
+        target += self._psi1
+
     def psi2(self, Z, mu, S, target):
         target += self.variance**2
diff --git a/GPy/kern/kern.py b/GPy/kern/kern.py
index 0e425e38..c9582ac8 100644
--- a/GPy/kern/kern.py
+++ b/GPy/kern/kern.py
@@ -316,32 +316,19 @@ class kern(parameterised):
         # compute the "cross" terms
         # TODO: input_slices needed
         crossterms = 0
+
         for p1, p2 in itertools.combinations(self.parts, 2):
-            prod = np.multiply
-            # # white doesn't combine with anything
-            # if p1.name == 'white' or p2.name == 'white':
-            #     pass
-            # # rbf X bias
-            # elif p1.name == 'bias' and p2.name == 'rbf':
-            #     target += p1.variance * (p2._psi1[:, :, None] + p2._psi1[:, None, :])
-            # elif p2.name == 'bias' and p1.name == 'rbf':
-            #     target += p2.variance * (p1._psi1[:, :, None] + p1._psi1[:, None, :])
-            # # linear X bias
-            # elif p1.name == 'bias' and p2.name == 'linear':
-            #     tmp = np.zeros((mu.shape[0], Z.shape[0]))
-            #     p2.psi1(Z, mu, S, tmp)
-            #     target += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
-            # elif p2.name == 'bias' and p1.name == 'linear':
-            #     tmp = np.zeros((mu.shape[0], Z.shape[0]))
-            #     p1.psi1(Z, mu, S, tmp)
-            #     target += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
-            # # rbf X linear
-            # elif p1.name == 'linear' and p2.name == 'rbf':
-            #     raise NotImplementedError # TODO
-            # elif p2.name == 'linear' and p1.name == 'rbf':
-            #     raise NotImplementedError # TODO
-            # else:
-            #     raise NotImplementedError, "psi2 cannot be computed for this kernel"
+
+            # TODO psi1 this must be faster/better/precached/more nice
+            tmp1 = np.zeros((mu.shape[0], Z.shape[0]))
+            p1.psi1(Z, mu, S, tmp1)
+            tmp2 = np.zeros((mu.shape[0], Z.shape[0]))
+            p2.psi1(Z, mu, S, tmp2)
+
+            prod = np.multiply(tmp1, tmp2)
+            crossterms += prod[:,:,None] + prod[:, None, :]
+
+        target += crossterms
         return target
 
     def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S):
@@ -350,71 +337,34 @@ class kern(parameterised):
 
         # compute the "cross" terms
        # TODO: better looping, input_slices
-        for i1, i2 in itertools.combinations(range(len(self.parts)), 2):
+        for i1, i2 in itertools.permutations(range(len(self.parts)), 2):
             p1, p2 = self.parts[i1], self.parts[i2]
             # ipsl1, ipsl2 = self.input_slices[i1], self.input_slices[i2]
             ps1, ps2 = self.param_slices[i1], self.param_slices[i2]
 
-            # white doesn;t combine with anything
-            if p1.name == 'white' or p2.name == 'white':
-                pass
-            # rbf X bias
-            elif p1.name == 'bias' and p2.name == 'rbf':
-                p2.dpsi1_dtheta(dL_dpsi2.sum(1) * p1.variance * 2., Z, mu, S, target[ps2])
-                p1.dpsi1_dtheta(dL_dpsi2.sum(1) * p2._psi1 * 2., Z, mu, S, target[ps1])
-            elif p2.name == 'bias' and p1.name == 'rbf':
-                p1.dpsi1_dtheta(dL_dpsi2.sum(1) * p2.variance * 2., Z, mu, S, target[ps1])
-                p2.dpsi1_dtheta(dL_dpsi2.sum(1) * p1._psi1 * 2., Z, mu, S, target[ps2])
-            # linear X bias
-            elif p1.name == 'bias' and p2.name == 'linear':
-                p2.dpsi1_dtheta(dL_dpsi2.sum(1) * p1.variance * 2., Z, mu, S, target[ps2]) # [ps1])
-                psi1 = np.zeros((mu.shape[0], Z.shape[0]))
-                p2.psi1(Z, mu, S, psi1)
-                p1.dpsi1_dtheta(dL_dpsi2.sum(1) * psi1 * 2., Z, mu, S, target[ps1])
-            elif p2.name == 'bias' and p1.name == 'linear':
-                p1.dpsi1_dtheta(dL_dpsi2.sum(1) * p2.variance * 2., Z, mu, S, target[ps1])
-                psi1 = np.zeros((mu.shape[0], Z.shape[0]))
-                p1.psi1(Z, mu, S, psi1)
-                p2.dpsi1_dtheta(dL_dpsi2.sum(1) * psi1 * 2., Z, mu, S, target[ps2])
-            # rbf X linear
-            elif p1.name == 'linear' and p2.name == 'rbf':
-                raise NotImplementedError # TODO
-            elif p2.name == 'linear' and p1.name == 'rbf':
-                raise NotImplementedError # TODO
-            else:
-                raise NotImplementedError, "psi2 cannot be computed for this kernel"
+            tmp = np.zeros((mu.shape[0], Z.shape[0]))
+            p1.psi1(Z, mu, S, tmp)
+            p2.dpsi1_dtheta((tmp[:,None,:]*dL_dpsi2).sum(1)*2., Z, mu, S, target[ps2])
 
         return self._transform_gradients(target)
 
     def dpsi2_dZ(self, dL_dpsi2, Z, mu, S):
         target = np.zeros_like(Z)
         [p.dpsi2_dZ(dL_dpsi2, Z[:, i_s], mu[:, i_s], S[:, i_s], target[:, i_s]) for p, i_s in zip(self.parts, self.input_slices)]
+        #target *= 2
 
         # compute the "cross" terms
         # TODO: we need input_slices here.
-        for p1, p2 in itertools.combinations(self.parts, 2):
-            # white doesn;t combine with anything
-            if p1.name == 'white' or p2.name == 'white':
-                pass
-            # rbf X bias
-            elif p1.name == 'bias' and p2.name == 'rbf':
-                p2.dpsi1_dX(dL_dpsi2.sum(1).T * p1.variance, Z, mu, S, target)
-            elif p2.name == 'bias' and p1.name == 'rbf':
-                p1.dpsi1_dZ(dL_dpsi2.sum(1).T * p2.variance, Z, mu, S, target)
-            # linear X bias
-            elif p1.name == 'bias' and p2.name == 'linear':
-                p2.dpsi1_dZ(dL_dpsi2.sum(1).T * p1.variance, Z, mu, S, target)
-            elif p2.name == 'bias' and p1.name == 'linear':
-                p1.dpsi1_dZ(dL_dpsi2.sum(1).T * p2.variance, Z, mu, S, target)
-            # rbf X linear
-            elif p1.name == 'linear' and p2.name == 'rbf':
-                raise NotImplementedError # TODO
-            elif p2.name == 'linear' and p1.name == 'rbf':
-                raise NotImplementedError # TODO
-            else:
-                raise NotImplementedError, "psi2 cannot be computed for this kernel"
+        for p1, p2 in itertools.permutations(self.parts, 2):
+            if p1.name == 'linear' and p2.name == 'linear':
+                raise NotImplementedError("We don't handle linear/linear cross-terms")
+            tmp = np.zeros((mu.shape[0], Z.shape[0]))
+            p1.psi1(Z, mu, S, tmp)
+            tmp2 = np.zeros_like(target)
+            p2.dpsi1_dZ((tmp[:,None,:]*dL_dpsi2).sum(1).T, Z, mu, S, tmp2)
+            target += tmp2
 
-        return target * 2.
+        return target * 2
 
     def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S):
         target_mu, target_S = np.zeros((2, mu.shape[0], mu.shape[1]))
@@ -422,27 +372,13 @@ class kern(parameterised):
 
         # compute the "cross" terms
         # TODO: we need input_slices here.
-        for p1, p2 in itertools.combinations(self.parts, 2):
-            # white doesn;t combine with anything
-            if p1.name == 'white' or p2.name == 'white':
-                pass
-            # rbf X bias
-            elif p1.name == 'bias' and p2.name == 'rbf':
-                p2.dpsi1_dmuS(dL_dpsi2.sum(1).T * p1.variance * 2., Z, mu, S, target_mu, target_S)
-            elif p2.name == 'bias' and p1.name == 'rbf':
-                p1.dpsi1_dmuS(dL_dpsi2.sum(1).T * p2.variance * 2., Z, mu, S, target_mu, target_S)
-            # linear X bias
-            elif p1.name == 'bias' and p2.name == 'linear':
-                p2.dpsi1_dmuS(dL_dpsi2.sum(1).T * p1.variance * 2., Z, mu, S, target_mu, target_S)
-            elif p2.name == 'bias' and p1.name == 'linear':
-                p1.dpsi1_dmuS(dL_dpsi2.sum(1).T * p2.variance * 2., Z, mu, S, target_mu, target_S)
-            # rbf X linear
-            elif p1.name == 'linear' and p2.name == 'rbf':
-                raise NotImplementedError # TODO
-            elif p2.name == 'linear' and p1.name == 'rbf':
-                raise NotImplementedError # TODO
-            else:
-                raise NotImplementedError, "psi2 cannot be computed for this kernel"
+        for p1, p2 in itertools.permutations(self.parts, 2):
+            if p1.name == 'linear' and p2.name == 'linear':
+                raise NotImplementedError("We don't handle linear/linear cross-terms")
+
+            tmp = np.zeros((mu.shape[0], Z.shape[0]))
+            p1.psi1(Z, mu, S, tmp)
+            p2.dpsi1_dmuS((tmp[:,None,:]*dL_dpsi2).sum(1).T*2., Z, mu, S, target_mu, target_S)
 
         return target_mu, target_S
 
diff --git a/GPy/kern/kernpart.py b/GPy/kern/kernpart.py
index 30a1cc3d..7de150e9 100644
--- a/GPy/kern/kernpart.py
+++ b/GPy/kern/kernpart.py
@@ -54,5 +54,3 @@ class kernpart(object):
         raise NotImplementedError
     def dK_dX(self,X,X2,target):
         raise NotImplementedError
-
-
diff --git a/GPy/kern/white.py b/GPy/kern/white.py
index be6aad45..d5701cd9 100644
--- a/GPy/kern/white.py
+++ b/GPy/kern/white.py
@@ -18,7 +18,8 @@ class white(kernpart):
         self.Nparam = 1
         self.name = 'white'
         self._set_params(np.array([variance]).flatten())
-
+        self._psi1 = 0 # TODO: more elegance here
+
     def _get_params(self):
         return self.variance
 
@@ -81,4 +82,3 @@ class white(kernpart):
 
     def dpsi2_dmuS(self,dL_dpsi2,Z,mu,S,target_mu,target_S):
         pass
-
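A standalone NumPy sketch of the generic cross-term computation this patch installs in kern.psi2, with the shapes spelled out; it is illustrative, not an excerpt from the diff:

    import itertools
    import numpy as np

    def psi2_crossterms(psi1_parts):
        # psi1_parts: one (N, M) psi1 matrix per kernel part
        N, M = psi1_parts[0].shape
        cross = np.zeros((N, M, M))
        for t1, t2 in itertools.combinations(psi1_parts, 2):
            prod = t1 * t2                                # elementwise, (N, M)
            cross += prod[:, :, None] + prod[:, None, :]  # symmetrise, (N, M, M)
        return cross

With a bias part, whose psi1 is constant and equal to its variance, this reduces to variance * (psi1[:, :, None] + psi1[:, None, :]), the special case the previous patch stubbed out. The gradient hunks follow the same pattern: differentiating the symmetrised product and using the symmetry of dL_dpsi2 in its last two axes yields the weight (tmp[:,None,:]*dL_dpsi2).sum(1)*2. passed to each part's dpsi1_* routine, with itertools.permutations supplying both orderings of every pair.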