diff --git a/GPy/inference/SGD.py b/GPy/inference/SGD.py
index ca9a5a61..7cb7566f 100644
--- a/GPy/inference/SGD.py
+++ b/GPy/inference/SGD.py
@@ -257,37 +257,6 @@ class opt_SGD(Optimizer):
         self.learning_rate = np.ones_like(self.learning_rate)*(np.dot(self.gbar_t.T, self.gbar_t) / self.hbar_t)
         tau_t = self.tau_t*(1-self.learning_rate) + 1
 
-        # if t == 0:
-        #     N = self.model.N
-        #     Q = self.model.Q
-        #     M = self.model.M
-
-        #     iip_pos = np.arange(2*N*Q,2*N*Q+M*Q)
-        #     mu_pos = np.arange(0,N*Q)
-        #     S_pos = np.arange(N*Q,2*N*Q)
-        #     self.vbparam_dict = {'iip': [iip_pos],
-        #                          'mu': [mu_pos],
-        #                          'S': [S_pos]}
-
-        #     for k in self.vbparam_dict.keys():
-        #         hbar_t = 0.0
-        #         tau_t = 1.0
-        #         gbar_t = 0.0
-        #         self.vbparam_dict[k].append(hbar_t)
-        #         self.vbparam_dict[k].append(tau_t)
-        #         self.vbparam_dict[k].append(gbar_t)
-        # if True:
-        #     g_t = self.model.grads
-
-        #     for k in self.vbparam_dict.keys():
-        #         pos, hbar_t, tau_t, gbar_t = self.vbparam_dict[k]
-        #         gbar_t = (1-1/tau_t)*gbar_t + 1/tau_t * g_t[pos]
-        #         hbar_t = (1-1/tau_t)*hbar_t + 1/tau_t * np.dot(g_t[pos].T, g_t[pos])
-        #         self.learning_rate[pos] = (np.dot(gbar_t.T, gbar_t) / hbar_t)*1.0
-        #         tau_t = tau_t*(1-self.learning_rate[pos]) + 1
-        #         self.vbparam_dict[k] = [pos, hbar_t, tau_t, gbar_t]
-        #         print k, self.learning_rate[pos].max()
-
     def opt(self, f_fp=None, f=None, fp=None):
         self.x_opt = self.model._get_params_transformed()
 
diff --git a/GPy/models/GP.py b/GPy/models/GP.py
index b518ce4e..8f7ef9d3 100644
--- a/GPy/models/GP.py
+++ b/GPy/models/GP.py
@@ -173,7 +173,7 @@ class GP(model):
         """
         # normalize X values
         Xnew = (Xnew.copy() - self._Xmean) / self._Xstd
-        mu, var = self._raw_predict(Xnew, which_parts, full_cov)
+        mu, var = self._raw_predict(Xnew, which_parts=which_parts, full_cov=full_cov)
 
         # now push through likelihood
         mean, var, _025pm, _975pm = self.likelihood.predictive_values(mu, var, full_cov)
diff --git a/GPy/models/sparse_GP.py b/GPy/models/sparse_GP.py
index 6eb6fb49..fe9d9dc4 100644
--- a/GPy/models/sparse_GP.py
+++ b/GPy/models/sparse_GP.py
@@ -234,7 +234,7 @@ class sparse_GP(GP):
             Kxx = self.kern.Kdiag(Xnew, which_parts=which_parts)
             var = Kxx - np.sum(Kx * np.dot(Kmmi_LmiBLmi, Kx), 0)
         else:
-            assert which_parts=='all', "swithching out parts of variational kernels is not implemented"
+            # assert which_parts=='all', "swithching out parts of variational kernels is not implemented"
             Kx = self.kern.psi1(self.Z, Xnew, X_variance_new)#, which_parts=which_parts) TODO: which_parts
             mu = np.dot(Kx, self.Cpsi1V)
             if full_cov:
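
Note on the SGD.py hunk: the deleted block was a commented-out per-parameter-group variant of the adaptive learning-rate update that the surviving lines above still compute globally. A minimal standalone sketch of that update, with names mirroring the diff (gbar_t/hbar_t are running gradient statistics, tau_t the memory length); nothing here is GPy API:

    import numpy as np

    def adaptive_rate_step(g_t, gbar_t, hbar_t, tau_t):
        # running average of the gradient, with memory 1/tau_t
        gbar_t = (1.0 - 1.0/tau_t) * gbar_t + (1.0/tau_t) * g_t
        # running average of the squared-gradient norm
        hbar_t = (1.0 - 1.0/tau_t) * hbar_t + (1.0/tau_t) * np.dot(g_t.T, g_t)
        # expected-gain learning rate, as in the kept line above
        rate = np.dot(gbar_t.T, gbar_t) / hbar_t
        # a large rate shortens the memory, a small rate lengthens it
        tau_t = tau_t * (1.0 - rate) + 1.0
        return rate, gbar_t, hbar_t, tau_t

The removed code applied exactly this recursion separately to index blocks ('iip', 'mu', 'S') of the gradient vector instead of once to the whole vector.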
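
Note on the GP.py hunk: passing which_parts and full_cov as keywords guards against positional mismatch when a subclass defines _raw_predict with a different parameter order or extra parameters. An illustrative sketch of the failure mode, using hypothetical class names rather than GPy's:

    class Base(object):
        def _raw_predict(self, Xnew, which_parts='all', full_cov=False):
            return which_parts, full_cov

    class Sparse(Base):
        # hypothetical override that inserts a parameter before full_cov
        def _raw_predict(self, Xnew, which_parts='all', X_variance_new=None, full_cov=False):
            return which_parts, full_cov

    print(Sparse()._raw_predict(None, 'all', True))
    # -> ('all', False): True lands in X_variance_new; full_cov keeps its default
    print(Sparse()._raw_predict(None, which_parts='all', full_cov=True))
    # -> ('all', True): keywords bind correctly regardless of signature changes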
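
Note on the sparse_GP.py hunk: with the assert commented out, a caller passing which_parts other than 'all' now proceeds silently into a psi1 computation that ignores the argument (hence the trailing TODO). One hedged alternative, not what the diff does, would be a softer guard that warns instead of failing:

    import warnings

    def check_which_parts(which_parts):
        # hypothetical guard: warn rather than assert
        if which_parts != 'all':
            warnings.warn("switching out parts of variational kernels is not "
                          "implemented; which_parts is ignored")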