Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-09 03:52:39 +02:00
non-working grads in linear

commit 0dc9a32ba3, parent 81e0861a60
3 changed files with 14 additions and 11 deletions
@@ -147,7 +147,7 @@ class Param(Constrainable, ObservableArray, Gradcheckable, Indexable):
         target += self.gradient.flat
 
     def _set_gradient(self, g):
-        self.gradient = g
+        self.gradient = g.reshape(self._realshape_)
 
     #===========================================================================
     # Array operations -> done
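
The reshape fixes a shape mismatch: gradients are gathered into a flat vector (note the `self.gradient.flat` in the context line above), so a gradient written back without reshaping would leave the parameter with a 1-D gradient that no longer matches the parameter's own array shape. A minimal sketch of the idea in plain NumPy, not GPy's actual Param class:

import numpy as np

# A (3, 2) parameter whose gradient arrives as a flat length-6 vector,
# e.g. from an optimizer that works on flattened parameter vectors.
real_shape = (3, 2)
flat_grad = np.arange(6, dtype=float)

# Without the reshape the stored gradient would be 1-D and no longer
# line up with the parameter:
grad_wrong = flat_grad                       # shape (6,)

# With the reshape it matches the parameter again:
grad_right = flat_grad.reshape(real_shape)   # shape (3, 2)

assert grad_wrong.shape != real_shape
assert grad_right.shape == real_shape
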
@@ -25,7 +25,7 @@ def bgplvm_test_model(optimize=False, verbose=1, plot=False, output_dim=200, nan
    #k = (GPy.kern.RBF(input_dim, .5, lengthscales, ARD=True)
    ##+ GPy.kern.white(input_dim, 0.01)
    #)
-    k = GPy.kern.Linear(input_dim)# + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+    k = GPy.kern.Linear(input_dim, ARD=1)# + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
     K = k.K(X)
     Y = _np.random.multivariate_normal(_np.zeros(num_inputs), K, (output_dim,)).T
 
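
Switching on ARD gives the linear kernel one variance per input dimension, which is exactly what the new per-dimension gradient code in update_gradients_full exercises. The test data generation amounts to drawing outputs from a zero-mean Gaussian whose covariance is the kernel matrix; a self-contained sketch with plain NumPy standing in for GPy (sizes are illustrative):

import numpy as np

np.random.seed(0)
num_inputs, input_dim, output_dim = 50, 3, 5

X = np.random.randn(num_inputs, input_dim)
variances = np.random.rand(input_dim)   # one variance per dimension (ARD)

# Linear ARD kernel: K = X diag(variances) X^T
K = (X * variances).dot(X.T)

# Draw each output column from N(0, K); a little jitter keeps the
# low-rank K numerically positive definite.
jitter = 1e-8 * np.eye(num_inputs)
Y = np.random.multivariate_normal(np.zeros(num_inputs), K + jitter, output_dim).T
print(Y.shape)   # (50, 5)
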
@@ -47,7 +47,7 @@ class Linear(Kern):
 
         self.variances = Param('variances', variances, Logexp())
         self.add_parameter(self.variances)
-        self.variances.add_observer(self._on_changed)
+        self.variances.add_observer(self, self._on_changed)
 
     def _on_changed(self, obj):
         #TODO: move this to base class? isnt it jst for the caching?
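
The extra first argument matches an observer API that registers (observer, callback) pairs rather than bare callbacks, so that every callback belonging to one observer can later be removed in one go. A minimal sketch of that pattern; GPy's actual Observable class may differ in detail:

# Minimal observer list keyed by (observer, callback) pairs (illustrative).
class Observable:
    def __init__(self):
        self._observers = []

    def add_observer(self, observer, callback):
        # Keeping the observer object alongside the callback lets us
        # later drop all callbacks registered by that observer.
        self._observers.append((observer, callback))

    def remove_observer(self, observer):
        self._observers = [(o, c) for o, c in self._observers
                           if o is not observer]

    def notify(self):
        for _, callback in self._observers:
            callback(self)
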
@@ -82,9 +82,9 @@ class Linear(Kern):
         self._collect_gradient(target)
         self.update_gradients_full(dL_dKmm, Z, None)
         self._collect_gradient(target)
-        return target
+        self._set_gradient(target)
 
-    def update_gradients_full(self, dL_dK, X):
+    def update_gradients_full(self, dL_dK, X, X2=None):
         if self.ARD:
             if X2 is None:
                 self.variances.gradient = np.array([np.sum(dL_dK * tdot(X[:, i:i + 1])) for i in range(self.input_dim)])
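
The per-dimension gradient in the X2 is None branch follows from the ARD linear kernel K = sum_i v_i * X[:, i] X[:, i]^T, which gives dL/dv_i = sum(dL_dK * X[:, i] X[:, i]^T). A finite-difference check of that identity with plain NumPy, assuming tdot(a) means a.dot(a.T):

import numpy as np

def tdot(a):
    # a @ a.T, the assumed semantics of the helper used in the diff
    return a.dot(a.T)

np.random.seed(1)
N, D = 6, 3
X = np.random.randn(N, D)
variances = np.random.rand(D)
dL_dK = np.random.randn(N, N)

# Analytic gradient, as in the diff's list comprehension:
analytic = np.array([np.sum(dL_dK * tdot(X[:, i:i + 1])) for i in range(D)])

# Finite-difference check against the scalar objective L = sum(dL_dK * K)
def L(v):
    K = (X * v).dot(X.T)
    return np.sum(dL_dK * K)

eps = 1e-6
numeric = np.array([(L(variances + eps * np.eye(D)[i]) -
                     L(variances - eps * np.eye(D)[i])) / (2 * eps)
                    for i in range(D)])

assert np.allclose(analytic, numeric, atol=1e-6)
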
@@ -130,16 +130,19 @@ class Linear(Kern):
     def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
         # psi0:
         tmp = dL_dpsi0[:, None] * self._mu2S(mu, S)
-        if self.ARD: self.variances.gradient[:] = tmp.sum(0)
-        else: self.variances.gradient[:] = tmp.sum()
+        if self.ARD: grad = tmp.sum(0)
+        else: grad = np.atleast_1d(tmp.sum())
         #psi1
-        self.variances.gradient += self._param_grad_helper(dL_dpsi1, mu, Z)
+        self.update_gradients_full(dL_dpsi1, mu, Z)
+        grad += self.variances.gradient
         #psi2
         tmp = dL_dpsi2[:, :, :, None] * (self._ZAinner(mu, S, Z)[:, :, None, :] * (2. * Z)[None, None, :, :])
-        if self.ARD: self.variances.gradient += tmp.sum(0).sum(0).sum(0)
-        else: self.variances.gradient += tmp.sum()
+        if self.ARD: grad += tmp.sum(0).sum(0).sum(0)
+        else: grad += tmp.sum()
         #from Kmm
-        self.variances.gradient += self._param_grad_helper(dL_dKmm, Z, None)
+        self.update_gradients_full(dL_dpsi1, mu, Z)
+        grad += self.variances.gradient
+        self._set_gradient(grad)
 
     def gradients_Z_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
         # Kmm
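
The rewrite accumulates every contribution (psi0, psi1, psi2, Kmm) in a local grad and writes it back once via _set_gradient, instead of mutating self.variances.gradient term by term; that is necessary because each update_gradients_full call now overwrites self.variances.gradient rather than adding to it. Note that the new "#from Kmm" branch still passes dL_dpsi1 where dL_dKmm, Z, None would be expected, which fits the commit message: the gradients do not work yet. A schematic of the accumulate-then-set pattern, with hypothetical names and plain NumPy:

import numpy as np

def combined_gradient(psi0_term, psi1_term, psi2_term, kmm_term):
    # Accumulate each contribution into a local array ...
    grad = np.atleast_1d(np.asarray(psi0_term, dtype=float)).copy()
    grad += psi1_term
    grad += psi2_term
    grad += kmm_term
    # ... and hand the total to the parameter exactly once;
    # the caller would pass this to _set_gradient.
    return grad

parts = [np.random.randn(3) for _ in range(4)]
assert np.allclose(combined_gradient(*parts), np.sum(parts, axis=0))
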