[magnification] added static kernel support and faster derivative computations

Max Zwiessele 2015-09-03 10:19:57 +01:00
parent 4dfdce9b80
commit f6d07ff76a
2 changed files with 10 additions and 4 deletions


@@ -352,13 +352,16 @@ class GP(Model):
         for i in range(self._predictive_variable.shape[0]):
             dK_dXnew_full[i] = kern.gradients_X([[1.]], Xnew, self._predictive_variable[[i]])
+        if full_cov:
+            dK2_dXdX = kern.gradients_XX([[1.]], Xnew)
+        else:
+            dK2_dXdX = kern.gradients_XX_diag([[1.]], Xnew)
         def compute_cov_inner(wi):
             if full_cov:
                 # full covariance gradients:
-                dK2_dXdX = kern.gradients_XX([[1.]], Xnew)
                 var_jac = dK2_dXdX - np.einsum('qnm,miq->niq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
             else:
-                dK2_dXdX = kern.gradients_XX_diag([[1.]], Xnew)
                 var_jac = dK2_dXdX - np.einsum('qim,miq->iq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
             return var_jac
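
The hunk above hoists the second-derivative term out of compute_cov_inner: dK2_dXdX depends only on Xnew, not on the per-output weight matrix wi, so computing it once before the inner function avoids recomputing kern.gradients_XX for every output dimension. A minimal sketch of the pattern, mirroring the diff; the wrapper name and the W_list argument are hypothetical, not part of GPy's API:

import numpy as np

def jacobian_cov(kern, Xnew, dK_dXnew_full, W_list, full_cov=False):
    # dK2_dXdX depends only on Xnew, so compute it once here instead of
    # once per weight matrix wi inside compute_cov_inner.
    if full_cov:
        dK2_dXdX = kern.gradients_XX([[1.]], Xnew)
    else:
        dK2_dXdX = kern.gradients_XX_diag([[1.]], Xnew)

    def compute_cov_inner(wi):
        # subtract the data-dependent term from the prior second derivative
        if full_cov:
            return dK2_dXdX - np.einsum('qnm,miq->niq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
        return dK2_dXdX - np.einsum('qim,miq->iq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)

    return [compute_cov_inner(wi) for wi in W_list]
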
@@ -568,7 +571,7 @@ class GP(Model):
             which_data_ycols, fixed_inputs,
             levels, samples, fignum, ax, resolution,
             plot_raw=plot_raw, Y_metadata=Y_metadata,
             data_symbol=data_symbol, predict_kw=predict_kw,
             plot_training_data=plot_training_data, **kw)


@@ -73,7 +73,10 @@ class Add(CombinationKernel):
         return target

     def gradients_XX(self, dL_dK, X, X2):
-        target = 0.
+        if X2 is None:
+            target = np.zeros((X.shape[0], X.shape[0], X.shape[1]))
+        else:
+            target = np.zeros((X.shape[0], X2.shape[0], X.shape[1]))
         [target.__iadd__(p.gradients_XX(dL_dK, X, X2)) for p in self.parts]
         return target
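
Pre-allocating target with its full (N, M, Q) shape is what makes this hunk work: a plain Python float has no __iadd__, so the old target = 0. broke the in-place accumulation, and with a zeros array in place a static kernel part whose gradients are identically zero (e.g. Bias or White) simply contributes nothing while the result keeps a well-defined shape. A minimal sketch of the accumulation, assuming each part's gradients_XX returns an array broadcastable to (N, M, Q); the function name is hypothetical:

import numpy as np

def sum_gradients_XX(parts, dL_dK, X, X2=None):
    # Allocate the accumulator with the full output shape up front;
    # a scalar 0. has no __iadd__ and would also lose the shape
    # when every part returns a zero gradient.
    M = X.shape[0] if X2 is None else X2.shape[0]
    target = np.zeros((X.shape[0], M, X.shape[1]))
    for p in parts:
        target += p.gradients_XX(dL_dK, X, X2)  # in-place accumulation
    return target
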