Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-09 12:02:38 +02:00)

Commit 643c90010b: merged array_core
7 changed files with 22 additions and 26 deletions
@@ -44,7 +44,8 @@ class ObservableArray(np.ndarray, Observable):
         return self.__setitem__(slice(start, stop), val)
 
     def __copy__(self, *args):
-        return ObservableArray(self.base.base.copy(*args))
+        return ObservableArray(self.view(np.ndarray).copy())
 
     def copy(self, *args):
         return self.__copy__(*args)
@@ -53,31 +54,26 @@ class ObservableArray(np.ndarray, Observable):
         self._notify_observers()
         return r
 
     def __ilshift__(self, *args, **kwargs):
         r = np.ndarray.__ilshift__(self, *args, **kwargs)
         self._notify_observers()
         return r
 
     def __irshift__(self, *args, **kwargs):
         r = np.ndarray.__irshift__(self, *args, **kwargs)
         self._notify_observers()
         return r
 
     def __rrshift__(self, *args, **kwargs):
         r = np.ndarray.__rrshift__(self, *args, **kwargs)
         self._notify_observers()
         return r
 
     def __ixor__(self, *args, **kwargs):
         r = np.ndarray.__ixor__(self, *args, **kwargs)
         self._notify_observers()
         return r
 
     def __rxor__(self, *args, **kwargs):
         r = np.ndarray.__rxor__(self, *args, **kwargs)
         self._notify_observers()
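
The two hunks above touch ObservableArray, the ndarray subclass that pings registered observers whenever it is modified in place. Below is a minimal, self-contained sketch of that pattern; the Observable mixin and its add_observer callback API here are simplified assumptions of mine, not GPy's actual classes or signatures.

import numpy as np

class Observable(object):
    """Toy observer mixin: stores callbacks and fires them on change."""
    def add_observer(self, callback):
        self._observer_callbacks.append(callback)

    def _notify_observers(self):
        for callback in self._observer_callbacks:
            callback(self)

class ObservableArray(np.ndarray, Observable):
    """ndarray subclass that notifies observers after in-place updates."""
    def __new__(cls, input_array):
        obj = np.asarray(input_array).view(cls)
        obj._observer_callbacks = []
        return obj

    def __array_finalize__(self, obj):
        # Views created from this array share the callback list.
        self._observer_callbacks = getattr(obj, '_observer_callbacks', [])

    def __setitem__(self, key, value):
        r = np.ndarray.__setitem__(self, key, value)
        self._notify_observers()
        return r

    def __iadd__(self, *args, **kwargs):
        r = np.ndarray.__iadd__(self, *args, **kwargs)
        self._notify_observers()
        return r

    def __copy__(self):
        # Copy through a plain ndarray view, mirroring the change in the first hunk.
        return ObservableArray(self.view(np.ndarray).copy())

# Usage: any in-place modification triggers the registered callback.
a = ObservableArray(np.zeros(3))
a.add_observer(lambda arr: print("array changed:", arr))
a[0] = 1.0        # -> array changed: [1. 0. 0.]
a += 2.0          # -> array changed: [3. 2. 2.]

The __copy__ change in the first hunk corresponds to the last method here: copying via a plain np.ndarray view avoids relying on self.base, which is None for arrays that own their own data.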
@@ -26,3 +26,4 @@ etc.
 from exact_gaussian_inference import ExactGaussianInference
 from laplace import LaplaceInference
 expectation_propagation = 'foo' # TODO
+from dtc import DTC
@@ -52,20 +52,20 @@ class DTC(object):
         b, _ = dtrtrs(LA, tmp*beta, lower=1)
         tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
         v, _ = dtrtrs(L, tmp, lower=1, trans=1)
-        tmp = tdrtrs(LA, Li, lower=1, trans=0)
+        tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
         P = tdot(tmp.T)
 
         #compute log marginal
         log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
                        -np.sum(np.log(np.diag(LA)))*output_dim + \
                        0.5*num_data*output_dim*np.log(beta) + \
-                       -0.5*beta*np.sum(np.square(Y)) +
+                       -0.5*beta*np.sum(np.square(Y)) + \
                        0.5*np.sum(np.square(b))
 
         # Compute dL_dKmm
         tmp, _ = dtrtrs(L, A_I, lower=1, trans=1)
         dL_dK, _ = dtrtrs(L, tmp.T, lower=1, trans=0)
-        tmp, _ = dtrtrs(LA, tmp.T. lower=1, trans=1)
+        tmp, _ = dtrtrs(LA, tmp.T, lower=1, trans=1)
         dL_dK -= tdot(tmp.T)
         dL_dK *= output_dim
         dL_dK -= tdot(v)
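
All the heavy lifting in this hunk goes through two linear-algebra helpers, dtrtrs and tdot. Assuming the semantics suggested by their use here (dtrtrs is a triangular solve returning a (solution, info) pair, and tdot(A) computes A A^T; these are my assumptions, not a documented API), a small SciPy-based sketch of what the back-to-back solves compute:

import numpy as np
from scipy import linalg

# Hypothetical stand-ins for the helpers used in the hunk above.
def dtrtrs_like(L, B, lower=1, trans=0):
    """Solve L x = B (trans=0) or L^T x = B (trans=1) for triangular L."""
    return linalg.solve_triangular(L, B, lower=bool(lower), trans=trans), 0

def tdot_like(A):
    """Return A A^T."""
    return np.dot(A, A.T)

# Tiny worked example: two back-to-back triangular solves against a
# Cholesky factor implement (L L^T)^{-1} B, i.e. a solve against K.
rng = np.random.RandomState(0)
M = rng.randn(4, 4)
K = M.dot(M.T) + 4*np.eye(4)     # SPD matrix
L = np.linalg.cholesky(K)        # K = L L^T, lower triangular
B = rng.randn(4, 2)

tmp, _ = dtrtrs_like(L, B, lower=1, trans=0)    # tmp = L^{-1} B
x, _   = dtrtrs_like(L, tmp, lower=1, trans=1)  # x = L^{-T} L^{-1} B = K^{-1} B
assert np.allclose(K.dot(x), B)
assert np.allclose(tdot_like(L), K)             # tdot(L) reconstructs K

Chaining a trans=0 solve and a trans=1 solve against a Cholesky factor L is an application of K^{-1} = L^{-T} L^{-1}, which is how v and P are formed above.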
@@ -79,17 +79,17 @@ class DTC(object):
 
         #compute dL_dR
         Uv = np.dot(U, v)
-        dL_dR = 0.5*(np.sum(U*np.dot(P, U.T), 1) - beta * np.sum(np.square(Y, 1)) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1)
+        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - beta * np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1)
         )*beta**2
 
-        grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU}
+        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T}
 
         #update gradients
         kern.update_gradients_sparse(X=X, Z=Z, **grad_dict)
         likelihood.update_gradients(dL_dR)
 
         #construct a posterior object
-        post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm)
+        post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
 
         return post, log_marginal, grad_dict
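
For context on the Posterior object built at the end of this hunk: storing a "Woodbury vector" v and "Woodbury inverse" alongside the Cholesky factor is enough to make predictions at new inputs later. A rough sketch of how such a representation is typically consumed (my own helper under those assumptions, not the Posterior API itself):

import numpy as np

def predict_from_woodbury(Kstar_m, Kstar_diag, woodbury_vector, woodbury_inv):
    """Predictive mean/variance from a stored sparse-GP posterior.

    mean_* = K_{*m} v
    var_*  = k_{**} - diag(K_{*m} W_inv K_{*m}^T)
    """
    mean = Kstar_m.dot(woodbury_vector)
    var = Kstar_diag - np.sum(Kstar_m.dot(woodbury_inv) * Kstar_m, axis=1)
    return mean, var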
@@ -92,12 +92,11 @@ class LaplaceInference(object):
         iteration = 0
         while difference > self._mode_finding_tolerance and iteration < self._mode_finding_max_iter:
             W = -likelihood.d2logpdf_df2(f, Y, extra_data=Y_metadata)
 
-            W_f = W*f
             grad = likelihood.dlogpdf_df(f, Y, extra_data=Y_metadata)
 
+            W_f = W*f
             b = W_f + grad # R+W p46 line 6.
             #W12BiW12Kb, B_logdet = self._compute_B_statistics(K, W.copy(), np.dot(K, b), likelihood.log_concave)
             W12BiW12, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave)
             W12BiW12Kb = np.dot(W12BiW12, np.dot(K, b))
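
The loop in this hunk is the inner Newton iteration of the Laplace approximation; the "R+W p46 line 6" comment points at Rasmussen and Williams, GPML, Algorithm 3.1. A compact, self-contained version of that mode-finding scheme for a toy Bernoulli-logit likelihood (illustrative only, not LaplaceInference or its _compute_B_statistics):

import numpy as np

def laplace_mode(K, y, max_iter=50, tol=1e-6):
    """Newton iteration for the Laplace mode of a Bernoulli-logit GP.

    y is in {0, 1}; K is the prior covariance over the latent f.
    Follows the b = W f + grad / solve-with-B structure used above.
    """
    n = y.shape[0]
    f = np.zeros(n)
    for _ in range(max_iter):
        pi = 1.0 / (1.0 + np.exp(-f))        # sigmoid(f)
        grad = y - pi                        # d log p(y|f) / df
        W = pi * (1.0 - pi)                  # -d2 log p(y|f) / df2
        sqrtW = np.sqrt(W)
        B = np.eye(n) + sqrtW[:, None] * K * sqrtW[None, :]
        L = np.linalg.cholesky(B)
        b = W * f + grad                     # R&W p46, line 6
        c = np.linalg.solve(L, sqrtW * K.dot(b))
        a = b - sqrtW * np.linalg.solve(L.T, c)
        f_new = K.dot(a)
        if np.max(np.abs(f_new - f)) < tol:
            return f_new
        f = f_new
    return f

# Toy usage with an RBF-like covariance on a 1-D grid.
X = np.linspace(0, 5, 20)
K = np.exp(-0.5 * (X[:, None] - X[None, :])**2) + 1e-6*np.eye(20)
y = (X > 2.5).astype(float)
f_hat = laplace_mode(K, y)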
@@ -1,10 +1,10 @@
 import numpy as np
 import unittest
 import GPy
-from GPy.models import GradientChecker
+from ..models import GradientChecker
 import functools
 import inspect
-from GPy.likelihoods import link_functions
+from ..likelihoods import link_functions
 from ..core.parameterization import Param
 from functools import partial
 #np.random.seed(300)