merged array_core

This commit is contained in:
Max Zwiessele 2014-02-12 11:32:28 +00:00
commit 643c90010b
7 changed files with 22 additions and 26 deletions

View file

@@ -44,7 +44,8 @@ class ObservableArray(np.ndarray, Observable):
return self.__setitem__(slice(start, stop), val) return self.__setitem__(slice(start, stop), val)
def __copy__(self, *args): def __copy__(self, *args):
return ObservableArray(self.base.base.copy(*args)) return ObservableArray(self.view(np.ndarray).copy())
def copy(self, *args): def copy(self, *args):
return self.__copy__(*args) return self.__copy__(*args)
@@ -53,31 +54,26 @@ class ObservableArray(np.ndarray, Observable):
self._notify_observers() self._notify_observers()
return r return r
def __ilshift__(self, *args, **kwargs): def __ilshift__(self, *args, **kwargs):
r = np.ndarray.__ilshift__(self, *args, **kwargs) r = np.ndarray.__ilshift__(self, *args, **kwargs)
self._notify_observers() self._notify_observers()
return r return r
def __irshift__(self, *args, **kwargs): def __irshift__(self, *args, **kwargs):
r = np.ndarray.__irshift__(self, *args, **kwargs) r = np.ndarray.__irshift__(self, *args, **kwargs)
self._notify_observers() self._notify_observers()
return r return r
def __rrshift__(self, *args, **kwargs): def __rrshift__(self, *args, **kwargs):
r = np.ndarray.__rrshift__(self, *args, **kwargs) r = np.ndarray.__rrshift__(self, *args, **kwargs)
self._notify_observers() self._notify_observers()
return r return r
def __ixor__(self, *args, **kwargs): def __ixor__(self, *args, **kwargs):
r = np.ndarray.__ixor__(self, *args, **kwargs) r = np.ndarray.__ixor__(self, *args, **kwargs)
self._notify_observers() self._notify_observers()
return r return r
def __rxor__(self, *args, **kwargs): def __rxor__(self, *args, **kwargs):
r = np.ndarray.__rxor__(self, *args, **kwargs) r = np.ndarray.__rxor__(self, *args, **kwargs)
self._notify_observers() self._notify_observers()

View file

@@ -38,9 +38,9 @@ class SparseGP(GP):
if inference_method is None: if inference_method is None:
if isinstance(likelihood, likelihoods.Gaussian): if isinstance(likelihood, likelihoods.Gaussian):
inference_method = varDTC.VarDTC() inference_method = varDTC.VarDTC()
else: else:
#inference_method = ?? #inference_method = ??
raise NotImplementedError, "what to do what to do?" raise NotImplementedError, "what to do what to do?"
print "defaulting to ", inference_method, "for latent function inference" print "defaulting to ", inference_method, "for latent function inference"
self.Z = Param('inducing inputs', Z) self.Z = Param('inducing inputs', Z)

View file

@@ -26,3 +26,4 @@ etc.
from exact_gaussian_inference import ExactGaussianInference from exact_gaussian_inference import ExactGaussianInference
from laplace import LaplaceInference from laplace import LaplaceInference
expectation_propagation = 'foo' # TODO expectation_propagation = 'foo' # TODO
from dtc import DTC

View file

@@ -52,20 +52,20 @@ class DTC(object):
b, _ = dtrtrs(LA, tmp*beta, lower=1) b, _ = dtrtrs(LA, tmp*beta, lower=1)
tmp, _ = dtrtrs(LA, b, lower=1, trans=1) tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
v, _ = dtrtrs(L, tmp, lower=1, trans=1) v, _ = dtrtrs(L, tmp, lower=1, trans=1)
tmp = tdrtrs(LA, Li, lower=1, trans=0) tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
P = tdot(tmp.T) P = tdot(tmp.T)
#compute log marginal #compute log marginal
log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \ log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
-np.sum(np.log(np.diag(LA)))*output_dim + \ -np.sum(np.log(np.diag(LA)))*output_dim + \
0.5*num_data*output_dim*np.log(beta) + \ 0.5*num_data*output_dim*np.log(beta) + \
-0.5*beta*np.sum(np.square(Y)) + -0.5*beta*np.sum(np.square(Y)) + \
0.5*np.sum(np.square(b)) 0.5*np.sum(np.square(b))
# Compute dL_dKmm # Compute dL_dKmm
tmp, _ = dtrtrs(L, A_I, lower=1, trans=1) tmp, _ = dtrtrs(L, A_I, lower=1, trans=1)
dL_dK, _ = dtrtrs(L, tmp.T, lower=1, trans=0) dL_dK, _ = dtrtrs(L, tmp.T, lower=1, trans=0)
tmp, _ = dtrtrs(LA, tmp.T. lower=1, trans=1) tmp, _ = dtrtrs(LA, tmp.T, lower=1, trans=1)
dL_dK -= tdot(tmp.T) dL_dK -= tdot(tmp.T)
dL_dK *= output_dim dL_dK *= output_dim
dL_dK -= tdot(v) dL_dK -= tdot(v)
@@ -79,17 +79,17 @@ class DTC(object):
#compute dL_dR #compute dL_dR
Uv = np.dot(U, v) Uv = np.dot(U, v)
dL_dR = 0.5*(np.sum(U*np.dot(P, U.T), 1) - beta * np.sum(np.square(Y, 1)) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1) dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - beta * np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1)
)*beta**2 )*beta**2
grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU} grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T}
#update gradients #update gradients
kern.update_gradients_sparse(X=X, Z=Z, **grad_dict) kern.update_gradients_sparse(X=X, Z=Z, **grad_dict)
likelihood.update_gradients(dL_dR) likelihood.update_gradients(dL_dR)
#construct a posterior object #construct a posterior object
post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm) post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
return post, log_marginal, grad_dict return post, log_marginal, grad_dict

View file

@@ -92,12 +92,11 @@ class LaplaceInference(object):
iteration = 0 iteration = 0
while difference > self._mode_finding_tolerance and iteration < self._mode_finding_max_iter: while difference > self._mode_finding_tolerance and iteration < self._mode_finding_max_iter:
W = -likelihood.d2logpdf_df2(f, Y, extra_data=Y_metadata) W = -likelihood.d2logpdf_df2(f, Y, extra_data=Y_metadata)
W_f = W*f
grad = likelihood.dlogpdf_df(f, Y, extra_data=Y_metadata) grad = likelihood.dlogpdf_df(f, Y, extra_data=Y_metadata)
W_f = W*f
b = W_f + grad # R+W p46 line 6. b = W_f + grad # R+W p46 line 6.
#W12BiW12Kb, B_logdet = self._compute_B_statistics(K, W.copy(), np.dot(K, b), likelihood.log_concave)
W12BiW12, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave) W12BiW12, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave)
W12BiW12Kb = np.dot(W12BiW12, np.dot(K, b)) W12BiW12Kb = np.dot(W12BiW12, np.dot(K, b))

View file

@@ -1,10 +1,10 @@
import numpy as np import numpy as np
import unittest import unittest
import GPy import GPy
from GPy.models import GradientChecker from ..models import GradientChecker
import functools import functools
import inspect import inspect
from GPy.likelihoods import link_functions from ..likelihoods import link_functions
from ..core.parameterization import Param from ..core.parameterization import Param
from functools import partial from functools import partial
#np.random.seed(300) #np.random.seed(300)