merged array_core

Max Zwiessele 2014-02-12 11:32:28 +00:00
commit 643c90010b
7 changed files with 22 additions and 26 deletions

View file

@@ -13,7 +13,7 @@ class ParamList(list):
             if el is other:
                 return True
         return False
     pass

 class ObservableArray(np.ndarray, Observable):
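The identity test (el is other) is deliberate: list's default __contains__ compares with ==, which for arrays yields an elementwise boolean array whose truth value is ambiguous. A minimal standalone sketch of the behaviour:

    import numpy as np

    class ParamList(list):
        def __contains__(self, other):
            for el in self:
                if el is other:   # identity, never ==
                    return True
            return False

    a = np.arange(3.)
    pl = ParamList([a])
    assert a in pl
    assert a.copy() not in pl     # equal values, different object
    # a plain list would evaluate `a.copy() == el` here and raise
    # "The truth value of an array ... is ambiguous"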
@@ -33,7 +33,7 @@ class ObservableArray(np.ndarray, Observable):
         # see InfoArray.__array_finalize__ for comments
         if obj is None: return
         self._observers_ = getattr(obj, '_observers_', None)

     def __setitem__(self, s, val, update=True):
         super(ObservableArray, self).__setitem__(s, val)
         if update:
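For context, __array_finalize__ is NumPy's hook that runs on explicit construction, views, and templates alike; the getattr default keeps the lookup from failing when obj is a plain ndarray that never had _observers_. A minimal sketch of the propagation, with a stand-in subclass:

    import numpy as np

    class Obs(np.ndarray):                    # stand-in for ObservableArray
        def __array_finalize__(self, obj):
            if obj is None: return
            self._observers_ = getattr(obj, '_observers_', None)

    x = np.arange(4.).view(Obs)
    x._observers_ = ['some observer']
    v = x[1:3]                                # slicing triggers __array_finalize__
    assert v._observers_ is x._observers_     # the reference is carried over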
@@ -41,10 +41,11 @@ class ObservableArray(np.ndarray, Observable):
     def __getslice__(self, start, stop):
         return self.__getitem__(slice(start, stop))

     def __setslice__(self, start, stop, val):
         return self.__setitem__(slice(start, stop), val)

     def __copy__(self, *args):
-        return ObservableArray(self.base.base.copy(*args))
+        return ObservableArray(self.view(np.ndarray).copy())

     def copy(self, *args):
         return self.__copy__(*args)
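The right-hand side is the robust spelling: `.base.base` assumes the array sits exactly two view levels above its owning buffer, while `.view(np.ndarray).copy()` gives an independent plain-ndarray copy regardless of how the view was created. A minimal sketch:

    import numpy as np

    class Obs(np.ndarray):
        pass                                  # stand-in for ObservableArray

    x = np.arange(6.).view(Obs)[1:4]          # a view of a view
    c = x.view(np.ndarray).copy()             # fresh buffer, plain ndarray
    assert type(c) is np.ndarray and c.base is None
    c[0] = -1.0
    assert x[0] == 1.0                        # the original is untouched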
@@ -52,32 +53,27 @@ class ObservableArray(np.ndarray, Observable):
         r = np.ndarray.__ror__(self, *args, **kwargs)
         self._notify_observers()
         return r

     def __ilshift__(self, *args, **kwargs):
         r = np.ndarray.__ilshift__(self, *args, **kwargs)
         self._notify_observers()
         return r

     def __irshift__(self, *args, **kwargs):
         r = np.ndarray.__irshift__(self, *args, **kwargs)
         self._notify_observers()
         return r

     def __rrshift__(self, *args, **kwargs):
         r = np.ndarray.__rrshift__(self, *args, **kwargs)
         self._notify_observers()
         return r

     def __ixor__(self, *args, **kwargs):
         r = np.ndarray.__ixor__(self, *args, **kwargs)
         self._notify_observers()
         return r

     def __rxor__(self, *args, **kwargs):
         r = np.ndarray.__rxor__(self, *args, **kwargs)
         self._notify_observers()
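Every override in this block repeats the same wrap-notify-return dance. A hypothetical factory (not part of this commit) that would generate the wrappers from a name list:

    import numpy as np

    def _notifying(name):
        base_op = getattr(np.ndarray, name)
        def op(self, *args, **kwargs):
            r = base_op(self, *args, **kwargs)   # do the ndarray work
            self._notify_observers()             # then tell the observers
            return r
        return op

    # for name in ('__ilshift__', '__irshift__', '__ixor__', '__rxor__'):
    #     setattr(ObservableArray, name, _notifying(name))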

View file

@@ -152,14 +152,14 @@ class Param(ObservableArray, Constrainable, Gradcheckable):
     #===========================================================================
     def tie_to(self, param):
         """
         :param param: the parameter object to tie this parameter to.
             Can be ParamConcatenation (retrieved by regexp search)

         Tie this parameter to the given parameter.
         Broadcasting is not allowed, but you can tie a whole dimension to
         one parameter: self[:,0].tie_to(other), where other is a one-value
         parameter.

         Note: For now only one parameter can have ties, so all ties of a
         parameter will be removed when re-tying!
         """
@@ -529,7 +529,7 @@ class ParamConcatenation(object):
     def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
         return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance)
     #checkgrad.__doc__ = Gradcheckable.checkgrad.__doc__
     __lt__ = lambda self, val: self._vals() < val
     __le__ = lambda self, val: self._vals() <= val
     __eq__ = lambda self, val: self._vals() == val

View file

@@ -38,9 +38,9 @@ class SparseGP(GP):
         if inference_method is None:
             if isinstance(likelihood, likelihoods.Gaussian):
                 inference_method = varDTC.VarDTC()
             else:
                 #inference_method = ??
                 raise NotImplementedError, "what to do what to do?"
             print "defaulting to ", inference_method, "for latent function inference"
         self.Z = Param('inducing inputs', Z)
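The branch means a caller can omit inference_method entirely when the likelihood is Gaussian; a hypothetical construction relying on that default (argument names assumed, not confirmed by this diff):

    m = SparseGP(X, Y, Z, kernel=kern, likelihood=likelihoods.Gaussian())
    # with no inference_method given, a Gaussian likelihood selects VarDTC;
    # any other likelihood currently raises NotImplementedError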

View file

@@ -26,3 +26,4 @@ etc.
 from exact_gaussian_inference import ExactGaussianInference
 from laplace import LaplaceInference
 expectation_propagation = 'foo' # TODO
+from dtc import DTC
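The added line re-exports DTC next to the other inference objects, so downstream code can pull it straight from the subpackage (exact package path assumed, matching the GPy layout of the time):

    from GPy.inference.latent_function_inference import DTC, ExactGaussianInference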

View file

@@ -52,20 +52,20 @@ class DTC(object):
         b, _ = dtrtrs(LA, tmp*beta, lower=1)
         tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
         v, _ = dtrtrs(L, tmp, lower=1, trans=1)
-        tmp = tdrtrs(LA, Li, lower=1, trans=0)
+        tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
         P = tdot(tmp.T)

         #compute log marginal
         log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
                        -np.sum(np.log(np.diag(LA)))*output_dim + \
                        0.5*num_data*output_dim*np.log(beta) + \
-                       -0.5*beta*np.sum(np.square(Y)) +
+                       -0.5*beta*np.sum(np.square(Y)) + \
                        0.5*np.sum(np.square(b))

         # Compute dL_dKmm
         tmp, _ = dtrtrs(L, A_I, lower=1, trans=1)
         dL_dK, _ = dtrtrs(L, tmp.T, lower=1, trans=0)
-        tmp, _ = dtrtrs(LA, tmp.T. lower=1, trans=1)
+        tmp, _ = dtrtrs(LA, tmp.T, lower=1, trans=1)
         dL_dK -= tdot(tmp.T)
         dL_dK *= output_dim
         dL_dK -= tdot(v)
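dtrtrs here is GPy's wrapper around LAPACK's triangular solver: dtrtrs(L, b, lower=1) returns a (solution, info) pair with L x = b, and trans=1 solves against the transpose, so the old tdrtrs line was both a misspelling and a missing tuple unpack. Reading the fixed sum off the code (a hedged transcription, with N = num_data, D = output_dim, and L_A the Cholesky factor whose diagonal appears in the second term):

    \log Z = -\tfrac{ND}{2}\log(2\pi) - D\sum_i \log (L_A)_{ii}
             + \tfrac{ND}{2}\log\beta - \tfrac{\beta}{2}\lVert Y\rVert_F^2
             + \tfrac{1}{2}\lVert b\rVert^2

The missing line-continuation backslash mattered too: the old version ended a statement with a dangling +, which is a SyntaxError, not a silently wrong value.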
@@ -79,17 +79,17 @@ class DTC(object):
         #compute dL_dR
         Uv = np.dot(U, v)
-        dL_dR = 0.5*(np.sum(U*np.dot(P, U.T), 1) - beta * np.sum(np.square(Y, 1)) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1)
+        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - beta * np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1)
            )*beta**2

-        grad_dict = {'dL_dKmm': dL_dKmm, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU}
+        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':np.zeros_like(Knn), 'dL_dKnm':dL_dU.T}

         #update gradients
         kern.update_gradients_sparse(X=X, Z=Z, **grad_dict)
         likelihood.update_gradients(dL_dR)

         #construct a posterior object
-        post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm)
+        post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)
         return post, log_marginal, grad_dict
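The first fix is more than cosmetic: np.square's second positional argument is out, not an axis, so np.square(Y, 1) never meant "axis 1"; the axis belongs on np.sum. In isolation:

    import numpy as np

    Y = np.random.randn(4, 3)
    row_sums = np.sum(np.square(Y), 1)   # squared entries summed along axis 1
    # np.square(Y, 1) instead passes 1 as the ufunc's `out` argument and
    # raises a TypeError

The grad_dict fix likewise replaces a reference to the undefined name dL_dKmm with the dL_dK computed above, and the .T presumably reorients dL_dU to match what the 'dL_dKnm' key expects.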

View file

@@ -92,12 +92,11 @@ class LaplaceInference(object):
         iteration = 0
         while difference > self._mode_finding_tolerance and iteration < self._mode_finding_max_iter:
             W = -likelihood.d2logpdf_df2(f, Y, extra_data=Y_metadata)
-            W_f = W*f
             grad = likelihood.dlogpdf_df(f, Y, extra_data=Y_metadata)
+            W_f = W*f
             b = W_f + grad # R+W p46 line 6.
-            #W12BiW12Kb, B_logdet = self._compute_B_statistics(K, W.copy(), np.dot(K, b), likelihood.log_concave)
             W12BiW12, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave)
             W12BiW12Kb = np.dot(W12BiW12, np.dot(K, b))
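b = W_f + grad is line 6 of Algorithm 3.1 in Rasmussen & Williams (GPML, p. 46); with B = I + W^{1/2} K W^{1/2}, the Newton update this loop is building is

    b = W f + \nabla \log p(y \mid f)
    a = b - W^{1/2} B^{-1} W^{1/2} K b
    f \leftarrow K a

so the W12BiW12Kb computed above is exactly the W^{1/2} B^{-1} W^{1/2} K b term. Moving W_f = W*f below grad only reorders two independent statements; removing the commented-out call is pure cleanup.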

View file

@@ -1,10 +1,10 @@
 import numpy as np
 import unittest
 import GPy
-from GPy.models import GradientChecker
+from ..models import GradientChecker
 import functools
 import inspect
-from GPy.likelihoods import link_functions
+from ..likelihoods import link_functions
 from ..core.parameterization import Param
 from functools import partial
 #np.random.seed(300)
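The switch to relative imports keeps the tests importing the source tree they sit in rather than whatever GPy is installed site-wide; only the explicit import GPy line still resolves through sys.path. Schematically (file path hypothetical):

    # inside GPy/testing/likelihood_tests.py:
    from ..likelihoods import link_functions    # always this checkout
    from GPy.likelihoods import link_functions  # whichever GPy sys.path finds first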