Merge branch 'devel' of github.com:SheffieldML/GPy into devel

Zhenwen Dai 2015-05-21 11:33:43 +01:00
commit 21a39ffb87
5 changed files with 14488 additions and 330 deletions


@@ -38,6 +38,11 @@ class Param(Parameterizable, ObsAr):
Fixing parameters will fix them to the value they are right now. If you change
the fixed value, it will be fixed to the new value!
+Important Note:
+Multilevel indexing (e.g. self[:2][1:]) is not supported and might lead to unexpected behaviour.
+Try to index in one go, using boolean indexing or the numpy builtin
+np.index_exp function.
See :py:class:`GPy.core.parameterized.Parameterized` for more details on constraining etc.
"""


@@ -44,9 +44,11 @@ class SVGP(LatentFunctionInference):
#compute the marginal means and variances of q(f)
A, _ = linalg.dpotrs(Lm, Kmn)
mu = prior_mean_f + np.dot(A.T, q_u_mean - prior_mean_u)
-LA = L.reshape(-1, num_inducing).dot(A).reshape(num_outputs, num_inducing, num_data)
-#TODO? possibly use dtrmm for the above line?
-v = (Knn_diag - np.sum(A*Kmn,0))[:,None] + np.sum(np.square(LA),1).T
+v = np.empty((num_data, num_outputs))
+for i in range(num_outputs):
+    tmp = dtrmm(1.0,L[i].T, A, lower=0, trans_a=0)
+    v[:,i] = np.sum(np.square(tmp),0)
+v += (Knn_diag - np.sum(A*Kmn,0))[:,None]
#compute the KL term
Kmmim = np.dot(Kmmi, q_u_mean)
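
To make the dtrmm rewrite concrete, here is a self-contained sketch (shapes and variable names are assumptions, not GPy internals) checking the BLAS triangular multiply against a plain dense product:

    import numpy as np
    from scipy.linalg.blas import dtrmm

    num_inducing, num_data, num_outputs = 5, 7, 3
    rng = np.random.RandomState(0)
    # Assumed stand-ins: L stacks lower-triangular Cholesky factors of the
    # q(u) covariances, A plays the role of Kmm^{-1} Kmn.
    L = np.tril(rng.randn(num_outputs, num_inducing, num_inducing))
    A = rng.randn(num_inducing, num_data)

    v = np.empty((num_data, num_outputs))
    for i in range(num_outputs):
        # L[i].T is upper triangular (hence lower=0); dtrmm exploits that
        # structure instead of paying for a full dense product.
        tmp = dtrmm(1.0, L[i].T, A, lower=0, trans_a=0)
        v[:, i] = np.sum(np.square(tmp), 0)

    # The same quantity via plain dense products:
    v_ref = np.column_stack([np.sum(np.square(L[i].T.dot(A)), 0)
                             for i in range(num_outputs)])
    assert np.allclose(v, v_ref)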
@@ -90,11 +92,9 @@ class SVGP(LatentFunctionInference):
tmp = S.reshape(-1, num_inducing).dot(Kmmi).reshape(num_outputs, num_inducing , num_inducing )
tmp = 2.*(tmp - np.eye(num_inducing)[None, :,:])
-dF_dKnm = Kmmim.dot(dF_dmu.T).T
-assert dF_dKnm.flags['F_CONTIGUOUS'] # needed for dsymm in place call:
+dF_dKmn = Kmmim.dot(dF_dmu.T)
for a,b in zip(tmp, Adv):
-    dsymm(1.0, a.T, b.T, beta=1., side=1, c=dF_dKnm, overwrite_c=True)
-dF_dKmn = dF_dKnm.T
+    dF_dKmn += np.dot(a.T, b)
dF_dm = Admu
dF_dS = AdvA
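
A toy version of the simplified accumulation above, with assumed shapes (M inducing points, N data points, P outputs). The old dsymm path updated dF_dKnm in place and therefore needed an F-contiguous buffer (hence the deleted assert); the np.dot form trades that constraint for a temporary:

    import numpy as np

    M, N, P = 4, 6, 3
    rng = np.random.RandomState(1)
    Kmmim = rng.randn(M, P)    # stand-in for np.dot(Kmmi, q_u_mean)
    dF_dmu = rng.randn(N, P)
    tmp = rng.randn(P, M, M)   # per-output matrices, as in the loop above
    Adv = rng.randn(P, M, N)

    dF_dKmn = Kmmim.dot(dF_dmu.T)      # (M, N), as in the new code
    for a, b in zip(tmp, Adv):
        dF_dKmn += np.dot(a.T, b)      # ordinary GEMM, no layout constraint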


@@ -78,7 +78,7 @@ class MLP(Kern):
*((vec1[:, None]+vec2[None, :])*self.weight_variance
+ 2*self.bias_variance + 2.))*base_cov_grad).sum()
-def update_gradients_diag(self, X):
+def update_gradients_diag(self, dL_dKdiag, X):
self._K_diag_computations(X)
self.variance.gradient = np.sum(self._K_diag_dvar*dL_dKdiag)
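
The one-argument signature dropped the upstream gradient entirely, so the chain rule in the body could never be applied. A hypothetical toy kernel (not GPy's actual Kern API) showing why dL_dKdiag must be threaded through:

    import numpy as np

    class ToyKern(object):
        # Hypothetical kernel with Kdiag(X) = variance * ones(N).
        def __init__(self, variance):
            self.variance = variance
            self.variance_gradient = 0.0

        def Kdiag(self, X):
            return self.variance * np.ones(X.shape[0])

        def update_gradients_diag(self, dL_dKdiag, X):
            # Chain rule: dL/dvariance = sum_n dL/dKdiag_n * dKdiag_n/dvariance,
            # and dKdiag_n/dvariance = 1 for this toy kernel.
            self.variance_gradient = np.sum(dL_dKdiag)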

File diff suppressed because it is too large


@@ -1,7 +1,9 @@
#cython: boundscheck=False
#cython: nonecheck=False
#cython: wraparound=False
import numpy as np
cimport numpy as np
from cython.parallel import prange
ctypedef np.float64_t DTYPE_t
@@ -22,7 +24,18 @@ def grad_X(int N, int D, int M,
cdef double *grad = <double*> _grad.data
_grad_X(N, D, M, X, X2, tmp, grad) # return nothing, work in place.
-def lengthscale_grads(int N, int M, int Q,
+def grad_X_cython(int N, int D, int M, double[:,:] X, double[:,:] X2, double[:,:] tmp, double[:,:] grad):
+    cdef int n,d,nd,m
+    for nd in prange(N*D, nogil=True):
+        n = nd/D
+        d = nd%D
+        grad[n,d] = 0.0
+        for m in range(M):
+            grad[n,d] += tmp[n,m]*(X[n,d]-X2[m,d])
+def lengthscale_grads_in_c(int N, int M, int Q,
np.ndarray[DTYPE_t, ndim=2] _tmp,
np.ndarray[DTYPE_t, ndim=2] _X,
np.ndarray[DTYPE_t, ndim=2] _X2,
@@ -33,4 +46,14 @@ def lengthscale_grads(int N, int M, int Q,
cdef double *grad = <double*> _grad.data
_lengthscale_grads(N, M, Q, tmp, X, X2, grad) # return nothing, work in place.
+def lengthscale_grads(int N, int M, int Q, double[:,:] tmp, double[:,:] X, double[:,:] X2, double[:] grad):
+    cdef int q, n, m
+    cdef double gradq, dist
+    for q in range(Q):
+        grad[q] = 0.0
+        for n in range(N):
+            for m in range(M):
+                dist = X[n,q] - X2[m,q]
+                grad[q] += tmp[n,m]*dist*dist
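
For sanity-checking the new memoryview kernels, a NumPy reference sketch (shape assumptions: X is (N, Q), X2 is (M, Q), tmp is (N, M); grad_X_cython additionally runs its outer loop in parallel via prange under nogil):

    import numpy as np

    def grad_X_numpy(X, X2, tmp):
        # grad[n, d] = sum_m tmp[n, m] * (X[n, d] - X2[m, d])
        return tmp.sum(1)[:, None] * X - tmp.dot(X2)

    def lengthscale_grads_numpy(tmp, X, X2):
        # grad[q] = sum_{n, m} tmp[n, m] * (X[n, q] - X2[m, q])**2
        diff = X[:, None, :] - X2[None, :, :]
        return np.einsum('nm,nmq->q', tmp, diff ** 2)

    rng = np.random.RandomState(2)
    X, X2, tmp = rng.randn(6, 3), rng.randn(4, 3), rng.randn(6, 4)
    g_X = grad_X_numpy(X, X2, tmp)              # compare with grad_X_cython
    g_ls = lengthscale_grads_numpy(tmp, X, X2)  # compare with lengthscale_grads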