Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-04-29 23:06:22 +02:00
[param_to_array] deprecated and removed param_to_array from code, use param.values instead
parent c1d998e272
commit 6a260409fa
16 changed files with 349 additions and 231 deletions
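For illustration, a minimal sketch of the replacement pattern this commit applies (assuming a GPy Param object; the variable names below are hypothetical and not taken from the diff): instead of converting a parameter to a plain array with the removed param_to_array helper, code now reads the parameter's own values, or takes an ndarray view as in the _get_YYTfactor changes shown further down.

import numpy as np
from GPy.core.parameterization.param import Param

# Hypothetical parameter, used only for this sketch.
Y = Param('Y', np.random.randn(10, 2))

# Old pattern, removed by this commit:
#   from GPy.util.misc import param_to_array
#   Y_plain = param_to_array(Y)

# New pattern: read the parameter's data directly.
Y_plain = Y.values            # plain numpy array holding the parameter's data
Y_view = Y.view(np.ndarray)   # equivalent ndarray view, as used in _get_YYTfactor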
@@ -1,7 +1,6 @@
import numpy as np
from ...util import diag
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify, DSYR
-from ...util.misc import param_to_array
from ...core.parameterization.variational import VariationalPosterior
from . import LatentFunctionInference
from posterior import Posterior
@@ -23,7 +22,7 @@ class EPDTC(LatentFunctionInference):
        self.get_YYTfactor.limit = limit

    def _get_trYYT(self, Y):
-        return param_to_array(np.sum(np.square(Y)))
+        return np.sum(np.square(Y))

    def __getstate__(self):
        # has to be overridden, as Cacher objects cannot be pickled.
@@ -44,7 +43,7 @@ class EPDTC(LatentFunctionInference):
        """
        N, D = Y.shape
        if (N>=D):
-            return param_to_array(Y)
+            return Y
        else:
            return jitchol(tdot(Y))
@@ -12,7 +12,6 @@

import numpy as np
from ...util.linalg import mdot, jitchol, dpotrs, dtrtrs, dpotri, symmetrify, pdinv
-from ...util.misc import param_to_array
from posterior import Posterior
import warnings
from scipy import optimize
@@ -39,9 +38,6 @@ class Laplace(LatentFunctionInference):
        Returns a Posterior class containing essential quantities of the posterior
        """

-        #make Y a normal array!
-        Y = param_to_array(Y)
-
        # Compute K
        K = kern.K(X)

@@ -153,7 +149,7 @@ class Laplace(LatentFunctionInference):

        #compute vital matrices
        C = np.dot(LiW12, K)
        Ki_W_i = K - C.T.dot(C)

        #compute the log marginal
        log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata) - np.sum(np.log(np.diag(L)))
@@ -6,7 +6,6 @@ from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrt
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
import numpy as np
-from ...util.misc import param_to_array
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
import logging, itertools
@@ -35,7 +34,7 @@ class VarDTC(LatentFunctionInference):
        self.get_YYTfactor.limit = limit

    def _get_trYYT(self, Y):
-        return param_to_array(np.sum(np.square(Y)))
+        return np.sum(np.square(Y))

    def __getstate__(self):
        # has to be overridden, as Cacher objects cannot be pickled.
@@ -56,7 +55,7 @@ class VarDTC(LatentFunctionInference):
        """
        N, D = Y.shape
        if (N>=D):
-            return param_to_array(Y)
+            return Y.view(np.ndarray)
        else:
            return jitchol(tdot(Y))
@@ -6,7 +6,6 @@ from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
import numpy as np
-from ...util.misc import param_to_array
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)

@@ -32,18 +31,18 @@ class VarDTC_GPU(LatentFunctionInference):
    """
    const_jitter = np.float64(1e-6)
    def __init__(self, batchsize=None, gpu_memory=4., limit=1):

        self.batchsize = batchsize
        self.gpu_memory = gpu_memory

        self.midRes = {}
        self.batch_pos = 0 # the starting position of the current mini-batch

        self.cublas_handle = gpu_init.cublas_handle

        # Initialize GPU caches
        self.gpuCache = None

    def _initGPUCache(self, kern, num_inducing, input_dim, output_dim, Y):
        ndata = Y.shape[0]
        if self.batchsize==None:
@@ -75,10 +74,10 @@ class VarDTC_GPU(LatentFunctionInference):
            'psi2p_gpu' :gpuarray.empty((num_inducing,num_inducing),np.float64,order='F'),
            }
        self.gpuCache['ones_gpu'].fill(1.0)

        YT_gpu = self.gpuCache['YT_gpu']
        self._trYYT = cublas.cublasDdot(self.cublas_handle, YT_gpu.size, YT_gpu.gpudata, 1, YT_gpu.gpudata, 1)

    def _estimateMemoryOccupation(self, N, M, D):
        """
        Estimate the best batch size.
@@ -89,7 +88,7 @@ class VarDTC_GPU(LatentFunctionInference):
        unit: GB
        """
        return (M+9.*M*M+3*M*D+N+2.*N*D)*8./1024./1024./1024., (4.+3.*M+D+3.*M*M)*8./1024./1024./1024.

    def _estimateBatchSize(self, kern, N, M, Q, D):
        """
        Estimate the best batch size.
@@ -104,11 +103,11 @@ class VarDTC_GPU(LatentFunctionInference):
        else:
            x0, x1 = 0.,0.
            y0, y1 = self._estimateMemoryOccupation(N, M, D)

        opt_batchsize = min(int((self.gpu_memory-y0-x0)/(x1+y1)), N)

        return opt_batchsize

    def _get_YYTfactor(self, Y):
        """
        find a matrix L which satisfies LLT = YYT.
@@ -117,10 +116,10 @@ class VarDTC_GPU(LatentFunctionInference):
        """
        N, D = Y.shape
        if (N>=D):
-            return param_to_array(Y)
+            return Y.view(np.ndarray)
        else:
            return jitchol(tdot(Y))

    def gatherPsiStat(self, kern, X, Z, Y, beta, uncertain_inputs, het_noise):
        num_inducing, input_dim = Z.shape[0], Z.shape[1]
        num_data, output_dim = Y.shape
@@ -130,7 +129,7 @@ class VarDTC_GPU(LatentFunctionInference):
            beta_gpu = self.gpuCache['beta_gpu']
            YT_gpu = self.gpuCache['YT_gpu']
            betaYT_gpu = self.gpuCache['betaYT_gpu']

            beta_gpu.fill(beta)
            betaYT_gpu.fill(0.)
            cublas.cublasDaxpy(self.cublas_handle, betaYT_gpu.size, beta, YT_gpu.gpudata, 1, betaYT_gpu.gpudata, 1)
@@ -140,7 +139,7 @@ class VarDTC_GPU(LatentFunctionInference):
            psi1Y_gpu.fill(0.)
            psi2_gpu.fill(0.)
            psi0_full = 0

            for n_start in xrange(0,num_data,self.batchsize):
                n_end = min(self.batchsize+n_start, num_data)
                ndata = n_end - n_start
@@ -156,35 +155,35 @@ class VarDTC_GPU(LatentFunctionInference):
                    psi1p_gpu = kern.K(X_slice, Z)

                cublas.cublasDgemm(self.cublas_handle, 'T', 'T', num_inducing, output_dim, ndata, 1.0, psi1p_gpu.gpudata, ndata, betaYT_gpu_slice.gpudata, output_dim, 1.0, psi1Y_gpu.gpudata, num_inducing)

                psi0_full += psi0.sum()

                if uncertain_inputs:
                    sum_axis(psi2_gpu,psi2p_gpu,1,1)
                else:
                    cublas.cublasDgemm(self.cublas_handle, 'T', 'N', num_inducing, num_inducing, ndata, beta, psi1p_gpu.gpudata, ndata, psi1p_gpu.gpudata, ndata, 1.0, psi2_gpu.gpudata, num_inducing)

            psi0_full *= beta
            if uncertain_inputs:
                cublas.cublasDscal(self.cublas_handle, psi2_gpu.size, beta, psi2_gpu.gpudata, 1)

        else:
            psi2_full = np.zeros((num_inducing,num_inducing))
            psi1Y_full = np.zeros((output_dim,num_inducing)) # DxM
            psi0_full = 0.
            YRY_full = 0.

            for n_start in xrange(0,num_data,self.batchsize):
                n_end = min(self.batchsize+n_start, num_data)
                Y_slice = Y[n_start:n_end]
                X_slice = X[n_start:n_end]

                if het_noise:
                    b = beta[n_start]
                    YRY_full += np.inner(Y_slice, Y_slice)*b
                else:
                    b = beta

                if uncertain_inputs:
                    psi0 = kern.psi0(Z, X_slice)
                    psi1 = kern.psi1(Z, X_slice)
@@ -193,50 +192,50 @@ class VarDTC_GPU(LatentFunctionInference):
                    psi0 = kern.Kdiag(X_slice)
                    psi1 = kern.K(X_slice, Z)
                    psi2_full += np.dot(psi1.T,psi1)*b

                psi0_full += psi0.sum()*b
                psi1Y_full += np.dot(Y_slice.T,psi1)*b # DxM

            if not het_noise:
                YRY_full = trYYT*beta
            psi1Y_gpu.set(psi1Y_full)
            psi2_gpu.set(psi2_full)

        return psi0_full, YRY_full

    def inference_likelihood(self, kern, X, Z, likelihood, Y):
        """
        The first phase of inference:
        Compute: log-likelihood, dL_dKmm

        Cached intermediate results: Kmm, KmmInv,
        """

        num_inducing, input_dim = Z.shape[0], Z.shape[1]
        num_data, output_dim = Y.shape

        #see whether we've got a different noise variance for each datum
        beta = 1./np.fmax(likelihood.variance, 1e-6)
        het_noise = beta.size > 1
        if het_noise:
            self.batchsize=0

        self._initGPUCache(kern, num_inducing, input_dim, output_dim, Y)

        if isinstance(X, VariationalPosterior):
            uncertain_inputs = True
        else:
            uncertain_inputs = False

        psi1Y_gpu = self.gpuCache['psi1Y_gpu']
        psi2_gpu = self.gpuCache['psi2_gpu']

        psi0_full, YRY_full = self.gatherPsiStat(kern, X, Z, Y, beta, uncertain_inputs, het_noise)

        #======================================================================
        # Compute Common Components
        #======================================================================

        Kmm = kern.K(Z).copy()
        Kmm_gpu = self.gpuCache['Kmm_gpu']
        Kmm_gpu.set(np.asfortranarray(Kmm))
@@ -244,14 +243,14 @@ class VarDTC_GPU(LatentFunctionInference):
        ones_gpu = self.gpuCache['ones_gpu']
        cublas.cublasDaxpy(self.cublas_handle, num_inducing, self.const_jitter, ones_gpu.gpudata, 1, Kmm_gpu.gpudata, num_inducing+1)
        # assert np.allclose(Kmm, Kmm_gpu.get())

        # Lm = jitchol(Kmm)
        #
        Lm_gpu = self.gpuCache['Lm_gpu']
        cublas.cublasDcopy(self.cublas_handle, Kmm_gpu.size, Kmm_gpu.gpudata, 1, Lm_gpu.gpudata, 1)
        culinalg.cho_factor(Lm_gpu,'L')
        # print np.abs(np.tril(Lm)-np.tril(Lm_gpu.get())).max()

        # Lambda = Kmm+psi2_full
        # LL = jitchol(Lambda)
        #
@@ -261,7 +260,7 @@ class VarDTC_GPU(LatentFunctionInference):
        LL_gpu = Lambda_gpu
        culinalg.cho_factor(LL_gpu,'L')
        # print np.abs(np.tril(LL)-np.tril(LL_gpu.get())).max()

        # b,_ = dtrtrs(LL, psi1Y_full)
        # bbt_cpu = np.square(b).sum()
        #
@@ -270,7 +269,7 @@ class VarDTC_GPU(LatentFunctionInference):
        cublas.cublasDtrsm(self.cublas_handle , 'L', 'L', 'N', 'N', num_inducing, output_dim, np.float64(1.0), LL_gpu.gpudata, num_inducing, b_gpu.gpudata, num_inducing)
        bbt = cublas.cublasDdot(self.cublas_handle, b_gpu.size, b_gpu.gpudata, 1, b_gpu.gpudata, 1)
        # print np.abs(bbt-bbt_cpu)

        # v,_ = dtrtrs(LL.T,b,lower=False)
        # vvt = np.einsum('md,od->mo',v,v)
        # LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right')
@@ -288,7 +287,7 @@ class VarDTC_GPU(LatentFunctionInference):
        tr_LmInvPsi2LmInvT = float(strideSum(LmInvPsi2LmInvT_gpu, num_inducing+1).get())
        # print np.abs(vvt-vvt_gpu.get()).max()
        # print np.abs(np.trace(LmInvPsi2LmInvT)-tr_LmInvPsi2LmInvT)

        # Psi2LLInvT = dtrtrs(LL,psi2_full)[0].T
        # LmInvPsi2LLInvT= dtrtrs(Lm,Psi2LLInvT)[0]
        # KmmInvPsi2LLInvT = dtrtrs(Lm,LmInvPsi2LLInvT,trans=True)[0]
@@ -303,7 +302,7 @@ class VarDTC_GPU(LatentFunctionInference):
        cublas.cublasDcopy(self.cublas_handle, KmmInvPsi2LLInvT_gpu.size, KmmInvPsi2LLInvT_gpu.gpudata, 1, KmmInvPsi2P_gpu.gpudata, 1)
        cublas.cublasDtrsm(self.cublas_handle , 'r', 'L', 'N', 'N', num_inducing, num_inducing, np.float64(1.0), LL_gpu.gpudata, num_inducing, KmmInvPsi2P_gpu.gpudata, num_inducing)
        # print np.abs(KmmInvPsi2P-KmmInvPsi2P_gpu.get()).max()

        # dL_dpsi2R = (output_dim*KmmInvPsi2P - vvt)/2. # dL_dpsi2 with R inside psi2
        #
        dL_dpsi2R_gpu = self.gpuCache['dL_dpsi2R_gpu']
@@ -311,7 +310,7 @@ class VarDTC_GPU(LatentFunctionInference):
        cublas.cublasDaxpy(self.cublas_handle, KmmInvPsi2P_gpu.size, np.float64(-output_dim), KmmInvPsi2P_gpu.gpudata, 1, dL_dpsi2R_gpu.gpudata, 1)
        cublas.cublasDscal(self.cublas_handle, dL_dpsi2R_gpu.size, np.float64(-0.5), dL_dpsi2R_gpu.gpudata, 1)
        # print np.abs(dL_dpsi2R_gpu.get()-dL_dpsi2R).max()

        #======================================================================
        # Compute log-likelihood
        #======================================================================
@@ -320,7 +319,7 @@ class VarDTC_GPU(LatentFunctionInference):
        else:
            logL_R = -num_data*np.log(beta)
        # logL_old = -(output_dim*(num_data*log_2_pi+logL_R+psi0_full-np.trace(LmInvPsi2LmInvT))+YRY_full-bbt)/2.-output_dim*(-np.log(np.diag(Lm)).sum()+np.log(np.diag(LL)).sum())

        logdetKmm = float(logDiagSum(Lm_gpu,num_inducing+1).get())
        logdetLambda = float(logDiagSum(LL_gpu,num_inducing+1).get())
        logL = -(output_dim*(num_data*log_2_pi+logL_R+psi0_full-tr_LmInvPsi2LmInvT)+YRY_full-bbt)/2.+output_dim*(logdetKmm-logdetLambda)
@@ -329,7 +328,7 @@ class VarDTC_GPU(LatentFunctionInference):
        #======================================================================
        # Compute dL_dKmm
        #======================================================================

        # dL_dKmm = -(output_dim*np.einsum('md,od->mo',KmmInvPsi2LLInvT,KmmInvPsi2LLInvT) + vvt)/2.
        #
        dL_dKmm_gpu = self.gpuCache['dL_dKmm_gpu']
@@ -341,24 +340,24 @@ class VarDTC_GPU(LatentFunctionInference):
        #======================================================================
        # Compute the Posterior distribution of inducing points p(u|Y)
        #======================================================================

        post = Posterior(woodbury_inv=KmmInvPsi2P_gpu.get(), woodbury_vector=v_gpu.get(), K=Kmm_gpu.get(), mean=None, cov=None, K_chol=Lm_gpu.get())

        #======================================================================
        # Compute dL_dthetaL for uncertian input and non-heter noise
        #======================================================================

        if not het_noise:
            dL_dthetaL = (YRY_full + output_dim*psi0_full - num_data*output_dim)/-2.
            dL_dthetaL += cublas.cublasDdot(self.cublas_handle,dL_dpsi2R_gpu.size, dL_dpsi2R_gpu.gpudata,1,psi2_gpu.gpudata,1)
            dL_dthetaL += cublas.cublasDdot(self.cublas_handle,v_gpu.size, v_gpu.gpudata,1,psi1Y_gpu.gpudata,1)
            self.midRes['dL_dthetaL'] = -beta*dL_dthetaL

        return logL, dL_dKmm_gpu.get(), post

    def inference_minibatch(self, kern, X, Z, likelihood, Y):
        """
        The second phase of inference: Computing the derivatives over a minibatch of Y
        Compute: dL_dpsi0, dL_dpsi1, dL_dpsi2, dL_dthetaL
        return a flag showing whether it reached the end of Y (isEnd)
        """
@@ -370,10 +369,10 @@ class VarDTC_GPU(LatentFunctionInference):
            uncertain_inputs = True
        else:
            uncertain_inputs = False

        beta = 1./np.fmax(likelihood.variance, 1e-6)
        het_noise = beta.size > 1

        n_start = self.batch_pos
        n_end = min(self.batchsize+n_start, num_data)
        if n_end==num_data:
@@ -382,12 +381,12 @@ class VarDTC_GPU(LatentFunctionInference):
        else:
            isEnd = False
            self.batch_pos = n_end

        nSlice = n_end-n_start
        X_slice = X[n_start:n_end]
        if het_noise:
            beta = beta[n_start] # nSlice==1

        if kern.useGPU:
            if not uncertain_inputs:
                psi0p_gpu = kern.Kdiag(X_slice)
@@ -416,28 +415,28 @@ class VarDTC_GPU(LatentFunctionInference):
            psi1p_gpu.set(np.asfortranarray(psi1))
            if uncertain_inputs:
                psi2p_gpu.set(np.asfortranarray(psi2))

        #======================================================================
        # Compute dL_dpsi
        #======================================================================

        dL_dpsi2R_gpu = self.gpuCache['dL_dpsi2R_gpu']
        v_gpu = self.gpuCache['v_gpu']
        dL_dpsi0_gpu = self.gpuCache['dL_dpsi0_gpu']
        dL_dpsi1_gpu = self.gpuCache['dL_dpsi1_gpu']
        dL_dpsi2_gpu = self.gpuCache['dL_dpsi2_gpu']
        betaYT_gpu = self.gpuCache['betaYT_gpu']
        betaYT_gpu_slice = betaYT_gpu[:,n_start:n_end]

        # Adjust to the batch size
        if dL_dpsi0_gpu.shape[0] > nSlice:
            dL_dpsi0_gpu = dL_dpsi0_gpu.ravel()[:nSlice]
            dL_dpsi1_gpu = dL_dpsi1_gpu.ravel()[:nSlice*num_inducing].reshape(nSlice,num_inducing)

        dL_dpsi0_gpu.fill(-output_dim *beta/2.)

        cublas.cublasDgemm(self.cublas_handle, 'T', 'T', nSlice, num_inducing, output_dim, 1.0, betaYT_gpu_slice.gpudata, output_dim, v_gpu.gpudata, num_inducing, 0., dL_dpsi1_gpu.gpudata, nSlice)

        if uncertain_inputs:
            cublas.cublasDcopy(self.cublas_handle, dL_dpsi2R_gpu.size, dL_dpsi2R_gpu.gpudata, 1, dL_dpsi2_gpu.gpudata, 1)
            cublas.cublasDscal(self.cublas_handle, dL_dpsi2_gpu.size, beta, dL_dpsi2_gpu.gpudata, 1)
@@ -458,7 +457,7 @@ class VarDTC_GPU(LatentFunctionInference):
            dL_dpsi1 = dL_dpsi1_gpu
        else:
            dL_dpsi0 = dL_dpsi0_gpu.get()
            dL_dpsi1 = dL_dpsi1_gpu.get()
        if uncertain_inputs:
            if kern.useGPU:
                dL_dpsi2 = dL_dpsi2_gpu
@@ -480,4 +479,4 @@ class VarDTC_GPU(LatentFunctionInference):
                         'dL_dthetaL':dL_dthetaL}

        return isEnd, (n_start,n_end), grad_dict

@@ -6,7 +6,6 @@ from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri,pdi
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
import numpy as np
-from ...util.misc import param_to_array
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)

@@ -27,26 +26,26 @@ class VarDTC_minibatch(LatentFunctionInference):
    """
    const_jitter = 1e-6
    def __init__(self, batchsize=None, limit=1, mpi_comm=None):

        self.batchsize = batchsize
        self.mpi_comm = mpi_comm
        self.limit = limit

        # Cache functions
        from ...util.caching import Cacher
        self.get_trYYT = Cacher(self._get_trYYT, limit)
        self.get_YYTfactor = Cacher(self._get_YYTfactor, limit)

        self.midRes = {}
        self.batch_pos = 0 # the starting position of the current mini-batch
        self.Y_speedup = False # Replace Y with the cholesky factor of YY.T, but the posterior inference will be wrong

    def __getstate__(self):
        # has to be overridden, as Cacher objects cannot be pickled.
        return self.batchsize, self.limit, self.Y_speedup

    def __setstate__(self, state):
        # has to be overridden, as Cacher objects cannot be pickled.
        self.batchsize, self.limit, self.Y_speedup = state
        self.mpi_comm = None
        self.midRes = {}
@@ -58,9 +57,9 @@ class VarDTC_minibatch(LatentFunctionInference):
    def set_limit(self, limit):
        self.get_trYYT.limit = limit
        self.get_YYTfactor.limit = limit

    def _get_trYYT(self, Y):
-        return param_to_array(np.sum(np.square(Y)))
+        return np.sum(np.square(Y))

    def _get_YYTfactor(self, Y):
        """
@@ -70,19 +69,19 @@ class VarDTC_minibatch(LatentFunctionInference):
        """
        N, D = Y.shape
        if (N>=D):
-            return param_to_array(Y)
+            return Y.view(np.ndarray)
        else:
            return jitchol(tdot(Y))

    def gatherPsiStat(self, kern, X, Z, Y, beta, uncertain_inputs):

        het_noise = beta.size > 1

        trYYT = self.get_trYYT(Y)
        if self.Y_speedup and not het_noise:
            Y = self.get_YYTfactor(Y)

        num_inducing = Z.shape[0]
        num_data, output_dim = Y.shape
        if self.batchsize == None:
            self.batchsize = num_data
@@ -91,8 +90,8 @@ class VarDTC_minibatch(LatentFunctionInference):
        psi1Y_full = np.zeros((output_dim,num_inducing)) # DxM
        psi0_full = 0.
        YRY_full = 0.

        for n_start in xrange(0,num_data,self.batchsize):
            n_end = min(self.batchsize+n_start, num_data)
            if (n_end-n_start)==num_data:
                Y_slice = Y
@@ -100,13 +99,13 @@ class VarDTC_minibatch(LatentFunctionInference):
            else:
                Y_slice = Y[n_start:n_end]
            X_slice = X[n_start:n_end]

            if het_noise:
                b = beta[n_start]
                YRY_full += np.inner(Y_slice, Y_slice)*b
            else:
                b = beta

            if uncertain_inputs:
                psi0 = kern.psi0(Z, X_slice)
                psi1 = kern.psi1(Z, X_slice)
@@ -115,13 +114,13 @@ class VarDTC_minibatch(LatentFunctionInference):
                psi0 = kern.Kdiag(X_slice)
                psi1 = kern.K(X_slice, Z)
                psi2_full += np.dot(psi1.T,psi1)*b

            psi0_full += psi0.sum()*b
            psi1Y_full += np.dot(Y_slice.T,psi1)*b # DxM

        if not het_noise:
            YRY_full = trYYT*beta

        if self.mpi_comm != None:
            psi0_all = np.array(psi0_full)
            psi1Y_all = psi1Y_full.copy()
@@ -132,18 +131,18 @@ class VarDTC_minibatch(LatentFunctionInference):
            self.mpi_comm.Allreduce([psi2_full, MPI.DOUBLE], [psi2_all, MPI.DOUBLE])
            self.mpi_comm.Allreduce([YRY_full, MPI.DOUBLE], [YRY_all, MPI.DOUBLE])
            return psi0_all, psi1Y_all, psi2_all, YRY_all

        return psi0_full, psi1Y_full, psi2_full, YRY_full

    def inference_likelihood(self, kern, X, Z, likelihood, Y):
        """
        The first phase of inference:
        Compute: log-likelihood, dL_dKmm

        Cached intermediate results: Kmm, KmmInv,
        """

        num_data, output_dim = Y.shape
        input_dim = Z.shape[0]
        if self.mpi_comm != None:
            num_data_all = np.array(num_data,dtype=np.int32)
@@ -154,7 +153,7 @@ class VarDTC_minibatch(LatentFunctionInference):
            uncertain_inputs = True
        else:
            uncertain_inputs = False

        #see whether we've got a different noise variance for each datum
        beta = 1./np.fmax(likelihood.variance, 1e-6)
        het_noise = beta.size > 1
@@ -162,28 +161,28 @@ class VarDTC_minibatch(LatentFunctionInference):
            self.batchsize = 1

        psi0_full, psi1Y_full, psi2_full, YRY_full = self.gatherPsiStat(kern, X, Z, Y, beta, uncertain_inputs)

        #======================================================================
        # Compute Common Components
        #======================================================================

        Kmm = kern.K(Z).copy()
        diag.add(Kmm, self.const_jitter)
        KmmInv,Lm,LmInv,_ = pdinv(Kmm)

        LmInvPsi2LmInvT = LmInv.dot(psi2_full).dot(LmInv.T)
        Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT
        LInv,LL,LLInv,logdet_L = pdinv(Lambda)
        b = LLInv.dot(LmInv.dot(psi1Y_full.T))
        bbt = np.square(b).sum()
        v = LmInv.T.dot(LLInv.T.dot(b))

        dL_dpsi2R = LmInv.T.dot(-LLInv.T.dot(tdot(b)+output_dim*np.eye(input_dim)).dot(LLInv)+output_dim*np.eye(input_dim)).dot(LmInv)/2.

        # Cache intermediate results
        self.midRes['dL_dpsi2R'] = dL_dpsi2R
        self.midRes['v'] = v

        #======================================================================
        # Compute log-likelihood
        #======================================================================
@@ -196,22 +195,22 @@ class VarDTC_minibatch(LatentFunctionInference):
        #======================================================================
        # Compute dL_dKmm
        #======================================================================

        dL_dKmm = dL_dpsi2R - output_dim*KmmInv.dot(psi2_full).dot(KmmInv)/2.

        #======================================================================
        # Compute the Posterior distribution of inducing points p(u|Y)
        #======================================================================

        if not self.Y_speedup or het_noise:
            post = Posterior(woodbury_inv=LmInv.T.dot(np.eye(input_dim)-LInv).dot(LmInv), woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=Lm)
        else:
            post = None

        #======================================================================
        # Compute dL_dthetaL for uncertian input and non-heter noise
        #======================================================================

        if not het_noise:
            dL_dthetaL = (YRY_full*beta + beta*output_dim*psi0_full - num_data*output_dim*beta)/2. - beta*(dL_dpsi2R*psi2_full).sum() - beta*(v.T*psi1Y_full).sum()
            self.midRes['dL_dthetaL'] = dL_dthetaL
@@ -220,7 +219,7 @@ class VarDTC_minibatch(LatentFunctionInference):

    def inference_minibatch(self, kern, X, Z, likelihood, Y):
        """
        The second phase of inference: Computing the derivatives over a minibatch of Y
        Compute: dL_dpsi0, dL_dpsi1, dL_dpsi2, dL_dthetaL
        return a flag showing whether it reached the end of Y (isEnd)
        """
@@ -231,7 +230,7 @@ class VarDTC_minibatch(LatentFunctionInference):
            uncertain_inputs = True
        else:
            uncertain_inputs = False

        #see whether we've got a different noise variance for each datum
        beta = 1./np.fmax(likelihood.variance, 1e-6)
        het_noise = beta.size > 1
@@ -241,7 +240,7 @@ class VarDTC_minibatch(LatentFunctionInference):
            YYT_factor = self.get_YYTfactor(Y)
        else:
            YYT_factor = Y

        n_start = self.batch_pos
        n_end = min(self.batchsize+n_start, num_data)
        if n_end==num_data:
@@ -250,10 +249,10 @@ class VarDTC_minibatch(LatentFunctionInference):
        else:
            isEnd = False
            self.batch_pos = n_end

        Y_slice = YYT_factor[n_start:n_end]
        X_slice = X[n_start:n_end]

        if not uncertain_inputs:
            psi0 = kern.Kdiag(X_slice)
            psi1 = kern.K(X_slice, Z)
@@ -264,33 +263,33 @@ class VarDTC_minibatch(LatentFunctionInference):
            psi1 = kern.psi1(Z, X_slice)
            psi2 = kern.psi2(Z, X_slice)
            betapsi1 = np.einsum('n,nm->nm',beta,psi1)

        if het_noise:
            beta = beta[n_start] # assuming batchsize==1

        betaY = beta*Y_slice

        #======================================================================
        # Load Intermediate Results
        #======================================================================

        dL_dpsi2R = self.midRes['dL_dpsi2R']
        v = self.midRes['v']

        #======================================================================
        # Compute dL_dpsi
        #======================================================================

        dL_dpsi0 = -output_dim * (beta * np.ones((n_end-n_start,)))/2.

        dL_dpsi1 = np.dot(betaY,v.T)

        if uncertain_inputs:
            dL_dpsi2 = beta* dL_dpsi2R
        else:
            dL_dpsi1 += np.dot(betapsi1,dL_dpsi2R)*2.
            dL_dpsi2 = None

        #======================================================================
        # Compute dL_dthetaL
        #======================================================================
@@ -300,14 +299,14 @@ class VarDTC_minibatch(LatentFunctionInference):
                psiR = np.einsum('mo,mo->',dL_dpsi2R,psi2)
            else:
                psiR = np.einsum('nm,no,mo->',psi1,psi1,dL_dpsi2R)

            dL_dthetaL = ((np.square(betaY)).sum(axis=-1) + np.square(beta)*(output_dim*psi0)-output_dim*beta)/2. - np.square(beta)*psiR- (betaY*np.dot(betapsi1,v)).sum(axis=-1)
        else:
            if isEnd:
                dL_dthetaL = self.midRes['dL_dthetaL']
            else:
                dL_dthetaL = 0.

        if uncertain_inputs:
            grad_dict = {'dL_dpsi0':dL_dpsi0,
                         'dL_dpsi1':dL_dpsi1,
@@ -317,7 +316,7 @@ class VarDTC_minibatch(LatentFunctionInference):
            grad_dict = {'dL_dKdiag':dL_dpsi0,
                         'dL_dKnm':dL_dpsi1,
                         'dL_dthetaL':dL_dthetaL}

        return isEnd, (n_start,n_end), grad_dict

@@ -330,18 +329,18 @@ def update_gradients(model, mpi_comm=None):
        X = model.X[model.N_range[0]:model.N_range[1]]

    model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, X, model.Z, model.likelihood, Y)

    het_noise = model.likelihood.variance.size > 1

    if het_noise:
        dL_dthetaL = np.empty((model.Y.shape[0],))
    else:
        dL_dthetaL = np.float64(0.)

    kern_grad = model.kern.gradient.copy()
    kern_grad[:] = 0.
    model.Z.gradient = 0.

    isEnd = False
    while not isEnd:
        isEnd, n_range, grad_dict = model.inference_method.inference_minibatch(model.kern, X, model.Z, model.likelihood, Y)
@@ -352,24 +351,24 @@ def update_gradients(model, mpi_comm=None):
            X_slice = model.X[n_range[0]:n_range[1]]
        else:
            X_slice = model.X[model.N_range[0]+n_range[0]:model.N_range[0]+n_range[1]]

        #gradients w.r.t. kernel
        model.kern.update_gradients_expectations(variational_posterior=X_slice, Z=model.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
        kern_grad += model.kern.gradient

        #gradients w.r.t. Z
        model.Z.gradient += model.kern.gradients_Z_expectations(
                            dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'], Z=model.Z, variational_posterior=X_slice)

        #gradients w.r.t. posterior parameters of X
        X_grad = model.kern.gradients_qX_expectations(variational_posterior=X_slice, Z=model.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
        model.set_X_gradients(X_slice, X_grad)

        if het_noise:
            dL_dthetaL[n_range[0]:n_range[1]] = grad_dict['dL_dthetaL']
        else:
            dL_dthetaL += grad_dict['dL_dthetaL']

    # Gather the gradients from multiple MPI nodes
    if mpi_comm != None:
        if het_noise:
@@ -380,14 +379,14 @@ def update_gradients(model, mpi_comm=None):
        mpi_comm.Allreduce([model.Z.gradient, MPI.DOUBLE], [Z_grad_all, MPI.DOUBLE])
        kern_grad = kern_grad_all
        model.Z.gradient = Z_grad_all

    #gradients w.r.t. kernel
    model.kern.update_gradients_full(dL_dKmm, model.Z, None)
    model.kern.gradient += kern_grad

    #gradients w.r.t. Z
    model.Z.gradient += model.kern.gradients_X(dL_dKmm, model.Z)

    # Update Log-likelihood
    KL_div = model.variational_prior.KL_divergence(X)
    # update for the KL divergence