[GPU] psi computation

This commit is contained in:
Zhenwen Dai 2014-03-27 17:12:17 +00:00
parent bc59cb8b22
commit 4465c5be8d
5 changed files with 153 additions and 19 deletions

View file

@ -16,7 +16,8 @@ class Kern(Parameterized):
__metaclass__ = KernCallsViaSlicerMeta __metaclass__ = KernCallsViaSlicerMeta
#=========================================================================== #===========================================================================
_debug=False _debug=False
def __init__(self, input_dim, active_dims, name, *a, **kw): _support_GPU=False
def __init__(self, input_dim, active_dims, name, useGPU=False,*a, **kw):
""" """
The base class for a kernel: a positive definite function The base class for a kernel: a positive definite function
which forms of a covariance function (kernel). which forms of a covariance function (kernel).
@ -41,6 +42,8 @@ class Kern(Parameterized):
assert active_dim_size == self.input_dim, "input_dim={} does not match len(active_dim)={}, active_dims={}".format(self.input_dim, active_dim_size, self.active_dims) assert active_dim_size == self.input_dim, "input_dim={} does not match len(active_dim)={}, active_dims={}".format(self.input_dim, active_dim_size, self.active_dims)
self._sliced_X = 0 self._sliced_X = 0
self.useGPU = self._support_GPU and useGPU
@Cache_this(limit=10) @Cache_this(limit=10)
def _slice_X(self, X): def _slice_X(self, X):
return X[:, self.active_dims] return X[:, self.active_dims]

View file

@ -15,6 +15,7 @@ try:
import pycuda.autoinit import pycuda.autoinit
from pycuda.reduction import ReductionKernel from pycuda.reduction import ReductionKernel
from pycuda.elementwise import ElementwiseKernel from pycuda.elementwise import ElementwiseKernel
from ....util import linalg_gpu
# The kernel form computing psi1 # The kernel form computing psi1
comp_psi1 = ElementwiseKernel( comp_psi1 = ElementwiseKernel(
@ -45,15 +46,15 @@ try:
# The kernel form computing psi1 het_noise # The kernel form computing psi1 het_noise
comp_psi1_het = ElementwiseKernel( comp_psi1_het = ElementwiseKernel(
"double *psi1, double var, double *l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi1denom, int N, int M, int Q", "double *psi1, double var, double *l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi1denom, int N, int M, int Q",
"psi1[i] = comp_psi1_element(var,l, Z, mu, S, logGamma, log1Gamma, logpsi1denom, N, M, Q, i)", "psi1[i] = comp_psi1_element_het(var,l, Z, mu, S, logGamma, log1Gamma, logpsi1denom, N, M, Q, i)",
"comp_psi1", "comp_psi1_het",
preamble=""" preamble="""
#define IDX_NMQ(n,m,q) ((q*M+m)*N+n) #define IDX_NMQ(n,m,q) ((q*M+m)*N+n)
#define IDX_NQ(n,q) (q*N+n) #define IDX_NQ(n,q) (q*N+n)
#define IDX_MQ(m,q) (q*M+m) #define IDX_MQ(m,q) (q*M+m)
#define LOGEXPSUM(a,b) (a>=b?a+log(1.0+exp(b-a)):b+log(1.0+exp(a-b))) #define LOGEXPSUM(a,b) (a>=b?a+log(1.0+exp(b-a)):b+log(1.0+exp(a-b)))
__device__ double comp_psi1_element(double var, double *l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi1denom, int N, int M, int Q, int idx) __device__ double comp_psi1_element_het(double var, double *l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi1denom, int N, int M, int Q, int idx)
{ {
int n = idx%N; int n = idx%N;
int m = idx/N; int m = idx/N;
@ -71,15 +72,15 @@ try:
# The kernel form computing psi2 het_noise # The kernel form computing psi2 het_noise
comp_psi2_het = ElementwiseKernel( comp_psi2_het = ElementwiseKernel(
"double *psi2, double var, double *l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi2denom, int N, int M, int Q", "double *psi2, double var, double *l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi2denom, int N, int M, int Q",
"psi2[i] = comp_psi1_element(var,l, Z, mu, S, logGamma, log1Gamma, logpsi2denom, N, M, Q, i)", "psi2[i] = comp_psi2_element_het(var,l, Z, mu, S, logGamma, log1Gamma, logpsi2denom, N, M, Q, i)",
"comp_psi2", "comp_psi2_het",
preamble=""" preamble="""
#define IDX_NMQ(n,m,q) ((q*M+m)*N+n) #define IDX_NMQ(n,m,q) ((q*M+m)*N+n)
#define IDX_NQ(n,q) (q*N+n) #define IDX_NQ(n,q) (q*N+n)
#define IDX_MQ(m,q) (q*M+m) #define IDX_MQ(m,q) (q*M+m)
#define LOGEXPSUM(a,b) (a>=b?a+log(1.0+exp(b-a)):b+log(1.0+exp(a-b))) #define LOGEXPSUM(a,b) (a>=b?a+log(1.0+exp(b-a)):b+log(1.0+exp(a-b)))
__device__ double comp_psi1_element(double var, double *l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi2denom, int N, int M, int Q, int idx) __device__ double comp_psi2_element_het(double var, double *l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi2denom, int N, int M, int Q, int idx)
{ {
// psi2 (n,m1,m2) // psi2 (n,m1,m2)
int m2 = idx/(M*N); int m2 = idx/(M*N);
@ -90,7 +91,7 @@ try:
for(int q=0;q<Q;q++){ for(int q=0;q<Q;q++){
double dZ = Z[IDX_MQ(m1,q)]-Z[IDX_MQ(m2,q)]; double dZ = Z[IDX_MQ(m1,q)]-Z[IDX_MQ(m2,q)];
double muZ = mu[IDX_NQ(n,q)] - (Z[IDX_MQ(m1,q)]+Z[IDX_MQ(m2,q)])/2.0; double muZ = mu[IDX_NQ(n,q)] - (Z[IDX_MQ(m1,q)]+Z[IDX_MQ(m2,q)])/2.0;
double exp1 = logGamma[IDX_NQ(n,q)] - (logpsi2denom[IDX_NQ(n,q)])/2.0 - dZ*dZ/(l[q]*4.0) - muZ*muZ/(2*mu[IDX_NQ(n,q)]+l[q]); double exp1 = logGamma[IDX_NQ(n,q)] - (logpsi2denom[IDX_NQ(n,q)])/2.0 - dZ*dZ/(l[q]*4.0) - muZ*muZ/(2*S[IDX_NQ(n,q)]+l[q]);
double exp2 = log1Gamma[IDX_NQ(n,q)] - (Z[IDX_MQ(m1,q)]*Z[IDX_MQ(m1,q)]+Z[IDX_MQ(m2,q)]*Z[IDX_MQ(m2,q)])/(l[q]*2.0); double exp2 = log1Gamma[IDX_NQ(n,q)] - (Z[IDX_MQ(m1,q)]*Z[IDX_MQ(m1,q)]+Z[IDX_MQ(m2,q)]*Z[IDX_MQ(m2,q)])/(l[q]*2.0);
psi2_exp += LOGEXPSUM(exp1,exp2); psi2_exp += LOGEXPSUM(exp1,exp2);
} }
@ -101,7 +102,7 @@ try:
# The kernel form computing psi2 # The kernel form computing psi2
comp_psi2 = ElementwiseKernel( comp_psi2 = ElementwiseKernel(
"double *psi2, double var, double l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi2denom, int N, int M, int Q", "double *psi2, double var, double l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi2denom, int N, int M, int Q",
"psi2[i] = comp_psi1_element(var,l, Z, mu, S, logGamma, log1Gamma, logpsi2denom, N, M, Q, i)", "psi2[i] = comp_psi2_element(var,l, Z, mu, S, logGamma, log1Gamma, logpsi2denom, N, M, Q, i)",
"comp_psi2", "comp_psi2",
preamble=""" preamble="""
#define IDX_NMQ(n,m,q) ((q*M+m)*N+n) #define IDX_NMQ(n,m,q) ((q*M+m)*N+n)
@ -109,7 +110,7 @@ try:
#define IDX_MQ(m,q) (q*M+m) #define IDX_MQ(m,q) (q*M+m)
#define LOGEXPSUM(a,b) (a>=b?a+log(1.0+exp(b-a)):b+log(1.0+exp(a-b))) #define LOGEXPSUM(a,b) (a>=b?a+log(1.0+exp(b-a)):b+log(1.0+exp(a-b)))
__device__ double comp_psi1_element(double var, double l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi2denom, int N, int M, int Q, int idx) __device__ double comp_psi2_element(double var, double l, double *Z, double *mu, double *S, double *logGamma, double *log1Gamma, double *logpsi2denom, int N, int M, int Q, int idx)
{ {
// psi2 (n,m1,m2) // psi2 (n,m1,m2)
int m2 = idx/(M*N); int m2 = idx/(M*N);
@ -120,19 +121,117 @@ try:
for(int q=0;q<Q;q++){ for(int q=0;q<Q;q++){
double dZ = Z[IDX_MQ(m1,q)]-Z[IDX_MQ(m2,q)]; double dZ = Z[IDX_MQ(m1,q)]-Z[IDX_MQ(m2,q)];
double muZ = mu[IDX_NQ(n,q)] - (Z[IDX_MQ(m1,q)]+Z[IDX_MQ(m2,q)])/2.0; double muZ = mu[IDX_NQ(n,q)] - (Z[IDX_MQ(m1,q)]+Z[IDX_MQ(m2,q)])/2.0;
double exp1 = logGamma[IDX_NQ(n,q)] - (logpsi2denom[IDX_NQ(n,q)])/2.0 - dZ*dZ/(l*4.0) - muZ*muZ/(2*mu[IDX_NQ(n,q)]+l); double exp1 = logGamma[IDX_NQ(n,q)] - (logpsi2denom[IDX_NQ(n,q)])/2.0 - dZ*dZ/(l*4.0) - muZ*muZ/(2*S[IDX_NQ(n,q)]+l);
double exp2 = log1Gamma[IDX_NQ(n,q)] - (Z[IDX_MQ(m1,q)]*Z[IDX_MQ(m1,q)]+Z[IDX_MQ(m2,q)]*Z[IDX_MQ(m2,q)])/(l*2.0); double exp2 = log1Gamma[IDX_NQ(n,q)] - (Z[IDX_MQ(m1,q)]*Z[IDX_MQ(m1,q)]+Z[IDX_MQ(m2,q)]*Z[IDX_MQ(m2,q)])/(l*2.0);
psi2_exp += LOGEXPSUM(exp1,exp2); psi2_exp += LOGEXPSUM(exp1,exp2);
} }
return var*var*exp(psi2_exp); return var*var*exp(psi2_exp);
} }
""") """)
# compute psidenom (heteroscedastic / ARD variant: per-dimension squared
# lengthscale passed as a device pointer `l`).
# NOTE(review): this kernel cannot compile as written — `N` is used in the
# device function below but is neither a kernel argument nor defined in the
# preamble; an `int N` argument must be added here AND supplied at every call
# site (comp_logpsidenom_het(..., scale) calls in psicomputations).
# NOTE(review): the returned value is scale*S/l + 1 WITHOUT the log, yet the
# name and the consumers (comp_psi1_het / comp_psi2_het subtract this buffer
# in log-space, and the CPU path uses np.log(S/lengthscale2+1.)) expect
# log(scale*S/l + 1) — presumably a log() call is missing; confirm and fix
# together with the call sites.
comp_logpsidenom_het = ElementwiseKernel(
"double *out, double *S, double *l, double scale",
"out[i] = comp_logpsidenom_het_element(S, l, scale, i)",
"comp_logpsidenom_het",
preamble="""
__device__ double comp_logpsidenom_het_element(double *S, double *l, double scale, int idx)
{
int q = idx/N;
int n = idx%N;
return scale*S[idx]/l[q]+1.0;
}
""")
# Precompute the log-denominator term log(scale*S/l + 1) used by the psi1/psi2
# kernels (isotropic variant: `l` is the squared lengthscale, a scalar).
#
# Fixes relative to the original:
#  * take the log — consumers (comp_psi1/comp_psi2) subtract this buffer in
#    log-space, matching the CPU reference np.log(S/lengthscale2 + 1.);
#  * drop the unused locals q/n, which referenced an undefined symbol `N`
#    and would have failed to nvcc-compile.
comp_logpsidenom = ElementwiseKernel(
"double *out, double *S, double l, double scale",
"out[i] = comp_logpsidenom_element(S, l, scale, i)",
"comp_logpsidenom",
preamble="""
__device__ double comp_logpsidenom_element(double *S, double l, double scale, int idx)
{
return log(scale*S[idx]/l+1.0);
}
""")
except: except:
pass pass
class PSICOMP_SSRBF(object): class PSICOMP_SSRBF(object):
def __init__(self): def __init__(self):
pass self.gpuCache = None
def _initGPUCache(self, N, M, Q):
    """Lazily allocate the persistent GPU buffers for the psi-statistics.

    All buffers are Fortran-ordered to match the column-major index macros
    (IDX_NQ / IDX_MQ / IDX_NMQ) used by the CUDA kernels.

    :param N: number of data points (rows of mu/S/gamma)
    :param M: number of inducing points (rows of Z)
    :param Q: input dimensionality
    """
    # `is None` rather than `== None` (PEP 8 identity comparison).
    # NOTE(review): the cache is only sized once — if N/M/Q change between
    # calls the stale buffers are reused; confirm callers keep shapes fixed.
    if self.gpuCache is None:
        self.gpuCache = {
            'l_gpu':           gpuarray.empty((Q,),    np.float64, order='F'),
            'Z_gpu':           gpuarray.empty((M, Q),  np.float64, order='F'),
            'mu_gpu':          gpuarray.empty((N, Q),  np.float64, order='F'),
            'S_gpu':           gpuarray.empty((N, Q),  np.float64, order='F'),
            'gamma_gpu':       gpuarray.empty((N, Q),  np.float64, order='F'),
            'logGamma_gpu':    gpuarray.empty((N, Q),  np.float64, order='F'),
            'log1Gamma_gpu':   gpuarray.empty((N, Q),  np.float64, order='F'),
            'logpsidenom_gpu': gpuarray.empty((N, Q),  np.float64, order='F'),
            'psi0_gpu':        gpuarray.empty((N,),    np.float64, order='F'),
            'psi1_gpu':        gpuarray.empty((N, M),  np.float64, order='F'),
            'psi2_gpu':        gpuarray.empty((N, M, M), np.float64, order='F'),
        }
def psicomputations(self, variance, lengthscale, Z, mu, S, gamma):
    """Compute the psi-statistics (psi0, psi1, psi2) of the SSRBF kernel on GPU.

    :param variance: kernel variance (scalar)
    :param lengthscale: scalar, or per-dimension array when ARD is enabled
    :param Z: inducing inputs, (M, Q)
    :param mu: variational means, (N, Q)
    :param S: variational variances, (N, Q)
    :param gamma: spike-and-slab binary probabilities, (N, Q)
    :returns: (psi0, psi1, psi2) as host numpy arrays of shapes
        (N,), (N, M), (N, M, M)
    """
    # An ARD (per-dimension) lengthscale selects the "het" kernel variants.
    het_noise = isinstance(lengthscale, np.ndarray) and len(lengthscale) > 1

    N, Q = mu.shape
    M = Z.shape[0]
    self._initGPUCache(N, M, Q)

    if het_noise:
        l_gpu = self.gpuCache['l_gpu']
        l_gpu.set(np.asfortranarray(lengthscale**2))
    else:
        lengthscale2 = lengthscale**2

    Z_gpu = self.gpuCache['Z_gpu']
    mu_gpu = self.gpuCache['mu_gpu']
    S_gpu = self.gpuCache['S_gpu']
    gamma_gpu = self.gpuCache['gamma_gpu']
    logGamma_gpu = self.gpuCache['logGamma_gpu']
    log1Gamma_gpu = self.gpuCache['log1Gamma_gpu']
    logpsidenom_gpu = self.gpuCache['logpsidenom_gpu']
    psi0_gpu = self.gpuCache['psi0_gpu']
    psi1_gpu = self.gpuCache['psi1_gpu']
    psi2_gpu = self.gpuCache['psi2_gpu']

    Z_gpu.set(np.asfortranarray(Z))
    mu_gpu.set(np.asfortranarray(mu))
    # Bug fix: S and gamma were uploaded without enforcing Fortran order,
    # while the CUDA index macros assume column-major layout (as is done
    # for Z and mu above) — a C-ordered input would silently transpose.
    S_gpu.set(np.asfortranarray(S))
    gamma_gpu.set(np.asfortranarray(gamma))
    linalg_gpu.log(gamma_gpu, logGamma_gpu)      # log(gamma)
    linalg_gpu.logOne(gamma_gpu, log1Gamma_gpu)  # log(1 - gamma)

    # psi0 is constant: variance at every data point.
    psi0_gpu.fill(variance)
    if het_noise:
        comp_logpsidenom_het(logpsidenom_gpu, S_gpu, l_gpu, 1.0)
        # Bug fix: the het kernels declare `double *l` (a device pointer to
        # the per-dimension squared lengthscales); the original passed
        # `lengthscale2`, which is undefined in this branch (NameError).
        comp_psi1_het(psi1_gpu, variance, l_gpu, Z_gpu, mu_gpu, S_gpu, logGamma_gpu, log1Gamma_gpu, logpsidenom_gpu, N, M, Q)
        comp_logpsidenom_het(logpsidenom_gpu, S_gpu, l_gpu, 2.0)
        comp_psi2_het(psi2_gpu, variance, l_gpu, Z_gpu, mu_gpu, S_gpu, logGamma_gpu, log1Gamma_gpu, logpsidenom_gpu, N, M, Q)
    else:
        comp_logpsidenom(logpsidenom_gpu, S_gpu, lengthscale2, 1.0)
        comp_psi1(psi1_gpu, variance, lengthscale2, Z_gpu, mu_gpu, S_gpu, logGamma_gpu, log1Gamma_gpu, logpsidenom_gpu, N, M, Q)
        comp_logpsidenom(logpsidenom_gpu, S_gpu, lengthscale2, 2.0)
        comp_psi2(psi2_gpu, variance, lengthscale2, Z_gpu, mu_gpu, S_gpu, logGamma_gpu, log1Gamma_gpu, logpsidenom_gpu, N, M, Q)

    return psi0_gpu.get(), psi1_gpu.get(), psi2_gpu.get()
@Cache_this(limit=1) @Cache_this(limit=1)
def _Z_distances(Z): def _Z_distances(Z):
@ -199,7 +298,7 @@ def _psi1computations(variance, lengthscale, Z, mu, S, gamma):
logGamma_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(gamma))) logGamma_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(gamma)))
log1Gamma_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(1.-gamma))) log1Gamma_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(1.-gamma)))
logpsi1denom_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(S/lengthscale2+1.))) logpsi1denom_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(S/lengthscale2+1.)))
psi1_gpu = gpuarray.empty((mu.shape[0],Z.shape[0]),np.float64) psi1_gpu = gpuarray.empty((mu.shape[0],Z.shape[0]),np.float64, order='F')
comp_psi1(psi1_gpu, variance, lengthscale2, Z_gpu, mu_gpu, S_gpu, logGamma_gpu, log1Gamma_gpu, logpsi1denom_gpu, N, M, Q) comp_psi1(psi1_gpu, variance, lengthscale2, Z_gpu, mu_gpu, S_gpu, logGamma_gpu, log1Gamma_gpu, logpsi1denom_gpu, N, M, Q)
@ -265,7 +364,7 @@ def _psi2computations(variance, lengthscale, Z, mu, S, gamma):
logGamma_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(gamma))) logGamma_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(gamma)))
log1Gamma_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(1.-gamma))) log1Gamma_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(1.-gamma)))
logpsi2denom_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(2.*S/lengthscale2+1.))) logpsi2denom_gpu = gpuarray.to_gpu(np.asfortranarray(np.log(2.*S/lengthscale2+1.)))
psi2_gpu = gpuarray.empty((mu.shape[0],Z.shape[0],Z.shape[0]),np.float64) psi2_gpu = gpuarray.empty((mu.shape[0],Z.shape[0],Z.shape[0]),np.float64, order='F')
comp_psi2(psi2_gpu, variance, lengthscale2, Z_gpu, mu_gpu, S_gpu, logGamma_gpu, log1Gamma_gpu, logpsi2denom_gpu, N, M, Q) comp_psi2(psi2_gpu, variance, lengthscale2, Z_gpu, mu_gpu, S_gpu, logGamma_gpu, log1Gamma_gpu, logpsi2denom_gpu, N, M, Q)

View file

@ -8,7 +8,8 @@ from ...util.misc import param_to_array
from stationary import Stationary from stationary import Stationary
from GPy.util.caching import Cache_this from GPy.util.caching import Cache_this
from ...core.parameterization import variational from ...core.parameterization import variational
from psi_comp import ssrbf_psi_gpucomp as ssrbf_psi_comp from psi_comp import ssrbf_psi_comp
from psi_comp.ssrbf_psi_gpucomp import PSICOMP_SSRBF
class RBF(Stationary): class RBF(Stationary):
""" """
@ -19,11 +20,16 @@ class RBF(Stationary):
k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg)
""" """
_support_GPU = True
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf'): def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='rbf'):
super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name) super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
self.weave_options = {} self.weave_options = {}
self.group_spike_prob = False self.group_spike_prob = False
if self.useGPU:
self.psicomp = PSICOMP_SSRBF()
def K_of_r(self, r): def K_of_r(self, r):
return self.variance * np.exp(-0.5 * r**2) return self.variance * np.exp(-0.5 * r**2)
@ -35,10 +41,17 @@ class RBF(Stationary):
#---------------------------------------# #---------------------------------------#
def psi0(self, Z, variational_posterior): def psi0(self, Z, variational_posterior):
if self.useGPU:
if isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob)[0]
else:
return self.Kdiag(variational_posterior.mean) return self.Kdiag(variational_posterior.mean)
def psi1(self, Z, variational_posterior): def psi1(self, Z, variational_posterior):
if isinstance(variational_posterior, variational.SpikeAndSlabPosterior): if isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
if self.useGPU:
return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob)[1]
else:
psi1, _, _, _, _, _, _ = ssrbf_psi_comp._psi1computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob) psi1, _, _, _, _, _, _ = ssrbf_psi_comp._psi1computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob)
else: else:
_, _, _, psi1 = self._psi1computations(Z, variational_posterior) _, _, _, psi1 = self._psi1computations(Z, variational_posterior)
@ -46,6 +59,9 @@ class RBF(Stationary):
def psi2(self, Z, variational_posterior): def psi2(self, Z, variational_posterior):
if isinstance(variational_posterior, variational.SpikeAndSlabPosterior): if isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
if self.useGPU:
return self.psicomp.psicomputations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob)[2]
else:
psi2, _, _, _, _, _, _ = ssrbf_psi_comp._psi2computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob) psi2, _, _, _, _, _, _ = ssrbf_psi_comp._psi2computations(self.variance, self.lengthscale, Z, variational_posterior.mean, variational_posterior.variance, variational_posterior.binary_prob)
else: else:
_, _, _, _, psi2 = self._psi2computations(Z, variational_posterior) _, _, _, _, psi2 = self._psi2computations(Z, variational_posterior)

View file

@ -63,6 +63,8 @@ class SSGPLVM(SparseGP):
kernel.group_spike_prob = True kernel.group_spike_prob = True
self.variational_prior.group_spike_prob = True self.variational_prior.group_spike_prob = True
if isinstance(inference_method, VarDTC_GPU) and self.kern._support_GPU:
self.kern.useGPU = True
SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, name, **kwargs) SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, name, **kwargs)
self.add_parameter(self.X, index=0) self.add_parameter(self.X, index=0)

View file

@ -10,6 +10,20 @@ import numpy as np
try: try:
import pycuda.autoinit import pycuda.autoinit
from pycuda.reduction import ReductionKernel from pycuda.reduction import ReductionKernel
from pycuda.elementwise import ElementwiseKernel
# log|A| for A is a low triangle matrix
# logDiagSum(A, A.shape[0]+1)
logDiagSum = ReductionKernel(np.float64, neutral="0", reduce_expr="a+b", map_expr="i%step==0?log(x[i]):0", arguments="double *x, int step") logDiagSum = ReductionKernel(np.float64, neutral="0", reduce_expr="a+b", map_expr="i%step==0?log(x[i]):0", arguments="double *x, int step")
#=======================================================================================
# Element-wise functions
#=======================================================================================
# out[i] = log(in[i]), element-wise on a GPUArray.
# NOTE(review): the name `log` shadows the builtin/math.log at this module's
# top level; callers reference it as linalg_gpu.log, so renaming would break
# the public interface — flagged only.
log = ElementwiseKernel("double *in, double *out", "out[i] = log(in[i])", "log_element")
# out[i] = log(1.0 - in[i]), element-wise (i.e. log1m; used for log(1-gamma)).
logOne = ElementwiseKernel("double *in, double *out", "out[i] = log(1.-in[i])", "logOne_element")
except: except:
pass pass