mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-04 01:02:39 +02:00

fix: Fixed numpy 1.12 indexing and shape preservation

This commit is contained in:
parent afe37dbfd8
commit 6cd13ac2b3

9 changed files with 26 additions and 25 deletions
@@ -68,12 +68,12 @@ class GpGrid(GP):
         for b in (B.T):
             x = b
             N = 1
-            G = np.zeros(D)
+            G = np.zeros(D, dtype=np.int_)
             for d in range(D):
                 G[d] = len(A[d])
             N = np.prod(G)
             for d in range(D-1, -1, -1):
-                X = np.reshape(x, (G[d], np.round(N/G[d])), order='F')
+                X = np.reshape(x, (G[d], int(np.round(N/G[d]))), order='F')
                 Z = np.dot(A[d], X)
                 Z = Z.T
                 x = np.reshape(Z, (-1, 1), order='F')
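The two `+` lines above reflect numpy 1.12 dropping implicit float-to-integer conversion: np.zeros(D) yields float64, so G[d] and np.round(N/G[d]) are floats and are no longer accepted as reshape sizes. A minimal sketch of the failure mode and the fix (array names are illustrative, not GPy's):

import numpy as np

x = np.arange(12)

G = np.zeros(3)                      # float64 by default
G[0] = 3
# np.reshape(x, (G[0], 4), order='F')   # recent numpy: TypeError, float object
                                         # cannot be interpreted as an integer

G = np.zeros(3, dtype=np.int_)       # integer sizes from the start
G[0] = 3
N = 12
X = np.reshape(x, (G[0], int(np.round(N / G[0]))), order='F')  # works: (3, 4)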
@@ -36,12 +36,12 @@ class GaussianGridInference(LatentFunctionInference):
         x = b
         N = 1
         D = len(A)
-        G = np.zeros((D,1))
+        G = np.zeros((D), dtype=np.int_)
         for d in range(0, D):
             G[d] = len(A[d])
         N = np.prod(G)
         for d in range(D-1, -1, -1):
-            X = np.reshape(x, (G[d], np.round(N/G[d])), order='F')
+            X = np.reshape(x, (G[d], int(np.round(N/G[d]))), order='F')
             Z = np.dot(A[d], X)
             Z = Z.T
             x = np.reshape(Z, (-1, 1), order='F')
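Here the old code also built G as a (D, 1) column vector, so G[d] was itself a length-1 float array; the "shape preservation" in the commit title presumably refers to keeping G a flat integer vector so that G[d] stays a scalar. A rough sketch of the difference, with illustrative names:

import numpy as np

G_col = np.zeros((3, 1))                 # column vector: G_col[0] is array([0.])
G_flat = np.zeros(3, dtype=np.int_)      # flat integer vector: G_flat[0] is a scalar

G_col[0] = 4
G_flat[0] = 4
np.reshape(np.arange(12), (G_flat[0], 3))    # fine: scalar integer size
# np.reshape(np.arange(12), (G_col[0], 3))   # rejected on numpy >= 1.12:
                                              # a float array is not a valid size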
@@ -46,11 +46,11 @@ class Kern(Parameterized):
         self.input_dim = int(input_dim)

         if active_dims is None:
-            active_dims = np.arange(input_dim)
+            active_dims = np.arange(input_dim, dtype=np.int_)

         self.active_dims = np.atleast_1d(np.asarray(active_dims, np.int_))

-        self._all_dims_active = np.atleast_1d(self.active_dims).astype(int)
+        self._all_dims_active = np.atleast_1d(self.active_dims).astype(np.int_)

         assert self.active_dims.size == self.input_dim, "input_dim={} does not match len(active_dim)={}".format(self.input_dim, self._all_dims_active.size)
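active_dims ends up selecting columns of the kernel's input, roughly X[:, self.active_dims] in the slicing helper, and numpy 1.12 only accepts integer (or boolean) index arrays there; pinning the dtype to np.int_ avoids an accidental float index array. A hedged sketch:

import numpy as np

X = np.random.randn(5, 3)
active_dims_int = np.arange(3, dtype=np.int_)
active_dims_float = np.arange(3, dtype=float)

X[:, active_dims_int]      # works: integer column selection
# X[:, active_dims_float]  # IndexError: arrays used as indices must be of
                           # integer (or boolean) type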
@@ -55,7 +55,6 @@ class StdPeriodic(Kern):
     def __init__(self, input_dim, variance=1., period=None, lengthscale=None, ARD1=False, ARD2=False, active_dims=None, name='std_periodic',useGPU=False):
         super(StdPeriodic, self).__init__(input_dim, active_dims, name, useGPU=useGPU)
-        self.input_dim = input_dim
         self.ARD1 = ARD1 # correspond to periods
         self.ARD2 = ARD2 # correspond to lengthscales
@@ -66,7 +65,7 @@ class StdPeriodic(Kern):
                 period = np.asarray(period)
                 assert period.size == 1, "Only one period needed for non-ARD kernel"
             else:
-                period = np.ones(1.0)
+                period = np.ones(1)
         else:
             if period is not None:
                 period = np.asarray(period)
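np.ones(1.0) relies on an implicit float-to-integer conversion of the shape argument, which recent numpy turns into an error; a quick check:

import numpy as np

np.ones(1)      # array([1.]) - integer shape argument, fine everywhere
# np.ones(1.0)  # recent numpy: TypeError, 'float' object cannot be
                # interpreted as an integer (older releases only warned)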
@@ -63,7 +63,8 @@ class Binomial(Likelihood):
         :rtype: float
         """
         N = Y_metadata['trials']
-        assert N.shape == y.shape
+        np.testing.assert_array_equal(N.shape, y.shape)

         nchoosey = special.gammaln(N+1) - special.gammaln(y+1) - special.gammaln(N-y+1)
         return nchoosey + y*np.log(inv_link_f) + (N-y)*np.log(1.-inv_link_f)
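This and the following Binomial hunks swap the bare assert for np.testing.assert_array_equal: the check is the same, but the failure message prints both shapes and the call is not stripped when Python runs with -O. A small illustration with made-up arrays:

import numpy as np

N = np.ones((4, 1))
y = np.ones((4, 1))
np.testing.assert_array_equal(N.shape, y.shape)   # passes silently, like assert
# If y were np.ones((4,)) instead, this would raise an AssertionError whose
# message shows both shape tuples, whereas `assert N.shape == y.shape` only
# reports a bare failure (and is skipped entirely under `python -O`).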
@@ -83,7 +84,8 @@ class Binomial(Likelihood):
         :rtype: Nx1 array
         """
         N = Y_metadata['trials']
-        assert N.shape == y.shape
+        np.testing.assert_array_equal(N.shape, y.shape)

         return y/inv_link_f - (N-y)/(1.-inv_link_f)

     def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
@@ -108,7 +110,7 @@ class Binomial(Likelihood):
         (the distribution for y_i depends only on inverse link of f_i not on inverse link of f_(j!=i)
         """
         N = Y_metadata['trials']
-        assert N.shape == y.shape
+        np.testing.assert_array_equal(N.shape, y.shape)
         return -y/np.square(inv_link_f) - (N-y)/np.square(1.-inv_link_f)

     def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None):
@@ -131,7 +133,8 @@ class Binomial(Likelihood):
         (the distribution for y_i depends only on inverse link of f_i not on inverse link of f_(j!=i)
         """
         N = Y_metadata['trials']
-        assert N.shape == y.shape
+        np.testing.assert_array_equal(N.shape, y.shape)

         inv_link_f2 = np.square(inv_link_f)
         return 2*y/inv_link_f**3 - 2*(N-y)/(1.-inv_link_f)**3
@@ -293,13 +293,13 @@ class StateSpace(Model):

             # Update step (only if there is data)
             if not np.isnan(Y[:,k]):
                 if Y.shape[0]==1:
                     K = PF[:,:,k].dot(H.T)/(H.dot(PF[:,:,k]).dot(H.T) + R)
                 else:
                     LL = linalg.cho_factor(H.dot(PF[:,:,k]).dot(H.T) + R)
                     K = linalg.cho_solve(LL, H.dot(PF[:,:,k].T)).T
                 MF[:,k] += K.dot(Y[:,k]-H.dot(MF[:,k]))
                 PF[:,:,k] -= K.dot(H).dot(PF[:,:,k])

         # Return values
         return (MF, PF)
@@ -215,7 +215,7 @@ class R_handling_Python(Measurement_Callables_Class):
            inv_R_square_root(k)
        """
        self.R = R
-        self.index = index
+        self.index = np.asarray(index, np.int_)
        self.R_time_var_index = int(R_time_var_index)
        self.dR = dR
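self.index is later used to index into the stored noise matrices, and under numpy 1.12 a float-valued index array fails at that point, hence the explicit integer cast (the same pattern appears in the next two hunks). An illustrative sketch, not GPy's actual call site:

import numpy as np

R = np.arange(10.0)
index = np.asarray([2.0, 5.0])             # float "indices", e.g. loaded from data
# R[index[0]]                              # IndexError: only integers (and slices,
                                            # ellipsis, ...) are valid indices
index = np.asarray(index, np.int_)          # explicit cast, as in this commit
R[index[0]]                                 # 2.0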
@@ -350,7 +350,7 @@ class Q_handling_Python(Dynamic_Callables_Class):
        """

        self.Q = Q
-        self.index = index
+        self.index = np.asarray(index, np.int_)
        self.Q_time_var_index = Q_time_var_index
        self.dQ = dQ
@@ -427,7 +427,7 @@ class Std_Dynamic_Callables_Python(Q_handling_Class):
                 self).__init__(Q, index, Q_time_var_index, unique_Q_number, dQ)

        self.A = A
-        self.A_time_var_index = A_time_var_index
+        self.A_time_var_index = np.asarray(A_time_var_index, np.int_)
        self.dA = dA

    def f_a(self, k, m, A):
@@ -119,7 +119,7 @@ class TestNoiseModels(object):
        self.integer_Y = np.where(tmp > 0, tmp, 0)
        self.ns = np.random.poisson(50, size=self.N)[:, None]
        p = np.abs(np.cos(2*np.pi*self.X + np.random.normal(scale=.2, size=(self.N, self.D)))).mean(1)
-        self.binomial_Y = np.array([np.random.binomial(self.ns[i], p[i]) for i in range(p.shape[0])])[:, None]
+        self.binomial_Y = np.array([np.random.binomial(int(self.ns[i]), p[i]) for i in range(p.shape[0])])[:, None]

        self.var = 0.2
        self.deg_free = 4.0
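self.ns has shape (N, 1), so self.ns[i] is a 1-element array rather than a scalar; each binomial draw is then itself a 1-element array and the stacked result picks up an extra axis. Casting to int keeps each draw scalar, so the trailing [:, None] yields the intended Nx1 column. A sketch with small, made-up sizes:

import numpy as np

ns = np.random.poisson(50, size=4)[:, None]   # shape (4, 1)
p = np.full(4, 0.3)

as_arrays = np.array([np.random.binomial(ns[i], p[i]) for i in range(4)])[:, None]
as_ints = np.array([np.random.binomial(int(ns[i]), p[i]) for i in range(4)])[:, None]
print(as_arrays.shape)   # (4, 1, 1) - unintended extra axis
print(as_ints.shape)     # (4, 1)    - the intended Nx1 column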
@@ -570,7 +570,6 @@ class TestNoiseModels(object):
        white_var = 1e-4
        kernel = GPy.kern.RBF(X.shape[1]) + GPy.kern.White(X.shape[1])
        laplace_likelihood = GPy.inference.latent_function_inference.Laplace()

        m = GPy.core.GP(X.copy(), Y.copy(), kernel, likelihood=model, Y_metadata=Y_metadata, inference_method=laplace_likelihood)
        m.kern.white.constrain_fixed(white_var)
@@ -54,7 +54,7 @@ class MiscTests(unittest.TestCase):
        m.randomize()
        m.optimize()
        # Compute the mean of model prediction on 1e5 Monte Carlo samples
-        Xp = np.random.uniform(size=(1e5,2))
+        Xp = np.random.uniform(size=(int(1e5),2))
        Xp[:,0] = Xp[:,0]*15-5
        Xp[:,1] = Xp[:,1]*15
        _, var = m.predict(Xp)
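1e5 is a Python float, and recent numpy no longer silently truncates float entries in a size tuple, so the sample count has to be an explicit int. A quick check:

import numpy as np

Xp = np.random.uniform(size=(int(1e5), 2))   # shape (100000, 2)
# np.random.uniform(size=(1e5, 2))           # recent numpy: TypeError, 'float'
                                              # object cannot be interpreted as
                                              # an integer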