diff --git a/.travis.yml b/.travis.yml
index 51b9ca2b..63fa1c5e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,8 +16,9 @@ addons:
env:
- PYTHON_VERSION=2.7
#- PYTHON_VERSION=3.3
- - PYTHON_VERSION=3.4
+ #- PYTHON_VERSION=3.4
- PYTHON_VERSION=3.5
+ - PYTHON_VERSION=3.6
before_install:
- wget https://github.com/mzwiessele/travis_scripts/raw/master/download_miniconda.sh
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 88284db3..035f857e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,105 @@
# Changelog
+## v1.7.2 (2017-06-17)
+
+### Fix
+
+* Appveyor build python 3.6. [mzwiessele]
+
+### Other
+
+* Bump version: 1.7.1 → 1.7.2. [mzwiessele]
+
+
+## v1.7.1 (2017-06-17)
+
+### Fix
+
+* Appveyor build python 3.6. [mzwiessele]
+
+### Other
+
+* Bump version: 1.7.0 → 1.7.1. [mzwiessele]
+
+
+## v1.7.0 (2017-06-17)
+
+### Fix
+
+* Support for 3.5 and higher now that 3.6 is out. [mzwiessele]
+
+### Other
+
+* Bump version: 1.6.3 → 1.7.0. [mzwiessele]
+
+
+## v1.6.3 (2017-06-17)
+
+### Other
+
+* Bump version: 1.6.2 → 1.6.3. [mzwiessele]
+
+* Merge pull request #504 from rmcantin/devel. [Max Zwiessele]
+
+* Fix python 2-3 compatibility. [Ruben Martinez-Cantin]
+
+* Merge pull request #511 from dirmeier/devel. [Max Zwiessele]
+
+* Added LICENSE file to MANIFEST.in. [dirmeier]
+
+
+## v1.6.2 (2017-04-12)
+
+### Fix
+
+* Updated keywords. [mzwiessele]
+
+### Other
+
+* Bump version: 1.6.1 → 1.6.2. [mzwiessele]
+
+* Merge pull request #491 from alexfeld/parallel_opt. [Max Zwiessele]
+
+ fix for parallel optimization
+
+* Fix in sparse_gp_mpi optimizer. [Alex Feldstein]
+
+* Fix for parallel optimization. [Alex Feldstein]
+
+* Merge pull request #492 from pgmoren/devel. [Zhenwen Dai]
+
+ We did some benchmarking on classification. These changes should be fine. Let's merge it in.
+
+* Changes in EP/EPDTC to fix numerical issues and increase the flexibility of the inference. [Moreno]
+
+ Changes to avoid numerical issues and improve the performance:
+ - Keep value of the EP parameters between calls
+ - Enforce positivity of tau_tilde
+ - Stable computation of the EP moments for the Bernoulli likelihood
+ - Compute marginal in the GP model without directly inverting tau_tilde
+
+ Changes to improve the flexibility:
+ - Add parameter for maximum number of iterations
+ - Distinguish between alternated/nested mode
+ - Distinguish between sequential/parallel updates in EP
+
+* Merge pull request #489 from SheffieldML/linalg_cython-1. [Max Zwiessele]
+
+ cython in linalg fix #458
+
+* Cython in linalg. [Max Zwiessele]
+
+ did set cython to working if linalg_cython was importable.
+
+* Merge pull request #486 from SheffieldML/deploy. [Max Zwiessele]
+
+ Merge pull request #471 from SheffieldML/devel
+
+* Merge pull request #471 from SheffieldML/devel. [Max Zwiessele]
+
+ new version
+
+
## v1.6.1 (2017-02-28)
### Fix
diff --git a/GPy/__version__.py b/GPy/__version__.py
index f49459c7..1db13ec2 100644
--- a/GPy/__version__.py
+++ b/GPy/__version__.py
@@ -1 +1 @@
-__version__ = "1.6.1"
+__version__ = "1.7.2"
diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index b90c95c1..7f23e5af 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -562,11 +562,12 @@ class GP(Model):
"""
self.inference_method.on_optimization_start()
try:
- super(GP, self).optimize(optimizer, start, messages, max_iters, ipython_notebook, clear_after_finish, **kwargs)
+ ret = super(GP, self).optimize(optimizer, start, messages, max_iters, ipython_notebook, clear_after_finish, **kwargs)
except KeyboardInterrupt:
print("KeyboardInterrupt caught, calling on_optimization_end() to round things up")
self.inference_method.on_optimization_end()
raise
+ return ret
def infer_newX(self, Y_new, optimize=True):
"""
diff --git a/GPy/core/sparse_gp_mpi.py b/GPy/core/sparse_gp_mpi.py
index a26b858f..f12ae7a7 100644
--- a/GPy/core/sparse_gp_mpi.py
+++ b/GPy/core/sparse_gp_mpi.py
@@ -88,9 +88,9 @@ class SparseGP_MPI(SparseGP):
def optimize(self, optimizer=None, start=None, **kwargs):
self._IN_OPTIMIZATION_ = True
if self.mpi_comm==None:
- super(SparseGP_MPI, self).optimize(optimizer,start,**kwargs)
+ ret = super(SparseGP_MPI, self).optimize(optimizer,start,**kwargs)
elif self.mpi_comm.rank==0:
- super(SparseGP_MPI, self).optimize(optimizer,start,**kwargs)
+ ret = super(SparseGP_MPI, self).optimize(optimizer,start,**kwargs)
self.mpi_comm.Bcast(np.int32(-1),root=0)
elif self.mpi_comm.rank>0:
x = self.optimizer_array.copy()
@@ -111,6 +111,7 @@ class SparseGP_MPI(SparseGP):
self._IN_OPTIMIZATION_ = False
raise Exception("Unrecognizable flag for synchronization!")
self._IN_OPTIMIZATION_ = False
+        return ret if self.mpi_comm is None or self.mpi_comm.rank == 0 else None
def parameters_changed(self):
if isinstance(self.inference_method,VarDTC_minibatch):
diff --git a/GPy/inference/latent_function_inference/expectation_propagation.py b/GPy/inference/latent_function_inference/expectation_propagation.py
index 01560b3c..02beedb3 100644
--- a/GPy/inference/latent_function_inference/expectation_propagation.py
+++ b/GPy/inference/latent_function_inference/expectation_propagation.py
@@ -1,15 +1,16 @@
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
-from ...util.linalg import jitchol, DSYR, dtrtrs, dtrtri
+from ...util.linalg import jitchol, DSYR, dtrtrs, dtrtri, pdinv, dpotrs, tdot, symmetrify
from paramz import ObsAr
from . import ExactGaussianInference, VarDTC
from ...util import diag
+from .posterior import PosteriorEP as Posterior
log_2_pi = np.log(2*np.pi)
class EPBase(object):
- def __init__(self, epsilon=1e-6, eta=1., delta=1., always_reset=False):
+ def __init__(self, epsilon=1e-6, eta=1., delta=1., always_reset=False, max_iters=np.inf, ep_mode="alternated", parallel_updates=False):
"""
The expectation-propagation algorithm.
For nomenclature see Rasmussen & Williams 2006.
@@ -22,11 +23,15 @@ class EPBase(object):
:type delta: float64
:param always_reset: setting to always reset the approximation at the beginning of every inference call.
:type always_reest: boolean
-
+    :param max_iters: int. Maximum number of EP iterations.
+    :param ep_mode: string. It can be "nested" (EP is run every time the Hyperparameters change) or "alternated" (It runs EP at the beginning and then optimize the Hyperparameters).
+    :param parallel_updates: boolean. If true, updates of the parameters of the sites in parallel
"""
super(EPBase, self).__init__()
self.always_reset = always_reset
- self.epsilon, self.eta, self.delta = epsilon, eta, delta
+ self.epsilon, self.eta, self.delta, self.max_iters = epsilon, eta, delta, max_iters
+ self.ep_mode = ep_mode
+ self.parallel_updates = parallel_updates
self.reset()
def reset(self):
@@ -59,25 +64,28 @@ class EP(EPBase, ExactGaussianInference):
if K is None:
K = kern.K(X)
- if getattr(self, '_ep_approximation', None) is None:
- #if we don't yet have the results of runnign EP, run EP and store the computed factors in self._ep_approximation
- mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
+ if self.ep_mode=="nested":
+ #Force EP at each step of the optimization
+ self._ep_approximation = None
+ mu, Sigma, mu_tilde, tau_tilde, log_Z_tilde = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
+ elif self.ep_mode=="alternated":
+ if getattr(self, '_ep_approximation', None) is None:
+                #if we don't yet have the results of running EP, run EP and store the computed factors in self._ep_approximation
+ mu, Sigma, mu_tilde, tau_tilde, log_Z_tilde = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
+ else:
+ #if we've already run EP, just use the existing approximation stored in self._ep_approximation
+ mu, Sigma, mu_tilde, tau_tilde, log_Z_tilde = self._ep_approximation
else:
- #if we've already run EP, just use the existing approximation stored in self._ep_approximation
- mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation
+ raise ValueError("ep_mode value not valid")
- return super(EP, self).inference(kern, X, likelihood, mu_tilde[:,None], mean_function=mean_function, Y_metadata=Y_metadata, variance=1./tau_tilde, K=K, Z_tilde=np.log(Z_tilde).sum())
+ v_tilde = mu_tilde * tau_tilde
+ return self._inference(K, tau_tilde, v_tilde, likelihood, Y_metadata=Y_metadata, Z_tilde=log_Z_tilde.sum())
def expectation_propagation(self, K, Y, likelihood, Y_metadata):
num_data, data_dim = Y.shape
assert data_dim == 1, "This EP methods only works for 1D outputs"
- #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
- mu = np.zeros(num_data)
- Sigma = K.copy()
- diag.add(Sigma, 1e-7)
-
# Makes computing the sign quicker if we work with numpy arrays rather
# than ObsArrays
Y = Y.values.copy()
@@ -91,12 +99,19 @@ class EP(EPBase, ExactGaussianInference):
v_cav = np.empty(num_data,dtype=np.float64)
#initial values - Gaussian factors
+ #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
if self.old_mutilde is None:
tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data))
+ Sigma = K.copy()
+ diag.add(Sigma, 1e-7)
+ mu = np.zeros(num_data)
else:
assert self.old_mutilde.size == num_data, "data size mis-match: did you change the data? try resetting!"
mu_tilde, v_tilde = self.old_mutilde, self.old_vtilde
tau_tilde = v_tilde/mu_tilde
+ mu, Sigma, _ = self._ep_compute_posterior(K, tau_tilde, v_tilde)
+ diag.add(Sigma, 1e-7)
+ # TODO: Check the log-marginal under both conditions and choose the best one
#Approximation
tau_diff = self.epsilon + 1.
@@ -104,7 +119,7 @@ class EP(EPBase, ExactGaussianInference):
tau_tilde_old = np.nan
v_tilde_old = np.nan
iterations = 0
- while (tau_diff > self.epsilon) or (v_diff > self.epsilon):
+ while ((tau_diff > self.epsilon) or (v_diff > self.epsilon)) and (iterations < self.max_iters):
update_order = np.random.permutation(num_data)
for i in update_order:
#Cavity distribution parameters
@@ -122,21 +137,25 @@ class EP(EPBase, ExactGaussianInference):
#Site parameters update
delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma[i,i])
delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma[i,i])
+ tau_tilde_prev = tau_tilde[i]
tau_tilde[i] += delta_tau
+
+ # Enforce positivity of tau_tilde. Even though this is guaranteed for logconcave sites, it is still possible
+ # to get negative values due to numerical errors. Moreover, the value of tau_tilde should be positive in order to
+                # update the marginal likelihood without instability issues.
+ if tau_tilde[i] < np.finfo(float).eps:
+ tau_tilde[i] = np.finfo(float).eps
+ delta_tau = tau_tilde[i] - tau_tilde_prev
v_tilde[i] += delta_v
- #Posterior distribution parameters update
- ci = delta_tau/(1.+ delta_tau*Sigma[i,i])
- DSYR(Sigma, Sigma[:,i].copy(), -ci)
- mu = np.dot(Sigma, v_tilde)
+
+ if self.parallel_updates == False:
+ #Posterior distribution parameters update
+ ci = delta_tau/(1.+ delta_tau*Sigma[i,i])
+ DSYR(Sigma, Sigma[:,i].copy(), -ci)
+ mu = np.dot(Sigma, v_tilde)
#(re) compute Sigma and mu using full Cholesky decompy
- tau_tilde_root = np.sqrt(tau_tilde)
- Sroot_tilde_K = tau_tilde_root[:,None] * K
- B = np.eye(num_data) + Sroot_tilde_K * tau_tilde_root[None,:]
- L = jitchol(B)
- V, _ = dtrtrs(L, Sroot_tilde_K, lower=1)
- Sigma = K - np.dot(V.T,V)
- mu = np.dot(Sigma,v_tilde)
+ mu, Sigma, _ = self._ep_compute_posterior(K, tau_tilde, v_tilde)
#monitor convergence
if iterations > 0:
@@ -150,15 +169,70 @@ class EP(EPBase, ExactGaussianInference):
mu_tilde = v_tilde/tau_tilde
mu_cav = v_cav/tau_cav
sigma2_sigma2tilde = 1./tau_cav + 1./tau_tilde
- Z_tilde = np.exp(np.log(Z_hat) + 0.5*np.log(2*np.pi) + 0.5*np.log(sigma2_sigma2tilde)
- + 0.5*((mu_cav - mu_tilde)**2) / (sigma2_sigma2tilde))
- return mu, Sigma, mu_tilde, tau_tilde, Z_tilde
+
+ # Z_tilde after removing the terms that can lead to infinite terms due to tau_tilde close to zero.
+        # These terms cancel with the corresponding terms in the marginal loglikelihood
+ log_Z_tilde = (np.log(Z_hat) + 0.5*np.log(2*np.pi) + 0.5*np.log(1+tau_tilde/tau_cav)
+ - 0.5 * ((v_tilde)**2 * 1./(tau_cav + tau_tilde)) + 0.5*(v_cav * ( ( (tau_tilde/tau_cav) * v_cav - 2.0 * v_tilde ) * 1./(tau_cav + tau_tilde))))
+ # - 0.5*np.log(tau_tilde) + 0.5*(v_tilde*v_tilde*1./tau_tilde)
+
+ self.old_mutilde = mu_tilde
+ self.old_vtilde = v_tilde
+
+ return mu, Sigma, mu_tilde, tau_tilde, log_Z_tilde
+
+ def _ep_compute_posterior(self, K, tau_tilde, v_tilde):
+ num_data = len(tau_tilde)
+ tau_tilde_root = np.sqrt(tau_tilde)
+ Sroot_tilde_K = tau_tilde_root[:,None] * K
+ B = np.eye(num_data) + Sroot_tilde_K * tau_tilde_root[None,:]
+ L = jitchol(B)
+ V, _ = dtrtrs(L, Sroot_tilde_K, lower=1)
+ Sigma = K - np.dot(V.T,V) #K - KS^(1/2)BS^(1/2)K = (K^(-1) + \Sigma^(-1))^(-1)
+ mu = np.dot(Sigma,v_tilde)
+ return (mu, Sigma, L)
+
+ def _ep_marginal(self, K, tau_tilde, v_tilde, Z_tilde):
+ mu, Sigma, L = self._ep_compute_posterior(K, tau_tilde, v_tilde)
+
+ # Gaussian log marginal excluding terms that can go to infinity due to arbitrarily small tau_tilde.
+ # These terms cancel out with the terms excluded from Z_tilde
+ B_logdet = np.sum(2.0*np.log(np.diag(L)))
+ log_marginal = 0.5*(-len(tau_tilde) * log_2_pi - B_logdet + np.sum(v_tilde * np.dot(Sigma,v_tilde)))
+ log_marginal += Z_tilde
+
+ return log_marginal, mu, Sigma, L
+
+ def _inference(self, K, tau_tilde, v_tilde, likelihood, Z_tilde, Y_metadata=None):
+ log_marginal, mu, Sigma, L = self._ep_marginal(K, tau_tilde, v_tilde, Z_tilde)
+
+ tau_tilde_root = np.sqrt(tau_tilde)
+ Sroot_tilde_K = tau_tilde_root[:,None] * K
+
+ aux_alpha , _ = dpotrs(L, np.dot(Sroot_tilde_K, v_tilde), lower=1)
+ alpha = (v_tilde - tau_tilde_root * aux_alpha)[:,None] #(K + Sigma^(\tilde))^(-1) /mu^(/tilde)
+ LWi, _ = dtrtrs(L, np.diag(tau_tilde_root), lower=1)
+ Wi = np.dot(LWi.T,LWi)
+ symmetrify(Wi) #(K + Sigma^(\tilde))^(-1)
+
+ dL_dK = 0.5 * (tdot(alpha) - Wi)
+ dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK), Y_metadata)
+
+ return Posterior(woodbury_inv=Wi, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha}
+
class EPDTC(EPBase, VarDTC):
def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None, Lm=None, dL_dKmm=None, psi0=None, psi1=None, psi2=None):
- assert Y.shape[1]==1, "ep in 1D only (for now!)"
+ if self.always_reset:
+ self.reset()
+
+ num_data, output_dim = Y.shape
+ assert output_dim == 1, "ep in 1D only (for now!)"
+
+ if Lm is None:
+ Kmm = kern.K(Z)
+ Lm = jitchol(Kmm)
- Kmm = kern.K(Z)
if psi1 is None:
try:
Kmn = kern.K(Z, X)
@@ -167,35 +241,36 @@ class EPDTC(EPBase, VarDTC):
else:
Kmn = psi1.T
- if getattr(self, '_ep_approximation', None) is None:
- mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation = self.expectation_propagation(Kmm, Kmn, Y, likelihood, Y_metadata)
+ if self.ep_mode=="nested":
+ #Force EP at each step of the optimization
+ self._ep_approximation = None
+ mu, Sigma_diag, mu_tilde, tau_tilde, log_Z_tilde = self._ep_approximation = self.expectation_propagation(Kmm, Kmn, Y, likelihood, Y_metadata)
+ elif self.ep_mode=="alternated":
+ if getattr(self, '_ep_approximation', None) is None:
+                #if we don't yet have the results of running EP, run EP and store the computed factors in self._ep_approximation
+ mu, Sigma_diag, mu_tilde, tau_tilde, log_Z_tilde = self._ep_approximation = self.expectation_propagation(Kmm, Kmn, Y, likelihood, Y_metadata)
+ else:
+ #if we've already run EP, just use the existing approximation stored in self._ep_approximation
+ mu, Sigma_diag, mu_tilde, tau_tilde, log_Z_tilde = self._ep_approximation
else:
- mu, Sigma, mu_tilde, tau_tilde, Z_tilde = self._ep_approximation
+ raise ValueError("ep_mode value not valid")
return super(EPDTC, self).inference(kern, X, Z, likelihood, mu_tilde,
mean_function=mean_function,
Y_metadata=Y_metadata,
precision=tau_tilde,
Lm=Lm, dL_dKmm=dL_dKmm,
- psi0=psi0, psi1=psi1, psi2=psi2, Z_tilde=np.log(Z_tilde).sum())
+ psi0=psi0, psi1=psi1, psi2=psi2, Z_tilde=log_Z_tilde.sum())
+
def expectation_propagation(self, Kmm, Kmn, Y, likelihood, Y_metadata):
+
num_data, output_dim = Y.shape
assert output_dim == 1, "This EP methods only works for 1D outputs"
- LLT0 = Kmm.copy()
- #diag.add(LLT0, 1e-8)
-
- Lm = jitchol(LLT0)
- Lmi = dtrtri(Lm)
- Kmmi = np.dot(Lmi.T,Lmi)
- KmmiKmn = np.dot(Kmmi,Kmn)
- Qnn_diag = np.sum(Kmn*KmmiKmn,-2)
-
- #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
- mu = np.zeros(num_data)
- LLT = Kmm.copy() #Sigma = K.copy()
- Sigma_diag = Qnn_diag.copy() + 1e-8
+ # Makes computing the sign quicker if we work with numpy arrays rather
+ # than ObsArrays
+ Y = Y.values.copy()
#Initial values - Marginal moments
Z_hat = np.zeros(num_data,dtype=np.float64)
@@ -206,73 +281,110 @@ class EPDTC(EPBase, VarDTC):
v_cav = np.empty(num_data,dtype=np.float64)
#initial values - Gaussian factors
+ #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
+ LLT0 = Kmm.copy()
+ Lm = jitchol(LLT0) #K_m = L_m L_m^\top
+ Vm,info = dtrtrs(Lm,Kmn,lower=1)
+ # Lmi = dtrtri(Lm)
+ # Kmmi = np.dot(Lmi.T,Lmi)
+ # KmmiKmn = np.dot(Kmmi,Kmn)
+ # Qnn_diag = np.sum(Kmn*KmmiKmn,-2)
+ Qnn_diag = np.sum(Vm*Vm,-2) #diag(Knm Kmm^(-1) Kmn)
+ #diag.add(LLT0, 1e-8)
if self.old_mutilde is None:
+ #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
+ LLT = LLT0.copy() #Sigma = K.copy()
+ mu = np.zeros(num_data)
+ Sigma_diag = Qnn_diag.copy() + 1e-8
tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data))
else:
assert self.old_mutilde.size == num_data, "data size mis-match: did you change the data? try resetting!"
mu_tilde, v_tilde = self.old_mutilde, self.old_vtilde
tau_tilde = v_tilde/mu_tilde
+ mu, Sigma_diag, LLT = self._ep_compute_posterior(LLT0, Kmn, tau_tilde, v_tilde)
+ Sigma_diag += 1e-8
+ # TODO: Check the log-marginal under both conditions and choose the best one
#Approximation
tau_diff = self.epsilon + 1.
v_diff = self.epsilon + 1.
+ tau_tilde_old = np.nan
+ v_tilde_old = np.nan
iterations = 0
- tau_tilde_old = 0.
- v_tilde_old = 0.
- update_order = np.random.permutation(num_data)
-
- while (tau_diff > self.epsilon) or (v_diff > self.epsilon):
+ while ((tau_diff > self.epsilon) or (v_diff > self.epsilon)) and (iterations < self.max_iters):
+ update_order = np.random.permutation(num_data)
for i in update_order:
#Cavity distribution parameters
tau_cav[i] = 1./Sigma_diag[i] - self.eta*tau_tilde[i]
v_cav[i] = mu[i]/Sigma_diag[i] - self.eta*v_tilde[i]
+ if Y_metadata is not None:
+                    # Pick out the relevant metadata for Yi
+ Y_metadata_i = {}
+ for key in Y_metadata.keys():
+ Y_metadata_i[key] = Y_metadata[key][i, :]
+ else:
+ Y_metadata_i = None
+
#Marginal moments
- Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav[i], v_cav[i])#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
+ Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav[i], v_cav[i], Y_metadata_i=Y_metadata_i)
#Site parameters update
delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma_diag[i])
delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma_diag[i])
+ tau_tilde_prev = tau_tilde[i]
tau_tilde[i] += delta_tau
+
+ # Enforce positivity of tau_tilde. Even though this is guaranteed for logconcave sites, it is still possible
+ # to get negative values due to numerical errors. Moreover, the value of tau_tilde should be positive in order to
+                # update the marginal likelihood without instability issues.
+ if tau_tilde[i] < np.finfo(float).eps:
+ tau_tilde[i] = np.finfo(float).eps
+ delta_tau = tau_tilde[i] - tau_tilde_prev
v_tilde[i] += delta_v
+
#Posterior distribution parameters update
+ if self.parallel_updates == False:
+ #DSYR(Sigma, Sigma[:,i].copy(), -delta_tau/(1.+ delta_tau*Sigma[i,i]))
+ DSYR(LLT,Kmn[:,i].copy(),delta_tau)
+ L = jitchol(LLT)
+ V,info = dtrtrs(L,Kmn,lower=1)
+ Sigma_diag = np.maximum(np.sum(V*V,-2), np.finfo(float).eps) #diag(K_nm (L L^\top)^(-1)) K_mn
+ si = np.sum(V.T*V[:,i],-1) #(V V^\top)[:,i]
+ mu += (delta_v-delta_tau*mu[i])*si
+ #mu = np.dot(Sigma, v_tilde)
- #DSYR(Sigma, Sigma[:,i].copy(), -delta_tau/(1.+ delta_tau*Sigma[i,i]))
- DSYR(LLT,Kmn[:,i].copy(),delta_tau)
- L = jitchol(LLT+np.eye(LLT.shape[0])*1e-7)
-
- V,info = dtrtrs(L,Kmn,lower=1)
- Sigma_diag = np.sum(V*V,-2)
- si = np.sum(V.T*V[:,i],-1)
- mu += (delta_v-delta_tau*mu[i])*si
- #mu = np.dot(Sigma, v_tilde)
-
- #(re) compute Sigma and mu using full Cholesky decompy
- LLT = LLT0 + np.dot(Kmn*tau_tilde[None,:],Kmn.T)
- #diag.add(LLT, 1e-8)
- L = jitchol(LLT)
- V, _ = dtrtrs(L,Kmn,lower=1)
- V2, _ = dtrtrs(L.T,V,lower=0)
- #Sigma_diag = np.sum(V*V,-2)
- #Knmv_tilde = np.dot(Kmn,v_tilde)
- #mu = np.dot(V2.T,Knmv_tilde)
- Sigma = np.dot(V2.T,V2)
- mu = np.dot(Sigma,v_tilde)
+                #(re) compute Sigma, Sigma_diag and mu using full Cholesky decomposition
+ mu, Sigma_diag, LLT = self._ep_compute_posterior(LLT0, Kmn, tau_tilde, v_tilde)
+ Sigma_diag = np.maximum(Sigma_diag, np.finfo(float).eps)
#monitor convergence
- #if iterations>0:
- tau_diff = np.mean(np.square(tau_tilde-tau_tilde_old))
- v_diff = np.mean(np.square(v_tilde-v_tilde_old))
-
+ if iterations>0:
+ tau_diff = np.mean(np.square(tau_tilde-tau_tilde_old))
+ v_diff = np.mean(np.square(v_tilde-v_tilde_old))
tau_tilde_old = tau_tilde.copy()
v_tilde_old = v_tilde.copy()
-
- # Only to while loop once:?
- tau_diff = 0
- v_diff = 0
iterations += 1
mu_tilde = v_tilde/tau_tilde
mu_cav = v_cav/tau_cav
sigma2_sigma2tilde = 1./tau_cav + 1./tau_tilde
- Z_tilde = np.exp(np.log(Z_hat) + 0.5*np.log(2*np.pi) + 0.5*np.log(sigma2_sigma2tilde)
+
+ log_Z_tilde = (np.log(Z_hat) + 0.5*np.log(2*np.pi) + 0.5*np.log(sigma2_sigma2tilde)
+ 0.5*((mu_cav - mu_tilde)**2) / (sigma2_sigma2tilde))
- return mu, Sigma, ObsAr(mu_tilde[:,None]), tau_tilde, Z_tilde
+
+ self.old_mutilde = mu_tilde
+ self.old_vtilde = v_tilde
+
+ return mu, Sigma_diag, ObsAr(mu_tilde[:,None]), tau_tilde, log_Z_tilde
+
+ def _ep_compute_posterior(self, LLT0, Kmn, tau_tilde, v_tilde):
+ LLT = LLT0 + np.dot(Kmn*tau_tilde[None,:],Kmn.T)
+ L = jitchol(LLT)
+ V, _ = dtrtrs(L,Kmn,lower=1)
+ #Sigma_diag = np.sum(V*V,-2)
+ #Knmv_tilde = np.dot(Kmn,v_tilde)
+ #mu = np.dot(V2.T,Knmv_tilde)
+ Sigma = np.dot(V.T,V)
+ mu = np.dot(Sigma,v_tilde)
+ Sigma_diag = np.diag(Sigma).copy()
+
+ return (mu, Sigma_diag, LLT)
diff --git a/GPy/inference/latent_function_inference/posterior.py b/GPy/inference/latent_function_inference/posterior.py
index 73b9dff0..40ea5c73 100644
--- a/GPy/inference/latent_function_inference/posterior.py
+++ b/GPy/inference/latent_function_inference/posterior.py
@@ -192,7 +192,7 @@ class Posterior(object):
def _raw_predict(self, kern, Xnew, pred_var, full_cov=False):
woodbury_vector = self.woodbury_vector
woodbury_inv = self.woodbury_inv
-
+
if not isinstance(Xnew, VariationalPosterior):
Kx = kern.K(pred_var, Xnew)
mu = np.dot(Kx.T, woodbury_vector)
@@ -220,7 +220,7 @@ class Posterior(object):
else:
psi0_star = kern.psi0(pred_var, Xnew)
psi1_star = kern.psi1(pred_var, Xnew)
- psi2_star = kern.psi2n(pred_var, Xnew)
+ psi2_star = kern.psi2n(pred_var, Xnew)
la = woodbury_vector
mu = np.dot(psi1_star, la) # TODO: dimensions?
N,M,D = psi0_star.shape[0],psi1_star.shape[1], la.shape[1]
@@ -231,18 +231,19 @@ class Posterior(object):
di = np.diag_indices(la.shape[1])
else:
tmp = psi2_star - psi1_star[:,:,None]*psi1_star[:,None,:]
- var = (tmp.reshape(-1,M).dot(la).reshape(N,M,D)*la[None,:,:]).sum(1) + psi0_star[:,None]
+ var = (tmp.reshape(-1,M).dot(la).reshape(N,M,D)*la[None,:,:]).sum(1) + psi0_star[:,None]
if woodbury_inv.ndim==2:
var += -psi2_star.reshape(N,-1).dot(woodbury_inv.flat)[:,None]
else:
var += -psi2_star.reshape(N,-1).dot(woodbury_inv.reshape(-1,D))
var = np.clip(var,1e-15,np.inf)
return mu, var
-
+
+
class PosteriorExact(Posterior):
-
+
def _raw_predict(self, kern, Xnew, pred_var, full_cov=False):
-
+
Kx = kern.K(pred_var, Xnew)
mu = np.dot(Kx.T, self.woodbury_vector)
if len(mu.shape)==1:
@@ -270,3 +271,37 @@ class PosteriorExact(Posterior):
var[:, i] = (Kxx - np.square(tmp).sum(0))
var = var
return mu, var
+
+class PosteriorEP(Posterior):
+
+ def _raw_predict(self, kern, Xnew, pred_var, full_cov=False):
+
+ Kx = kern.K(pred_var, Xnew)
+ mu = np.dot(Kx.T, self.woodbury_vector)
+ if len(mu.shape)==1:
+ mu = mu.reshape(-1,1)
+
+ if full_cov:
+ Kxx = kern.K(Xnew)
+ if self._woodbury_inv.ndim == 2:
+ tmp = np.dot(Kx.T,np.dot(self._woodbury_inv, Kx))
+ var = Kxx - tmp
+ elif self._woodbury_inv.ndim == 3: # Missing data
+ var = np.empty((Kxx.shape[0],Kxx.shape[1],self._woodbury_inv.shape[2]))
+ for i in range(var.shape[2]):
+ tmp = np.dot(Kx.T,np.dot(self._woodbury_inv[:,:,i], Kx))
+ var[:, :, i] = (Kxx - tmp)
+ var = var
+ else:
+ Kxx = kern.Kdiag(Xnew)
+ if self._woodbury_inv.ndim == 2:
+ tmp = (np.dot(self._woodbury_inv, Kx) * Kx).sum(0)
+ var = (Kxx - tmp)[:,None]
+ elif self._woodbury_inv.ndim == 3: # Missing data
+ var = np.empty((Kxx.shape[0],self._woodbury_inv.shape[2]))
+ for i in range(var.shape[1]):
+ tmp = (Kx * np.dot(self._woodbury_inv[:,:,i], Kx)).sum(0)
+ var[:, i] = (Kxx - tmp)
+ var = var
+
+ return mu, var
diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index ec055120..fb0e946b 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -88,6 +88,10 @@ class VarDTC(LatentFunctionInference):
Kmm = kern.K(Z).copy()
diag.add(Kmm, self.const_jitter)
Lm = jitchol(Kmm)
+ else:
+ Kmm = tdot(Lm)
+ symmetrify(Kmm)
+
# The rather complex computations of A, and the psi stats
if uncertain_inputs:
diff --git a/GPy/likelihoods/bernoulli.py b/GPy/likelihoods/bernoulli.py
index 1997db06..cfa16ad3 100644
--- a/GPy/likelihoods/bernoulli.py
+++ b/GPy/likelihoods/bernoulli.py
@@ -2,7 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
-from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf
+from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf, derivLogCdfNormal, logCdfNormal
from . import link_functions
from .likelihood import Likelihood
@@ -59,24 +59,24 @@ class Bernoulli(Likelihood):
raise ValueError("bad value for Bernoulli observation (0, 1)")
if isinstance(self.gp_link, link_functions.Probit):
z = sign*v_i/np.sqrt(tau_i**2 + tau_i)
- Z_hat = std_norm_cdf(z)
- Z_hat = np.where(Z_hat==0, 1e-15, Z_hat)
- phi = std_norm_pdf(z)
+ phi_div_Phi = derivLogCdfNormal(z)
+ log_Z_hat = logCdfNormal(z)
- mu_hat = v_i/tau_i + sign*phi/(Z_hat*np.sqrt(tau_i**2 + tau_i))
- sigma2_hat = 1./tau_i - (phi/((tau_i**2+tau_i)*Z_hat))*(z+phi/Z_hat)
+ mu_hat = v_i/tau_i + sign*phi_div_Phi/np.sqrt(tau_i**2 + tau_i)
+ sigma2_hat = 1./tau_i - (phi_div_Phi/(tau_i**2+tau_i))*(z+phi_div_Phi)
elif isinstance(self.gp_link, link_functions.Heaviside):
- a = sign*v_i/np.sqrt(tau_i)
- Z_hat = np.max(1e-13, std_norm_cdf(z))
- N = std_norm_pdf(a)
- mu_hat = v_i/tau_i + sign*N/Z_hat/np.sqrt(tau_i)
- sigma2_hat = (1. - a*N/Z_hat - np.square(N/Z_hat))/tau_i
+ z = sign*v_i/np.sqrt(tau_i)
+ phi_div_Phi = derivLogCdfNormal(z)
+ log_Z_hat = logCdfNormal(z)
+ mu_hat = v_i/tau_i + sign*phi_div_Phi/np.sqrt(tau_i)
+            sigma2_hat = (1. - z*phi_div_Phi - np.square(phi_div_Phi))/tau_i
else:
#TODO: do we want to revert to numerical quadrature here?
raise ValueError("Exact moment matching not available for link {}".format(self.gp_link.__name__))
- return Z_hat, mu_hat, sigma2_hat
+ # TODO: Output log_Z_hat instead of Z_hat (needs to be change in all others likelihoods)
+ return np.exp(log_Z_hat), mu_hat, sigma2_hat
def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
if isinstance(self.gp_link, link_functions.Probit):
diff --git a/GPy/testing/inference_tests.py b/GPy/testing/inference_tests.py
index 4bd2bc4f..dbde1329 100644
--- a/GPy/testing/inference_tests.py
+++ b/GPy/testing/inference_tests.py
@@ -49,6 +49,43 @@ class InferenceXTestCase(unittest.TestCase):
m.optimize()
x, mi = m.infer_newX(m.Y, optimize=True)
np.testing.assert_array_almost_equal(m.X, mi.X, decimal=2)
+class InferenceGPEP(unittest.TestCase):
+
+ def genData(self):
+ np.random.seed(1)
+ k = GPy.kern.RBF(1, variance=7., lengthscale=0.2)
+ X = np.random.rand(200,1)
+ f = np.random.multivariate_normal(np.zeros(200), k.K(X) + 1e-5 * np.eye(X.shape[0]))
+ lik = GPy.likelihoods.Bernoulli()
+ p = lik.gp_link.transf(f) # squash the latent function
+ Y = lik.samples(f).reshape(-1,1)
+ return X, Y
+
+ def test_inference_EP(self):
+ from paramz import ObsAr
+ X, Y = self.genData()
+ lik = GPy.likelihoods.Bernoulli()
+ k = GPy.kern.RBF(1, variance=7., lengthscale=0.2)
+ inf = GPy.inference.latent_function_inference.expectation_propagation.EP(max_iters=30, delta=0.5)
+ self.model = GPy.core.GP(X=X,
+ Y=Y,
+ kernel=k,
+ inference_method=inf,
+ likelihood=lik)
+ K = self.model.kern.K(X)
+ mu, Sigma, mu_tilde, tau_tilde, log_Z_tilde = self.model.inference_method.expectation_propagation(K, ObsAr(Y), lik, None)
+
+ v_tilde = mu_tilde * tau_tilde
+ p, m, d = self.model.inference_method._inference(K, tau_tilde, v_tilde, lik, Y_metadata=None, Z_tilde=log_Z_tilde.sum())
+ p0, m0, d0 = super(GPy.inference.latent_function_inference.expectation_propagation.EP, inf).inference(k, X,lik ,mu_tilde[:,None], mean_function=None, variance=1./tau_tilde, K=K, Z_tilde=log_Z_tilde.sum() + np.sum(- 0.5*np.log(tau_tilde) + 0.5*(v_tilde*v_tilde*1./tau_tilde)))
+
+ assert (np.sum(np.array([m - m0,
+ np.sum(d['dL_dK'] - d0['dL_dK']),
+ np.sum(d['dL_dthetaL'] - d0['dL_dthetaL']),
+ np.sum(d['dL_dm'] - d0['dL_dm']),
+ np.sum(p._woodbury_vector - p0._woodbury_vector),
+                                   np.sum(p.woodbury_inv - p0.woodbury_inv)])) < 1e-6)
+
class HMCSamplerTest(unittest.TestCase):
@@ -64,7 +101,7 @@ class HMCSamplerTest(unittest.TestCase):
hmc = GPy.inference.mcmc.HMC(m,stepsize=1e-2)
s = hmc.sample(num_samples=3)
-
+
class MCMCSamplerTest(unittest.TestCase):
def test_sampling(self):
diff --git a/GPy/testing/util_tests.py b/GPy/testing/util_tests.py
index ba3d7ddf..84b88bbf 100644
--- a/GPy/testing/util_tests.py
+++ b/GPy/testing/util_tests.py
@@ -1,21 +1,21 @@
#===============================================================================
# Copyright (c) 2016, Max Zwiessele, Alan Saul
# All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
-#
+#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
-#
+#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
-#
+#
# * Neither the name of GPy.testing.util_tests nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
@@ -36,7 +36,7 @@ class TestDebug(unittest.TestCase):
from GPy.util.debug import checkFinite
array = np.random.normal(0, 1, 100).reshape(25,4)
self.assertTrue(checkFinite(array, name='test'))
-
+
array[np.random.binomial(1, .3, array.shape).astype(bool)] = np.nan
self.assertFalse(checkFinite(array))
@@ -45,10 +45,10 @@ class TestDebug(unittest.TestCase):
from GPy.util.linalg import tdot
array = np.random.normal(0, 1, 100).reshape(25,4)
self.assertFalse(checkFullRank(tdot(array), name='test'))
-
+
array = np.random.normal(0, 1, (25,25))
self.assertTrue(checkFullRank(tdot(array)))
-
+
def test_fixed_inputs_median(self):
""" test fixed_inputs convenience function """
from GPy.plotting.matplot_dep.util import fixed_inputs
@@ -113,8 +113,8 @@ class TestDebug(unittest.TestCase):
#test data set. Not using random noise just in case it occasionally
#causes it not to cluster correctly.
#groundtruth cluster identifiers are: [0,1,1,0]
-
- #data contains a list of the four sets of time series (3 per data point)
+
+ #data contains a list of the four sets of time series (3 per data point)
data = [np.array([[ 2.18094245, 1.96529789, 2.00265523, 2.18218742, 2.06795428],
[ 1.62254829, 1.75748448, 1.83879347, 1.87531326, 1.52503496],
@@ -130,7 +130,7 @@ class TestDebug(unittest.TestCase):
[ 1.69336013, 1.72285186, 1.6339506 , 1.61212022, 1.39198698]])]
#inputs contains their associated X values
-
+
inputs = [np.array([[ 0. ],
[ 0.68040097],
[ 1.20316795],
@@ -148,10 +148,66 @@ class TestDebug(unittest.TestCase):
[ 1.71881079],
[ 2.67162871],
[ 3.23761907]])]
-
+
#try doing the clustering
active = GPy.util.cluster_with_offset.cluster(data,inputs)
#check to see that the clustering has correctly clustered the time series.
clusters = set([frozenset(cluster) for cluster in active])
assert set([1,2]) in clusters, "Offset Clustering algorithm failed"
    assert set([0,3]) in clusters, "Offset Clustering algorithm failed"
+
+
+class TestUnivariateGaussian(unittest.TestCase):
+ def setUp(self):
+ self.zz = [-5.0, -0.8, 0.0, 0.5, 2.0, 10.0]
+
+ def test_logPdfNormal(self):
+ from GPy.util.univariate_Gaussian import logPdfNormal
+ pySols = [-13.4189385332,
+ -1.2389385332,
+ -0.918938533205,
+ -1.0439385332,
+ -2.9189385332,
+ -50.9189385332]
+ diff = 0.0
+ for i in range(len(pySols)):
+ diff += abs(logPdfNormal(self.zz[i]) - pySols[i])
+ self.assertTrue(diff < 1e-10)
+
+ def test_cdfNormal(self):
+ from GPy.util.univariate_Gaussian import cdfNormal
+ pySols = [2.86651571879e-07,
+ 0.211855398583,
+ 0.5,
+ 0.691462461274,
+ 0.977249868052,
+ 1.0]
+ diff = 0.0
+ for i in range(len(pySols)):
+ diff += abs(cdfNormal(self.zz[i]) - pySols[i])
+ self.assertTrue(diff < 1e-10)
+
+ def test_logCdfNormal(self):
+ from GPy.util.univariate_Gaussian import logCdfNormal
+ pySols = [-15.064998394,
+ -1.55185131919,
+ -0.69314718056,
+ -0.368946415289,
+ -0.023012909329,
+ 0.0]
+ diff = 0.0
+ for i in range(len(pySols)):
+ diff += abs(logCdfNormal(self.zz[i]) - pySols[i])
+ self.assertTrue(diff < 1e-10)
+ def test_derivLogCdfNormal(self):
+ from GPy.util.univariate_Gaussian import derivLogCdfNormal
+ pySols = [5.18650396941,
+ 1.3674022693,
+ 0.79788456081,
+ 0.50916043387,
+ 0.0552478626962,
+ 0.0]
+ diff = 0.0
+ for i in range(len(pySols)):
+ diff += abs(derivLogCdfNormal(self.zz[i]) - pySols[i])
+ self.assertTrue(diff < 1e-8)
diff --git a/GPy/util/datasets.py b/GPy/util/datasets.py
index 6cad1eed..f8fa8239 100644
--- a/GPy/util/datasets.py
+++ b/GPy/util/datasets.py
@@ -206,7 +206,10 @@ def authorize_download(dataset_name=None):
def download_data(dataset_name=None):
"""Check with the user that the are happy with terms and conditions for the data set, then download it."""
- import itertools
+ try:
+ from itertools import zip_longest
+ except ImportError:
+ from itertools import izip_longest as zip_longest
dr = data_resources[dataset_name]
if not authorize_download(dataset_name):
@@ -220,8 +223,8 @@ def download_data(dataset_name=None):
if 'suffices' in dr: zip_urls += (dr['suffices'], )
else: zip_urls += ([],)
- for url, files, save_names, suffices in itertools.zip_longest(*zip_urls, fillvalue=[]):
- for f, save_name, suffix in itertools.zip_longest(files, save_names, suffices, fillvalue=None):
+ for url, files, save_names, suffices in zip_longest(*zip_urls, fillvalue=[]):
+ for f, save_name, suffix in zip_longest(files, save_names, suffices, fillvalue=None):
download_url(os.path.join(url,f), dataset_name, save_name, suffix=suffix)
return True
diff --git a/GPy/util/linalg.py b/GPy/util/linalg.py
index 83a6452b..cad3b352 100644
--- a/GPy/util/linalg.py
+++ b/GPy/util/linalg.py
@@ -11,12 +11,8 @@ from scipy.linalg import lapack, blas
from .config import config
import logging
-try:
+if config.getboolean('cython', 'working'):
from . import linalg_cython
- config.set('cython', 'working', 'True')
-except ImportError:
- config.set('cython', 'working', 'False')
-
def force_F_ordered_symmetric(A):
"""
diff --git a/GPy/util/normalizer.py b/GPy/util/normalizer.py
index 78ad945d..557d9825 100644
--- a/GPy/util/normalizer.py
+++ b/GPy/util/normalizer.py
@@ -1,7 +1,7 @@
'''
Created on Aug 27, 2014
-@author: t-mazwie
+@author: Max Zwiessele
'''
import logging
import numpy as np
diff --git a/GPy/util/univariate_Gaussian.py b/GPy/util/univariate_Gaussian.py
index 97d912c2..b1d20a9e 100644
--- a/GPy/util/univariate_Gaussian.py
+++ b/GPy/util/univariate_Gaussian.py
@@ -23,3 +23,164 @@ def inv_std_norm_cdf(x):
inv_erf = np.sign(z) * np.sqrt( np.sqrt(b**2 - ln1z2/a) - b )
return np.sqrt(2) * inv_erf
+def logPdfNormal(z):
+ """
+ Robust implementations of log pdf of a standard normal.
+
+ @see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
+ in C from Matthias Seeger.
+ """
+ return -0.5 * (M_LN2PI + z * z)
+
+def cdfNormal(z):
+ """
+ Robust implementations of cdf of a standard normal.
+
+ @see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
+ in C from Matthias Seeger.
+
+ """
+ if (abs(z) < ERF_CODY_LIMIT1):
+ # Phi(z) approx (1+y R_3(y^2))/2, y=z/sqrt(2)
+ return 0.5 * (1.0 + (z / M_SQRT2) * _erfRationalHelperR3(0.5 * z * z))
+ elif (z < 0.0):
+ # Phi(z) approx N(z)Q(-z)/(-z), z<0
+ return np.exp(logPdfNormal(z)) * _erfRationalHelper(-z) / (-z)
+ else:
+ return 1.0 - np.exp(logPdfNormal(z)) * _erfRationalHelper(z) / z
+
+
+
+def logCdfNormal(z):
+ """
+ Robust implementations of log cdf of a standard normal.
+
+ @see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
+ in C from Matthias Seeger.
+ """
+ if (abs(z) < ERF_CODY_LIMIT1):
+ # Phi(z) approx (1+y R_3(y^2))/2, y=z/sqrt(2)
+ return np.log1p((z / M_SQRT2) * _erfRationalHelperR3(0.5 * z * z)) - M_LN2
+ elif (z < 0.0):
+ # Phi(z) approx N(z)Q(-z)/(-z), z<0
+ return logPdfNormal(z) - np.log(-z) + np.log(_erfRationalHelper(-z))
+ else:
+ return np.log1p(-(np.exp(logPdfNormal(z))) * _erfRationalHelper(z) / z)
+
+
+
+def derivLogCdfNormal(z):
+ """
+ Robust implementations of derivative of the log cdf of a standard normal.
+
+ @see [[https://github.com/mseeger/apbsint/blob/master/src/eptools/potentials/SpecfunServices.h original implementation]]
+ in C from Matthias Seeger.
+ """
+ if (abs(z) < ERF_CODY_LIMIT1):
+ # Phi(z) approx (1 + y R_3(y^2))/2, y = z/sqrt(2)
+ return 2.0 * np.exp(logPdfNormal(z)) / (1.0 + (z / M_SQRT2) * _erfRationalHelperR3(0.5 * z * z))
+ elif (z < 0.0):
+ # Phi(z) approx N(z) Q(-z)/(-z), z<0
+ return -z / _erfRationalHelper(-z)
+ else:
+ t = np.exp(logPdfNormal(z))
+ return t / (1.0 - t * _erfRationalHelper(z) / z)
+
+
+def _erfRationalHelper(x):
+ assert x > 0.0, "Arg of erfRationalHelper should be >0.0; was {}".format(x)
+
+ if (x >= ERF_CODY_LIMIT2):
+ """
+ x/sqrt(2) >= 4
+
+ Q(x) = 1 + sqrt(pi) y R_1(y),
+ R_1(y) = poly(p_j,y) / poly(q_j,y), where y = 2/(x*x)
+
+ Ordering of arrays: 4,3,2,1,0,5 (only for numerator p_j; q_5=1)
+ ATTENTION: The p_j are negative of the entries here
+ p (see P1_ERF)
+ q (see Q1_ERF)
+ """
+ y = 2.0 / (x * x)
+
+ res = y * P1_ERF[5]
+ den = y
+ i = 0
+
+ while (i <= 3):
+ res = (res + P1_ERF[i]) * y
+ den = (den + Q1_ERF[i]) * y
+ i += 1
+
+ # Minus, because p(j) values have to be negated
+ return 1.0 - M_SQRTPI * y * (res + P1_ERF[4]) / (den + Q1_ERF[4])
+ else:
+ """
+ x/sqrt(2) < 4, x/sqrt(2) >= 0.469
+
+ Q(x) = sqrt(pi) y R_2(y),
+ R_2(y) = poly(p_j,y) / poly(q_j,y), y = x/sqrt(2)
+
+ Ordering of arrays: 7,6,5,4,3,2,1,0,8 (only p_8; q_8=1)
+ p (see P2_ERF)
+        q (see Q2_ERF)
+ """
+ y = x / M_SQRT2
+ res = y * P2_ERF[8]
+ den = y
+ i = 0
+
+ while (i <= 6):
+ res = (res + P2_ERF[i]) * y
+ den = (den + Q2_ERF[i]) * y
+ i += 1
+
+ return M_SQRTPI * y * (res + P2_ERF[7]) / (den + Q2_ERF[7])
+
+def _erfRationalHelperR3(y):
+ assert y >= 0.0, "Arg of erfRationalHelperR3 should be >=0.0; was {}".format(y)
+
+ nom = y * P3_ERF[4]
+ den = y
+ i = 0
+ while (i <= 2):
+ nom = (nom + P3_ERF[i]) * y
+ den = (den + Q3_ERF[i]) * y
+ i += 1
+ return (nom + P3_ERF[3]) / (den + Q3_ERF[3])
+
+ERF_CODY_LIMIT1 = 0.6629
+ERF_CODY_LIMIT2 = 5.6569
+M_LN2PI = 1.83787706640934533908193770913
+M_LN2 = 0.69314718055994530941723212146
+M_SQRTPI = 1.77245385090551602729816748334
+M_SQRT2 = 1.41421356237309504880168872421
+
+#weights for the erfHelpers (defined here to avoid redefinitions at every call)
+P1_ERF = [
+3.05326634961232344e-1, 3.60344899949804439e-1,
+1.25781726111229246e-1, 1.60837851487422766e-2,
+6.58749161529837803e-4, 1.63153871373020978e-2]
+Q1_ERF = [
+2.56852019228982242e+0, 1.87295284992346047e+0,
+5.27905102951428412e-1, 6.05183413124413191e-2,
+2.33520497626869185e-3]
+P2_ERF = [
+5.64188496988670089e-1, 8.88314979438837594e+0,
+6.61191906371416295e+1, 2.98635138197400131e+2,
+8.81952221241769090e+2, 1.71204761263407058e+3,
+2.05107837782607147e+3, 1.23033935479799725e+3,
+2.15311535474403846e-8]
+Q2_ERF = [
+1.57449261107098347e+1, 1.17693950891312499e+2,
+5.37181101862009858e+2, 1.62138957456669019e+3,
+3.29079923573345963e+3, 4.36261909014324716e+3,
+3.43936767414372164e+3, 1.23033935480374942e+3]
+P3_ERF = [
+3.16112374387056560e+0, 1.13864154151050156e+2,
+3.77485237685302021e+2, 3.20937758913846947e+3,
+1.85777706184603153e-1]
+Q3_ERF = [
+2.36012909523441209e+1, 2.44024637934444173e+2,
+1.28261652607737228e+3, 2.84423683343917062e+3]
diff --git a/MANIFEST.in b/MANIFEST.in
index 8e665256..cf220f31 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -16,6 +16,9 @@ recursive-include GPy *.c
recursive-include GPy *.h
recursive-include GPy *.pyx
+# LICENSE
+include LICENSE.txt
+
# Testing
#include GPy/testing/baseline/*.png
#include GPy/testing/pickle_test.pickle
diff --git a/README.md b/README.md
index 5a771e1b..ffbf6a34 100644
--- a/README.md
+++ b/README.md
@@ -76,7 +76,7 @@ If that is the case, it is best to clean the repo and reinstall.
[
](http://www.apple.com/osx/)
[
](https://en.wikipedia.org/wiki/List_of_Linux_distributions)
-Python 2.7, 3.4 and higher
+Python 2.7, 3.5 and higher
## Citation
diff --git a/appveyor.yml b/appveyor.yml
index 4ffda8f9..70b694de 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -3,12 +3,14 @@ environment:
secure: 8/ZjXFwtd1S7ixd7PJOpptupKKEDhm2da/q3unabJ00=
COVERALLS_REPO_TOKEN:
secure: d3Luic/ESkGaWnZrvWZTKrzO+xaVwJWaRCEP0F+K/9DQGPSRZsJ/Du5g3s4XF+tS
- gpy_version: 1.6.1
+ gpy_version: 1.7.2
matrix:
- PYTHON_VERSION: 2.7
MINICONDA: C:\Miniconda-x64
- PYTHON_VERSION: 3.5
MINICONDA: C:\Miniconda35-x64
+ - PYTHON_VERSION: 3.6
+ MINICONDA: C:\Miniconda36-x64
#configuration:
# - Debug
@@ -62,19 +64,19 @@ deploy_script:
- echo test >> %USERPROFILE%\\.pypirc
- echo[
- echo [pypi] >> %USERPROFILE%\\.pypirc
-- echo username:maxz >> %USERPROFILE%\\.pypirc
-- echo password:%pip_access% >> %USERPROFILE%\\.pypirc
+- echo username = maxz >> %USERPROFILE%\\.pypirc
+- echo password = %pip_access% >> %USERPROFILE%\\.pypirc
- echo[
- echo [test] >> %USERPROFILE%\\.pypirc
-- echo repository:https://test.pypi.org/legacy/ >> %USERPROFILE%\\.pypirc
-- echo username:maxz >> %USERPROFILE%\\.pypirc
-- echo password:%pip_access% >> %USERPROFILE%\\.pypirc
+- echo repository = https://testpypi.python.org/pypi >> %USERPROFILE%\\.pypirc
+- echo username = maxz >> %USERPROFILE%\\.pypirc
+- echo password = %pip_access% >> %USERPROFILE%\\.pypirc
- ps: >-
if ($env:APPVEYOR_REPO_BRANCH -eq 'devel') {
- twine upload --skip-existing -r test dist/*
+ twine upload --skip-existing -r test "dist/*"
}
elseif ($env:APPVEYOR_REPO_BRANCH -eq 'deploy') {
- twine upload --skip-existing dist/*
+ twine upload --skip-existing "dist/*"
}
else {
echo not deploying on other branches
diff --git a/setup.cfg b/setup.cfg
index 514f36eb..2bf0a993 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 1.6.1
+current_version = 1.7.2
tag = True
commit = True
diff --git a/setup.py b/setup.py
index ec18c338..318545b5 100644
--- a/setup.py
+++ b/setup.py
@@ -169,9 +169,14 @@ setup(name = 'GPy',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.3',
- 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Framework :: IPython',
+ 'Intended Audience :: Science/Research',
+ 'Intended Audience :: Developers',
+ 'Topic :: Software Development',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+
]
)