Merge branch 'devel' into maintenance-support-numpy2

This commit is contained in:
Martin Bubel 2025-06-19 12:18:51 +02:00 committed by GitHub
commit 5def1c7110
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 14 additions and 14 deletions

View file

@@ -39,7 +39,7 @@ jobs:
- name: Install test dependencies
run: |
pip install matplotlib
pip install "pytest<8"
pip install 'pytest<8'
- name: pytest
run: |
@@ -71,7 +71,7 @@ jobs:
- name: Install test dependencies
run: |
pip install matplotlib
pip install "pytest<8"
pip install 'pytest<8'
- name: pytest
run: |
@@ -103,7 +103,7 @@ jobs:
- name: Install test dependencies
run: |
pip install matplotlib
pip install "pytest<8"
pip install 'pytest<8'
- name: pytest
run: |

View file

@@ -38,7 +38,7 @@ class SparseGP_MPI(SparseGP):
mean_function=None, inference_method=None, name='sparse gp',
Y_metadata=None, mpi_comm=None, normalizer=False):
self._IN_OPTIMIZATION_ = False
if mpi_comm != None:
if mpi_comm is not None:
if inference_method is None:
inference_method = VarDTC_minibatch(mpi_comm=mpi_comm)
else:
@@ -52,7 +52,7 @@ class SparseGP_MPI(SparseGP):
self.mpi_comm = mpi_comm
# Manage the data (Y) division
if mpi_comm != None:
if mpi_comm is not None:
from ..util.parallel import divide_data
N_start, N_end, N_list = divide_data(Y.shape[0], mpi_comm.rank, mpi_comm.size)
self.N_range = (N_start, N_end)
@@ -65,7 +65,7 @@ class SparseGP_MPI(SparseGP):
def __getstate__(self):
dc = super(SparseGP_MPI, self).__getstate__()
dc['mpi_comm'] = None
if self.mpi_comm != None:
if self.mpi_comm is not None:
del dc['N_range']
del dc['N_list']
del dc['Y_local']
@@ -81,7 +81,7 @@ class SparseGP_MPI(SparseGP):
@SparseGP.optimizer_array.setter
def optimizer_array(self, p):
if self.mpi_comm != None:
if self.mpi_comm is not None:
if self._IN_OPTIMIZATION_ and self.mpi_comm.rank==0:
self.mpi_comm.Bcast(np.int32(1),root=0)
self.mpi_comm.Bcast(p, root=0)

View file

@@ -118,7 +118,7 @@ class VarDTC_minibatch(LatentFunctionInference):
if not het_noise:
YRY_full = trYYT*beta
if self.mpi_comm != None:
if self.mpi_comm is not None:
from mpi4py import MPI
psi0_all = np.array(psi0_full)
psi1Y_all = psi1Y_full.copy()
@@ -142,7 +142,7 @@ class VarDTC_minibatch(LatentFunctionInference):
num_data, output_dim = Y.shape
input_dim = Z.shape[0]
if self.mpi_comm != None:
if self.mpi_comm is not None:
from mpi4py import MPI
num_data_all = np.array(num_data,dtype=np.int32)
self.mpi_comm.Allreduce([np.int32(num_data), MPI.INT], [num_data_all, MPI.INT])
@@ -384,7 +384,7 @@ def update_gradients(model, mpi_comm=None):
dL_dthetaL += grad_dict['dL_dthetaL']
# Gather the gradients from multiple MPI nodes
if mpi_comm != None:
if mpi_comm is not None:
from mpi4py import MPI
if het_noise:
raise "het_noise not implemented!"
@@ -407,7 +407,7 @@ def update_gradients(model, mpi_comm=None):
# update for the KL divergence
model.variational_prior.update_gradients_KL(X)
if mpi_comm != None:
if mpi_comm is not None:
from mpi4py import MPI
KL_div_all = np.array(KL_div)
mpi_comm.Allreduce([np.float64(KL_div), MPI.DOUBLE], [KL_div_all, MPI.DOUBLE])
@@ -467,7 +467,7 @@ def update_gradients_sparsegp(model, mpi_comm=None):
dL_dthetaL += grad_dict['dL_dthetaL']
# Gather the gradients from multiple MPI nodes
if mpi_comm != None:
if mpi_comm is not None:
from mpi4py import MPI
if het_noise:
raise "het_noise not implemented!"

View file

@@ -183,7 +183,7 @@ class SSMRD(Model):
@Model.optimizer_array.setter
def optimizer_array(self, p):
if self.mpi_comm != None:
if self.mpi_comm is not None:
if self._IN_OPTIMIZATION_ and self.mpi_comm.rank == 0:
self.mpi_comm.Bcast(np.int32(1), root=0)
self.mpi_comm.Bcast(p, root=0)

View file

@@ -167,7 +167,7 @@ class lvm(matplotlib_show):
def show_sensitivities(self):
# A click in the bar chart axis for selection a dimension.
if self.sense_axes != None:
if self.sense_axes is not None:
self.sense_axes.cla()
self.sense_axes.bar(np.arange(self.model.input_dim), self.model.input_sensitivity(), color='b')