From d911766624eb7bc64733471a374674d090b30d43 Mon Sep 17 00:00:00 2001
From: Zhenwen Dai
Date: Wed, 14 May 2014 15:07:39 +0100
Subject: [PATCH] [mpi] add mpi into ssgplvm

---
 .../var_dtc_parallel.py                       | 144 +++++++---
 GPy/models/ss_gplvm.py                        | 268 ++----------------
 GPy/plotting/matplot_dep/variational_plots.py |   2 +-
 GPy/util/__init__.py                          |   1 +
 GPy/util/caching.py                           |   3 +-
 GPy/util/mpi.py                               |  35 +++
 6 files changed, 165 insertions(+), 288 deletions(-)
 create mode 100644 GPy/util/mpi.py

diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py
index f2a3c3fd..ef298a9b 100644
--- a/GPy/inference/latent_function_inference/var_dtc_parallel.py
+++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py
@@ -9,6 +9,11 @@
 import numpy as np
 from ...util.misc import param_to_array
 log_2_pi = np.log(2*np.pi)
 
+try:
+    from mpi4py import MPI
+except:
+    pass
+
 class VarDTC_minibatch(object):
     """
     An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
@@ -20,9 +25,10 @@ class VarDTC_minibatch(object):
     """
     const_jitter = 1e-6
-    def __init__(self, batchsize, limit=1):
+    def __init__(self, batchsize, limit=1, mpi_comm=None):
 
         self.batchsize = batchsize
+        self.mpi_comm = mpi_comm
 
         # Cache functions
         from ...util.caching import Cacher
@@ -51,42 +57,23 @@ class VarDTC_minibatch(object):
         else:
             return jitchol(tdot(Y))
 
-    def inference_likelihood(self, kern, X, Z, likelihood, Y):
-        """
-        The first phase of inference:
-        Compute: log-likelihood, dL_dKmm
-
-        Cached intermediate results: Kmm, KmmInv,
-        """
-
+    def gatherPsiStat(self, kern, X, Z, Y, beta, uncertain_inputs, het_noise):
+        num_inducing = Z.shape[0]
         num_data, output_dim = Y.shape
-        if isinstance(X, VariationalPosterior):
-            uncertain_inputs = True
-        else:
-            uncertain_inputs = False
-
-        #see whether we've got a different noise variance for each datum
-        beta = 1./np.fmax(likelihood.variance, 1e-6)
-        het_noise = beta.size > 1
-        if het_noise:
-            self.batchsize = 1
-        # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
-        #self.YYTfactor = beta*self.get_YYTfactor(Y)
-        YYT_factor = Y
         trYYT = self.get_trYYT(Y)
-
+        psi2_full = np.zeros((num_inducing,num_inducing))
         psi1Y_full = np.zeros((output_dim,num_inducing)) # DxM
-        psi0_full = 0
-        YRY_full = 0
+        psi0_full = 0.
+        YRY_full = 0.
         for n_start in xrange(0,num_data,self.batchsize):
             n_end = min(self.batchsize+n_start, num_data)
 
-            Y_slice = YYT_factor[n_start:n_end]
+            Y_slice = Y[n_start:n_end]
             X_slice = X[n_start:n_end]
 
             if uncertain_inputs:
@@ -123,7 +110,47 @@ class VarDTC_minibatch(object):
             psi1Y_full *= beta
             psi2_full *= beta
             YRY_full = trYYT*beta
+
+        if self.mpi_comm != None:
+            psi0_all = np.array(psi0_full)
+            psi1Y_all = psi1Y_full.copy()
+            psi2_all = psi2_full.copy()
+            YRY_all = np.array(YRY_full)
+            self.mpi_comm.Allreduce([psi0_full, MPI.DOUBLE], [psi0_all, MPI.DOUBLE])
+            self.mpi_comm.Allreduce([psi1Y_full, MPI.DOUBLE], [psi1Y_all, MPI.DOUBLE])
+            self.mpi_comm.Allreduce([psi2_full, MPI.DOUBLE], [psi2_all, MPI.DOUBLE])
+            self.mpi_comm.Allreduce([YRY_full, MPI.DOUBLE], [YRY_all, MPI.DOUBLE])
+            return psi0_all, psi1Y_all, psi2_all, YRY_all
+        return psi0_full, psi1Y_full, psi2_full, YRY_full
+
+    def inference_likelihood(self, kern, X, Z, likelihood, Y):
+        """
+        The first phase of inference:
+        Compute: log-likelihood, dL_dKmm
+
+        Cached intermediate results: Kmm, KmmInv,
+        """
+
+        num_data, output_dim = Y.shape
+        if self.mpi_comm != None:
+            num_data_all = np.array(num_data,dtype=np.int32)
+            self.mpi_comm.Allreduce([np.int32(num_data), MPI.INT], [num_data_all, MPI.INT])
+            num_data = num_data_all
+
+        if isinstance(X, VariationalPosterior):
+            uncertain_inputs = True
+        else:
+            uncertain_inputs = False
+
+        #see whether we've got a different noise variance for each datum
+        beta = 1./np.fmax(likelihood.variance, 1e-6)
+        het_noise = beta.size > 1
+        if het_noise:
+            self.batchsize = 1
+
+        psi0_full, psi1Y_full, psi2_full, YRY_full = self.gatherPsiStat(kern, X, Z, Y, beta, uncertain_inputs, het_noise)
+
         #======================================================================
         # Compute Common Components
         #======================================================================
@@ -212,7 +239,6 @@ class VarDTC_minibatch(object):
             isEnd = False
             self.batch_pos = n_end
 
-        num_slice = n_end-n_start
         Y_slice = YYT_factor[n_start:n_end]
         X_slice = X[n_start:n_end]
 
@@ -283,28 +309,32 @@ class VarDTC_minibatch(object):
 
     return isEnd, (n_start,n_end), grad_dict
 
-def update_gradients(model):
-    model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, model.X, model.Z, model.likelihood, model.Y)
+def update_gradients(model, mpi_comm=None):
+    if mpi_comm == None:
+        Y = model.Y
+        X = model.X
+    else:
+        Y = model.Y_local
+        X = model.X_local
+
+    model._log_marginal_likelihood, dL_dKmm, model.posterior = model.inference_method.inference_likelihood(model.kern, X, model.Z, model.likelihood, Y)
 
     het_noise = model.likelihood.variance.size > 1
 
     if het_noise:
         dL_dthetaL = np.empty((model.Y.shape[0],))
     else:
-        dL_dthetaL = 0
-
-    #gradients w.r.t. kernel
-    model.kern.update_gradients_full(dL_dKmm, model.Z, None)
+        dL_dthetaL = np.float64(0.)
+
     kern_grad = model.kern.gradient.copy()
-
-    #gradients w.r.t. Z
-    model.Z.gradient = model.kern.gradients_X(dL_dKmm, model.Z)
+    kern_grad[:] = 0.
+    model.Z.gradient = 0.
 
     isEnd = False
     while not isEnd:
-        isEnd, n_range, grad_dict = model.inference_method.inference_minibatch(model.kern, model.X, model.Z, model.likelihood, model.Y)
+        isEnd, n_range, grad_dict = model.inference_method.inference_minibatch(model.kern, X, model.Z, model.likelihood, Y)
         if isinstance(model.X, VariationalPosterior):
-            X_slice = model.X[n_range[0]:n_range[1]]
+            X_slice = model.X[model.Y_range[0]+n_range[0]:model.Y_range[0]+n_range[1]]
             #gradients w.r.t. kernel
             model.kern.update_gradients_expectations(variational_posterior=X_slice, Z=model.Z, dL_dpsi0=grad_dict['dL_dpsi0'], dL_dpsi1=grad_dict['dL_dpsi1'], dL_dpsi2=grad_dict['dL_dpsi2'])
@@ -322,14 +352,36 @@ def update_gradients(model):
             dL_dthetaL[n_range[0]:n_range[1]] = grad_dict['dL_dthetaL']
         else:
             dL_dthetaL += grad_dict['dL_dthetaL']
-
-    # Set the gradients w.r.t. kernel
-    model.kern.gradient = kern_grad
-
-    # Update Log-likelihood
-    model._log_marginal_likelihood -= model.variational_prior.KL_divergence(model.X)
-    # update for the KL divergence
-    model.variational_prior.update_gradients_KL(model.X)
+    # Gather the gradients from multiple MPI nodes
+    if mpi_comm != None:
+        if het_noise:
+            assert False, "Not implemented!"
+        kern_grad_all = kern_grad.copy()
+        Z_grad_all = model.Z.gradient.copy()
+        mpi_comm.Allreduce([kern_grad, MPI.DOUBLE], [kern_grad_all, MPI.DOUBLE])
+        mpi_comm.Allreduce([model.Z.gradient, MPI.DOUBLE], [Z_grad_all, MPI.DOUBLE])
+        kern_grad = kern_grad_all
+        model.Z.gradient = Z_grad_all
+
+    #gradients w.r.t. kernel
+    model.kern.update_gradients_full(dL_dKmm, model.Z, None)
+    model.kern.gradient += kern_grad
+
+    #gradients w.r.t. Z
+    model.Z.gradient += model.kern.gradients_X(dL_dKmm, model.Z)
+
+    # Update Log-likelihood
+    KL_div = model.variational_prior.KL_divergence(X)
+    # update for the KL divergence
+    model.variational_prior.update_gradients_KL(X)
+
+    if mpi_comm != None:
+        KL_div_all = np.array(KL_div)
+        mpi_comm.Allreduce([np.float64(KL_div), MPI.DOUBLE], [KL_div_all, MPI.DOUBLE])
+        KL_div = KL_div_all
+        [mpi_comm.Allgatherv([pp.copy(), MPI.DOUBLE], [pa, (model.Y_list*pa.shape[-1], None), MPI.DOUBLE]) for pp,pa in zip(model.get_X_gradients(X),model.get_X_gradients(model.X))]
+    model._log_marginal_likelihood -= KL_div
+
     # dL_dthetaL
     model.likelihood.update_gradients(dL_dthetaL)
 
diff --git a/GPy/models/ss_gplvm.py b/GPy/models/ss_gplvm.py
index 7eff3f89..cc57b191 100644
--- a/GPy/models/ss_gplvm.py
+++ b/GPy/models/ss_gplvm.py
@@ -28,8 +28,10 @@ class SSGPLVM(SparseGP):
     """
     def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
-                 Z=None, kernel=None, inference_method=None, likelihood=None, name='Spike-and-Slab GPLVM', group_spike=False, **kwargs):
+                 Z=None, kernel=None, inference_method=None, likelihood=None, name='Spike-and-Slab GPLVM', group_spike=False, mpi_comm=None, **kwargs):
+        self.mpi_comm = mpi_comm
+
         if X == None:
             from ..util.initialization import initialize_latent
             X, fracs = initialize_latent(init, input_dim, Y)
@@ -52,19 +54,25 @@ class SSGPLVM(SparseGP):
             Z = np.random.permutation(X.copy())[:num_inducing]
         assert Z.shape[1] == X.shape[1]
 
+        pi = np.empty((input_dim))
+        pi[:] = 0.5
+
+        if mpi_comm != None:
+            mpi_comm.Bcast(X, root=0)
+            mpi_comm.Bcast(fracs, root=0)
+            mpi_comm.Bcast(X_variance, root=0)
+            mpi_comm.Bcast(gamma, root=0)
+            mpi_comm.Bcast(Z, root=0)
+            mpi_comm.Bcast(pi, root=0)
+
         if likelihood is None:
             likelihood = Gaussian()
 
         if kernel is None:
             kernel = kern.RBF(input_dim, lengthscale=fracs, ARD=True) # + kern.white(input_dim)
-
-        pi = np.empty((input_dim))
-        pi[:] = 0.5
+
         self.variational_prior = SpikeAndSlabPrior(pi=pi) # the prior probability of the latent binary variable b
 
-        X = np.asfortranarray(X)
-        X_variance = np.asfortranarray(X_variance)
-        gamma = np.asfortranarray(gamma)
         X = SpikeAndSlabPosterior(X, X_variance, gamma)
 
         if group_spike:
@@ -75,13 +83,25 @@ class SSGPLVM(SparseGP):
         self.add_parameter(self.X, index=0)
         self.add_parameter(self.variational_prior)
 
+        if mpi_comm != None:
+            from ..util.mpi import divide_data
+            Y_start, Y_end, Y_list = divide_data(Y.shape[0], mpi_comm)
+            self.Y_local = self.Y[Y_start:Y_end]
+            self.X_local = self.X[Y_start:Y_end]
+            self.Y_range = (Y_start, Y_end)
+            self.Y_list = np.array(Y_list)
+
     def set_X_gradients(self, X, X_grad):
         """Set the gradients of the posterior distribution of X in its specific form."""
         X.mean.gradient, X.variance.gradient, X.binary_prob.gradient = X_grad
+
+    def get_X_gradients(self, X):
+        """Get the gradients of the posterior distribution of X in its specific form."""
+        return X.mean.gradient, X.variance.gradient, X.binary_prob.gradient
 
     def parameters_changed(self):
         if isinstance(self.inference_method, VarDTC_GPU) or isinstance(self.inference_method, VarDTC_minibatch):
-            update_gradients(self)
+            update_gradients(self, mpi_comm=self.mpi_comm)
 
         return super(SSGPLVM, self).parameters_changed()
 
@@ -105,235 +125,3 @@ class SSGPLVM(SparseGP):
         return dim_reduction_plots.plot_latent(self, plot_inducing=plot_inducing, *args, **kwargs)
 
-    def do_test_latents(self, Y):
-        """
-        Compute the latent representation for a set of new points Y
-
-        Notes:
-        This will only work with a univariate Gaussian likelihood (for now)
-        """
-        assert not self.likelihood.is_heteroscedastic
-        N_test = Y.shape[0]
-        input_dim = self.Z.shape[1]
-        means = np.zeros((N_test, input_dim))
-        covars = np.zeros((N_test, input_dim))
-
-        dpsi0 = -0.5 * self.output_dim * self.likelihood.precision
-        dpsi2 = self.dL_dpsi2[0][None, :, :] # TODO: this may change if we ignore het. likelihoods
-        V = self.likelihood.precision * Y
-
-        #compute CPsi1V
-        if self.Cpsi1V is None:
-            psi1V = np.dot(self.psi1.T, self.likelihood.V)
-            tmp, _ = linalg.dtrtrs(self._Lm, np.asfortranarray(psi1V), lower=1, trans=0)
-            tmp, _ = linalg.dpotrs(self.LB, tmp, lower=1)
-            self.Cpsi1V, _ = linalg.dtrtrs(self._Lm, tmp, lower=1, trans=1)
-
-        dpsi1 = np.dot(self.Cpsi1V, V.T)
-
-        start = np.zeros(self.input_dim * 2)
-
-        for n, dpsi1_n in enumerate(dpsi1.T[:, :, None]):
-            args = (self.kern, self.Z, dpsi0, dpsi1_n.T, dpsi2)
-            xopt, fopt, neval, status = SCG(f=latent_cost, gradf=latent_grad, x=start, optargs=args, display=False)
-
-            mu, log_S = xopt.reshape(2, 1, -1)
-            means[n] = mu[0].copy()
-            covars[n] = np.exp(log_S[0]).copy()
-
-        return means, covars
-
-    def dmu_dX(self, Xnew):
-        """
-        Calculate the gradient of the prediction at Xnew w.r.t Xnew.
-        """
-        dmu_dX = np.zeros_like(Xnew)
-        for i in range(self.Z.shape[0]):
-            dmu_dX += self.kern.dK_dX(self.Cpsi1Vf[i:i + 1, :], Xnew, self.Z[i:i + 1, :])
-        return dmu_dX
-
-    def dmu_dXnew(self, Xnew):
-        """
-        Individual gradient of prediction at Xnew w.r.t. each sample in Xnew
-        """
-        dK_dX = np.zeros((Xnew.shape[0], self.num_inducing))
-        ones = np.ones((1, 1))
-        for i in range(self.Z.shape[0]):
-            dK_dX[:, i] = self.kern.dK_dX(ones, Xnew, self.Z[i:i + 1, :]).sum(-1)
-        return np.dot(dK_dX, self.Cpsi1Vf)
-
-    def plot_steepest_gradient_map(self, fignum=None, ax=None, which_indices=None, labels=None, data_labels=None, data_marker='o', data_s=40, resolution=20, aspect='auto', updates=False, ** kwargs):
-        input_1, input_2 = significant_dims = most_significant_input_dimensions(self, which_indices)
-
-        X = np.zeros((resolution ** 2, self.input_dim))
-        indices = np.r_[:X.shape[0]]
-        if labels is None:
-            labels = range(self.output_dim)
-
-        def plot_function(x):
-            X[:, significant_dims] = x
-            dmu_dX = self.dmu_dXnew(X)
-            argmax = np.argmax(dmu_dX, 1)
-            return dmu_dX[indices, argmax], np.array(labels)[argmax]
-
-        if ax is None:
-            fig = pyplot.figure(num=fignum)
-            ax = fig.add_subplot(111)
-
-        if data_labels is None:
-            data_labels = np.ones(self.num_data)
-        ulabels = []
-        for lab in data_labels:
-            if not lab in ulabels:
-                ulabels.append(lab)
-        marker = itertools.cycle(list(data_marker))
-        from GPy.util import Tango
-        for i, ul in enumerate(ulabels):
-            if type(ul) is np.string_:
-                this_label = ul
-            elif type(ul) is np.int64:
-                this_label = 'class %i' % ul
-            else:
-                this_label = 'class %i' % i
-            m = marker.next()
-            index = np.nonzero(data_labels == ul)[0]
-            x = self.X[index, input_1]
-            y = self.X[index, input_2]
-            ax.scatter(x, y, marker=m, s=data_s, color=Tango.nextMedium(), label=this_label)
-
-        ax.set_xlabel('latent dimension %i' % input_1)
-        ax.set_ylabel('latent dimension %i' % input_2)
-
-        from matplotlib.cm import get_cmap
-        from GPy.util.latent_space_visualizations.controllers.imshow_controller import ImAnnotateController
-        if not 'cmap' in kwargs.keys():
-            kwargs.update(cmap=get_cmap('jet'))
-        controller = ImAnnotateController(ax,
-                                          plot_function,
-                                          tuple(self.X.min(0)[:, significant_dims]) + tuple(self.X.max(0)[:, significant_dims]),
-                                          resolution=resolution,
-                                          aspect=aspect,
-                                          **kwargs)
-        ax.legend()
-        ax.figure.tight_layout()
-        if updates:
-            pyplot.show()
-            clear = raw_input('Enter to continue')
-            if clear.lower() in 'yes' or clear == '':
-                controller.deactivate()
-        return controller.view
-
-    def plot_X_1d(self, fignum=None, ax=None, colors=None):
-        """
-        Plot latent space X in 1D:
-
-            - if fig is given, create input_dim subplots in fig and plot in these
-            - if ax is given plot input_dim 1D latent space plots of X into each `axis`
-            - if neither fig nor ax is given create a figure with fignum and plot in there
-
-        colors:
-            colors of different latent space dimensions input_dim
-
-        """
-        import pylab
-        if ax is None:
-            fig = pylab.figure(num=fignum, figsize=(8, min(12, (2 * self.X.shape[1]))))
-        if colors is None:
-            colors = pylab.gca()._get_lines.color_cycle
-            pylab.clf()
-        else:
-            colors = iter(colors)
-        plots = []
-        x = np.arange(self.X.shape[0])
-        for i in range(self.X.shape[1]):
-            if ax is None:
-                a = fig.add_subplot(self.X.shape[1], 1, i + 1)
-            elif isinstance(ax, (tuple, list)):
-                a = ax[i]
-            else:
-                raise ValueError("Need one ax per latent dimnesion input_dim")
-            a.plot(self.X, c='k', alpha=.3)
-            plots.extend(a.plot(x, self.X.T[i], c=colors.next(), label=r"$\mathbf{{X_{{{}}}}}$".format(i)))
-            a.fill_between(x,
-                           self.X.T[i] - 2 * np.sqrt(self.X_variance.T[i]),
-                           self.X.T[i] + 2 * np.sqrt(self.X_variance.T[i]),
-                           facecolor=plots[-1].get_color(),
-                           alpha=.3)
-            a.legend(borderaxespad=0.)
-            a.set_xlim(x.min(), x.max())
-            if i < self.X.shape[1] - 1:
-                a.set_xticklabels('')
-        pylab.draw()
-        if ax is None:
-            fig.tight_layout(h_pad=.01) # , rect=(0, 0, 1, .95))
-        return fig
-
-    def getstate(self):
-        """
-        Get the current state of the class,
-        here just all the indices, rest can get recomputed
-        """
-        return SparseGP._getstate(self) + [self.init]
-
-    def setstate(self, state):
-        self._const_jitter = None
-        self.init = state.pop()
-        SparseGP._setstate(self, state)
-
-
-def latent_cost_and_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
-    """
-    objective function for fitting the latent variables for test points
-    (negative log-likelihood: should be minimised!)
-    """
-    mu, log_S = mu_S.reshape(2, 1, -1)
-    S = np.exp(log_S)
-
-    psi0 = kern.psi0(Z, mu, S)
-    psi1 = kern.psi1(Z, mu, S)
-    psi2 = kern.psi2(Z, mu, S)
-
-    lik = dL_dpsi0 * psi0 + np.dot(dL_dpsi1.flatten(), psi1.flatten()) + np.dot(dL_dpsi2.flatten(), psi2.flatten()) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)
-
-    mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0, Z, mu, S)
-    mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1, Z, mu, S)
-    mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2, Z, mu, S)
-
-    dmu = mu0 + mu1 + mu2 - mu
-    # dS = S0 + S1 + S2 -0.5 + .5/S
-    dlnS = S * (S0 + S1 + S2 - 0.5) + .5
-    return -lik, -np.hstack((dmu.flatten(), dlnS.flatten()))
-
-def latent_cost(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
-    """
-    objective function for fitting the latent variables (negative log-likelihood: should be minimised!)
-    This is the same as latent_cost_and_grad but only for the objective
-    """
-    mu, log_S = mu_S.reshape(2, 1, -1)
-    S = np.exp(log_S)
-
-    psi0 = kern.psi0(Z, mu, S)
-    psi1 = kern.psi1(Z, mu, S)
-    psi2 = kern.psi2(Z, mu, S)
-
-    lik = dL_dpsi0 * psi0 + np.dot(dL_dpsi1.flatten(), psi1.flatten()) + np.dot(dL_dpsi2.flatten(), psi2.flatten()) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)
-    return -float(lik)
-
-def latent_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):
-    """
-    This is the same as latent_cost_and_grad but only for the grad
-    """
-    mu, log_S = mu_S.reshape(2, 1, -1)
-    S = np.exp(log_S)
-
-    mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0, Z, mu, S)
-    mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1, Z, mu, S)
-    mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2, Z, mu, S)
-
-    dmu = mu0 + mu1 + mu2 - mu
-    # dS = S0 + S1 + S2 -0.5 + .5/S
-    dlnS = S * (S0 + S1 + S2 - 0.5) + .5
-
-    return -np.hstack((dmu.flatten(), dlnS.flatten()))
-
-
diff --git a/GPy/plotting/matplot_dep/variational_plots.py b/GPy/plotting/matplot_dep/variational_plots.py
index 27cb4051..e97f001b 100644
--- a/GPy/plotting/matplot_dep/variational_plots.py
+++ b/GPy/plotting/matplot_dep/variational_plots.py
@@ -93,7 +93,7 @@ def plot_SpikeSlab(parameterized, fignum=None, ax=None, colors=None, side_by_sid
             a.set_xticklabels('')
         # binary prob plot
         a = fig.add_subplot(*sub2)
-        a.bar(x,gamma[:,i],bottom=0.,linewidth=0,align='center')
+        a.bar(x,gamma[:,i],bottom=0.,linewidth=0,width=1.0,align='center')
         a.set_xlim(x.min(), x.max())
         a.set_ylim([0.,1.])
         pb.draw()
diff --git a/GPy/util/__init__.py b/GPy/util/__init__.py
index 77312278..3a9fbdb8 100644
--- a/GPy/util/__init__.py
+++ b/GPy/util/__init__.py
@@ -16,6 +16,7 @@
 import diag
 import initialization
 import multioutput
 import linalg_gpu
+import mpi
 
 try:
     import sympy
diff --git a/GPy/util/caching.py b/GPy/util/caching.py
index 1738cad1..8c26629c 100644
--- a/GPy/util/caching.py
+++ b/GPy/util/caching.py
@@ -102,7 +102,8 @@ class Cacher(object):
         return Cacher(self.operation, self.limit, self.ignore_args, self.force_kwargs)
 
     def __getstate__(self, memo=None):
-        raise NotImplementedError, "Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation))
+        return (self.limit)
+#        raise NotImplementedError, "Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation))
 
     def __setstate__(self, memo=None):
         raise NotImplementedError, "Trying to pickle Cacher object with function {}, pickling functions not possible.".format(str(self.operation))
 
diff --git a/GPy/util/mpi.py b/GPy/util/mpi.py
new file mode 100644
index 00000000..e3d7d928
--- /dev/null
+++ b/GPy/util/mpi.py
@@ -0,0 +1,35 @@
+"""
+The tools for mpi
+"""
+
+try:
+    import numpy as np
+    from mpi4py import MPI
+    numpy_to_MPI_typemap = {
+        np.dtype(np.float64) : MPI.DOUBLE,
+        np.dtype(np.float32) : MPI.FLOAT,
+        np.dtype(np.int) : MPI.INT,
+        np.dtype(np.int8) : MPI.CHAR,
+        np.dtype(np.uint8) : MPI.UNSIGNED_CHAR,
+        np.dtype(np.int32) : MPI.INT,
+        np.dtype(np.uint32) : MPI.UNSIGNED_INT,
+    }
+except:
+    pass
+
+def divide_data(datanum, comm):
+
+    residue = (datanum)%comm.size
+    datanum_list = np.empty((comm.size),dtype=np.int32)
+    for i in xrange(comm.size):
+        if i
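# ----------------------------------------------------------------------
# Editor's note (not part of the patch): a minimal usage sketch.
# The hunk above is cut off inside divide_data; its remaining body is not
# reproduced here. The sketch below only illustrates how the mpi_comm
# keyword that this patch adds to SSGPLVM and VarDTC_minibatch would
# typically be driven. The data file, latent dimension, inducing-point
# count, batch size and the optimize() call are illustrative assumptions,
# not taken from the patch.
#
# Launch with, e.g.:  mpiexec -n 4 python ssgplvm_mpi_demo.py
#
# import numpy as np
# from mpi4py import MPI
# from GPy.models.ss_gplvm import SSGPLVM
# from GPy.inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
#
# comm = MPI.COMM_WORLD
# Y = np.loadtxt('data.txt')   # every rank loads the full data; the model slices out its local block
# m = SSGPLVM(Y, input_dim=5, num_inducing=20,
#             inference_method=VarDTC_minibatch(batchsize=100, mpi_comm=comm),
#             mpi_comm=comm)   # psi-statistics and gradients are combined with Allreduce across ranks
# m.optimize()
# ----------------------------------------------------------------------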