diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 5110e9a5..0ef6e15e 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -82,7 +82,7 @@ class GP(Model):
                 inference_method = exact_gaussian_inference.ExactGaussianInference()
             else:
                 inference_method = expectation_propagation.EP()
-            print "defaulting to ", inference_method, "for latent function inference"
+            print("defaulting to ", inference_method, "for latent function inference")
         self.inference_method = inference_method
 
         logger.info("adding kernel and likelihood as parameters")
@@ -441,7 +441,7 @@ class GP(Model):
         try:
             super(GP, self).optimize(optimizer, start, **kwargs)
         except KeyboardInterrupt:
-            print "KeyboardInterrupt caught, calling on_optimization_end() to round things up"
+            print("KeyboardInterrupt caught, calling on_optimization_end() to round things up")
             self.inference_method.on_optimization_end()
             raise
diff --git a/GPy/core/model.py b/GPy/core/model.py
index 8eb34f33..348cebf1 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -82,7 +82,7 @@ class Model(Parameterized):
                 pool.close()  # signal that no more data coming in
                 pool.join()  # wait for all the tasks to complete
             except KeyboardInterrupt:
-                print "Ctrl+c received, terminating and joining pool."
+                print("Ctrl+c received, terminating and joining pool.")
                 pool.terminate()
                 pool.join()
@@ -95,10 +95,10 @@ class Model(Parameterized):
                     self.optimization_runs.append(jobs[i].get())
 
                 if verbose:
-                    print("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt))
+                    print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)))
             except Exception as e:
                 if robust:
-                    print("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts))
+                    print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)))
                 else:
                     raise e
@@ -237,10 +237,10 @@ class Model(Parameterized):
         """
         if self.is_fixed or self.size == 0:
-            print 'nothing to optimize'
+            print('nothing to optimize')
 
         if not self.update_model():
-            print "updates were off, setting updates on again"
+            print("updates were off, setting updates on again")
             self.update_model(True)
 
         if start == None:
@@ -305,7 +305,7 @@ class Model(Parameterized):
             transformed_index = (indices - (~self._fixes_).cumsum())[transformed_index[which[0]]]
 
             if transformed_index.size == 0:
-                print "No free parameters to check"
+                print("No free parameters to check")
                 return
 
             # just check the global ratio
@@ -342,7 +342,7 @@ class Model(Parameterized):
         header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
         header_string = map(lambda x: '|'.join(x), [header_string])
         separator = '-' * len(header_string[0])
-        print '\n'.join([header_string[0], separator])
+        print('\n'.join([header_string[0], separator]))
         if target_param is None:
             param_index = range(len(x))
             transformed_index = param_index
@@ -358,7 +358,7 @@ class Model(Parameterized):
             transformed_index = param_index
 
         if param_index.size == 0:
-            print "No free parameters to check"
+            print("No free parameters to check")
             return
 
         gradient = self._grads(x).copy()
@@ -392,7 +392,7 @@ class Model(Parameterized):
             ng = '%.6f' % float(numerical_gradient)
            df = '%1.e' % float(df_ratio)
             grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
-            print grad_string
+            print(grad_string)
 
         self.optimizer_array = x
         return ret
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index 5baa81c8..06991ab0 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -558,7 +558,7 @@ class Indexable(Nameable, Updateable):
         """
         if warning and reconstrained.size > 0:
             # TODO: figure out which parameters have changed and only print those
-            print "WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)
+            print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
         index = self._raveled_index()
         which.add(what, index)
         return index
diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py
index 6bdd8036..1a5ff123 100644
--- a/GPy/core/parameterization/parameterized.py
+++ b/GPy/core/parameterization/parameterized.py
@@ -325,7 +325,7 @@ class Parameterized(Parameterizable):
             self._notify_parent_change()
             self.parameters_changed()
         except Exception as e:
-            print "WARNING: caught exception {!s}, trying to continue".format(e)
+            print("WARNING: caught exception {!s}, trying to continue".format(e))
 
     def copy(self, memo=None):
         if memo is None:
diff --git a/GPy/core/parameterization/ties_and_remappings.py b/GPy/core/parameterization/ties_and_remappings.py
index f0bb2d61..bafa8a98 100644
--- a/GPy/core/parameterization/ties_and_remappings.py
+++ b/GPy/core/parameterization/ties_and_remappings.py
@@ -98,7 +98,7 @@ class Tie(Parameterized):
             if np.all(self.label_buf[idx]==0):
                 # None of p has been tied before.
                 tie_idx = self._expandTieParam(1)
-                print tie_idx
+                print(tie_idx)
                 tie_id = self.label_buf.max()+1
                 self.label_buf[tie_idx] = tie_id
             else:
@@ -189,14 +189,14 @@ class Tie(Parameterized):
                 b0 = self.label_buf==self.label_buf[self.buf_idx[i]]
                 b = self._highest_parent_.param_array[b0]!=self.tied_param[i]
                 if b.sum()==0:
-                    print 'XXX'
+                    print('XXX')
                     continue
                 elif b.sum()==1:
-                    print '!!!'
+                    print('!!!')
                     val = self._highest_parent_.param_array[b0][b][0]
                     self._highest_parent_.param_array[b0] = val
                 else:
-                    print '@@@'
+                    print('@@@')
                     self._highest_parent_.param_array[b0] = self.tied_param[i]
                 changed = True
         return changed
diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py
index 181c16e0..05051c92 100644
--- a/GPy/core/parameterization/transformations.py
+++ b/GPy/core/parameterization/transformations.py
@@ -72,7 +72,7 @@ class Logexp(Transformation):
         return np.einsum('i,i->i', df, np.where(f>_lim_val, 1., 1. - np.exp(-f)))
     def initialize(self, f):
         if np.any(f < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
     def __str__(self):
         return '+ve'
@@ -130,7 +130,7 @@ class NormalTheta(Transformation):
 
     def initialize(self, f):
         if np.any(f[self.var_indices] < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
             f[self.var_indices] = np.abs(f[self.var_indices])
         return f
@@ -177,7 +177,7 @@ class NormalNaturalAntti(NormalTheta):
 
     def initialize(self, f):
         if np.any(f[self.var_indices] < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
             f[self.var_indices] = np.abs(f[self.var_indices])
         return f
@@ -220,7 +220,7 @@ class NormalEta(Transformation):
 
     def initialize(self, f):
         if np.any(f[self.var_indices] < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
             f[self.var_indices] = np.abs(f[self.var_indices])
         return f
@@ -360,7 +360,7 @@ class LogexpNeg(Transformation):
         return np.einsum('i,i->i', df, np.where(f>_lim_val, -1, -1 + np.exp(-f)))
     def initialize(self, f):
         if np.any(f < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
     def __str__(self):
         return '+ve'
@@ -412,7 +412,7 @@ class LogexpClipped(Logexp):
         return np.einsum('i,i->i', df, gf) # np.where(f < self.lower, 0, gf)
     def initialize(self, f):
         if np.any(f < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
     def __str__(self):
         return '+ve_c'
@@ -428,7 +428,7 @@ class Exponent(Transformation):
         return np.einsum('i,i->i', df, f)
     def initialize(self, f):
         if np.any(f < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
     def __str__(self):
         return '+ve'
@@ -486,7 +486,7 @@ class Logistic(Transformation):
         return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference)
     def initialize(self, f):
         if np.any(np.logical_or(f < self.lower, f > self.upper)):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
             #return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
             #FIXME: Max, zeros_like right?
             return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)
diff --git a/GPy/core/parameterization/updateable.py b/GPy/core/parameterization/updateable.py
index 86446fa0..6abf0280 100644
--- a/GPy/core/parameterization/updateable.py
+++ b/GPy/core/parameterization/updateable.py
@@ -36,7 +36,7 @@ class Updateable(Observable):
         self.trigger_update()
 
     def toggle_update(self):
-        print "deprecated: toggle_update was renamed to update_toggle for easier access"
+        print("deprecated: toggle_update was renamed to update_toggle for easier access")
         self.update_toggle()
 
     def update_toggle(self):
         self.update_model(not self.update_model())
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index a9866f48..96e3dbe7 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -49,7 +49,7 @@ class SparseGP(GP):
             else:
                 #inference_method = ??
                 raise NotImplementedError, "what to do what to do?"
-            print "defaulting to ", inference_method, "for latent function inference"
+            print("defaulting to ", inference_method, "for latent function inference")
 
         self.Z = Param('inducing inputs', Z)
         self.num_inducing = Z.shape[0]
diff --git a/GPy/core/sparse_gp_mpi.py b/GPy/core/sparse_gp_mpi.py
index ac53d4ac..28de3124 100644
--- a/GPy/core/sparse_gp_mpi.py
+++ b/GPy/core/sparse_gp_mpi.py
@@ -56,7 +56,7 @@ class SparseGP_MPI(SparseGP):
             self.N_range = (N_start, N_end)
             self.N_list = np.array(N_list)
             self.Y_local = self.Y[N_start:N_end]
-            print 'MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range)
+            print('MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range))
             mpi_comm.Bcast(self.param_array, root=0)
 
         self.update_model(True)
diff --git a/GPy/core/verbose_optimization.py b/GPy/core/verbose_optimization.py
index 78b6127e..affa7d43 100644
--- a/GPy/core/verbose_optimization.py
+++ b/GPy/core/verbose_optimization.py
@@ -1,7 +1,7 @@
 # Copyright (c) 2012-2014, Max Zwiessele.
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-
+from __future__ import print_function
 import numpy as np
 import sys
 import time
@@ -65,8 +65,8 @@ class VerboseOptimization(object):
             #self.progress.add_class('box-flex1')
         else:
             self.exps = exponents(self.fnow, self.current_gradient)
-            print 'Running {} Code:'.format(self.opt_name)
-            print ' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters)
+            print('Running {} Code:'.format(self.opt_name))
+            print(' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters))
 
     def __enter__(self):
         self.start = time.time()
@@ -107,11 +107,11 @@ class VerboseOptimization(object):
                 b = np.any(n_exps < self.exps)
                 if a or b:
                     self.p_iter = self.iteration
-                    print ''
+                    print('')
                 if b:
                     self.exps = n_exps
-            print '\r',
-            print '{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
+            print('\r', end=' ')
+            print('{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
             sys.stdout.flush()
 
     def print_status(self, me, which=None):
@@ -140,6 +140,6 @@ class VerboseOptimization(object):
         self.print_out()
 
         if not self.ipython_notebook:
-            print
-            print 'Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)
-            print
+            print()
+            print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start))
+            print()