Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-08 11:32:39 +02:00)
Convert `print` statements to the `print()` function for Python 3 compatibility. This drops support for Python versions earlier than 2.6.
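For context, the 2.6 floor comes from `from __future__ import print_function`, which first shipped in Python 2.6; older interpreters reject the import at compile time. A minimal sketch of the pattern this commit relies on (the `report` helper is hypothetical, for illustration only):

# Sketch of the compatibility pattern; `report` is a hypothetical helper.
# `print_function` exists only in Python 2.6+, so the import itself is a
# SyntaxError on anything older.
from __future__ import print_function

def report(name, value):
    # With the future import, print is the Python 3 style builtin even on
    # Python 2.6/2.7, so keyword arguments such as sep and end work.
    print(name, value, sep=': ')

report('inference_method', 'EP')  # -> inference_method: EP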
This commit is contained in:
parent 2ca24a88f5
commit 1521b3e260
10 changed files with 37 additions and 37 deletions
@@ -82,7 +82,7 @@ class GP(Model):
             inference_method = exact_gaussian_inference.ExactGaussianInference()
         else:
             inference_method = expectation_propagation.EP()
-        print "defaulting to ", inference_method, "for latent function inference"
+        print("defaulting to ", inference_method, "for latent function inference")
         self.inference_method = inference_method

         logger.info("adding kernel and likelihood as parameters")
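Worth noting: only one file in this commit is shown gaining `from __future__ import print_function` (see the `@@ -1,7 +1,7 @@` hunk below). Without that import, Python 2 still parses `print(...)` as a print statement, so a multi-argument call like the one above prints a tuple. A quick illustration, assuming a plain Python 2 interpreter without the future import:

# Python 2 without the __future__ import: print is a statement and the
# parenthesized, comma-separated arguments form a tuple expression.
print("defaulting to ", "EP()", "for latent function inference")
# Python 2 output: ('defaulting to ', 'EP()', 'for latent function inference')
# Python 3 output: defaulting to  EP() for latent function inference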
@@ -441,7 +441,7 @@ class GP(Model):
         try:
             super(GP, self).optimize(optimizer, start, **kwargs)
         except KeyboardInterrupt:
-            print "KeyboardInterrupt caught, calling on_optimization_end() to round things up"
+            print("KeyboardInterrupt caught, calling on_optimization_end() to round things up")
             self.inference_method.on_optimization_end()
             raise

@@ -82,7 +82,7 @@ class Model(Parameterized):
             pool.close()  # signal that no more data coming in
             pool.join()   # wait for all the tasks to complete
         except KeyboardInterrupt:
-            print "Ctrl+c received, terminating and joining pool."
+            print("Ctrl+c received, terminating and joining pool.")
             pool.terminate()
             pool.join()

@@ -95,10 +95,10 @@ class Model(Parameterized):
                 self.optimization_runs.append(jobs[i].get())

                 if verbose:
-                    print("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt))
+                    print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)))
             except Exception as e:
                 if robust:
-                    print("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts))
+                    print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)))
                 else:
                     raise e

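The doubled parentheses in the two converted lines above are a typical 2to3 artifact: when a Python 2 print statement already parenthesized its single argument, the fixer wraps it again. The result is redundant but harmless, since the extra pair is plain expression grouping:

# Both calls pass the same single string argument; the outer parentheses
# added by 2to3 are just grouping and change nothing.
i, num_restarts = 0, 10
print(("Optimization restart {0}/{1}".format(i + 1, num_restarts)))
print("Optimization restart {0}/{1}".format(i + 1, num_restarts))
# Both print: Optimization restart 1/10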
@@ -237,10 +237,10 @@ class Model(Parameterized):

         """
         if self.is_fixed or self.size == 0:
-            print 'nothing to optimize'
+            print('nothing to optimize')

         if not self.update_model():
-            print "updates were off, setting updates on again"
+            print("updates were off, setting updates on again")
             self.update_model(True)

         if start == None:

@@ -305,7 +305,7 @@ class Model(Parameterized):
             transformed_index = (indices - (~self._fixes_).cumsum())[transformed_index[which[0]]]

         if transformed_index.size == 0:
-            print "No free parameters to check"
+            print("No free parameters to check")
             return

         # just check the global ratio

@@ -342,7 +342,7 @@ class Model(Parameterized):
         header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
         header_string = map(lambda x: '|'.join(x), [header_string])
         separator = '-' * len(header_string[0])
-        print '\n'.join([header_string[0], separator])
+        print('\n'.join([header_string[0], separator]))
         if target_param is None:
             param_index = range(len(x))
             transformed_index = param_index

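One latent Python 3 issue survives in the context lines above: `map()` returns a list in Python 2 but a lazy iterator in Python 3, so the subsequent `header_string[0]` would raise a TypeError there. A sketch of the usual fix, with hypothetical sample data:

# Python 2: map returns a list, so indexing works.
# Python 3: map returns an iterator; wrap it in list() before indexing.
header = ['name', 'value', 'constraint']
header_string = list(map(lambda x: '|'.join(x), [header]))
print(header_string[0])  # -> name|value|constraint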
@@ -358,7 +358,7 @@ class Model(Parameterized):
             transformed_index = param_index

         if param_index.size == 0:
-            print "No free parameters to check"
+            print("No free parameters to check")
             return

         gradient = self._grads(x).copy()

@@ -392,7 +392,7 @@ class Model(Parameterized):
             ng = '%.6f' % float(numerical_gradient)
             df = '%1.e' % float(df_ratio)
             grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
-            print grad_string
+            print(grad_string)

         self.optimizer_array = x
         return ret

@@ -558,7 +558,7 @@ class Indexable(Nameable, Updateable):
         """
         if warning and reconstrained.size > 0:
             # TODO: figure out which parameters have changed and only print those
-            print "WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)
+            print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
         index = self._raveled_index()
         which.add(what, index)
         return index

@@ -325,7 +325,7 @@ class Parameterized(Parameterizable):
             self._notify_parent_change()
             self.parameters_changed()
         except Exception as e:
-            print "WARNING: caught exception {!s}, trying to continue".format(e)
+            print("WARNING: caught exception {!s}, trying to continue".format(e))

     def copy(self, memo=None):
         if memo is None:

@@ -98,7 +98,7 @@ class Tie(Parameterized):
         if np.all(self.label_buf[idx]==0):
             # None of p has been tied before.
             tie_idx = self._expandTieParam(1)
-            print tie_idx
+            print(tie_idx)
             tie_id = self.label_buf.max()+1
             self.label_buf[tie_idx] = tie_id
         else:

@@ -189,14 +189,14 @@ class Tie(Parameterized):
                 b0 = self.label_buf==self.label_buf[self.buf_idx[i]]
                 b = self._highest_parent_.param_array[b0]!=self.tied_param[i]
                 if b.sum()==0:
-                    print 'XXX'
+                    print('XXX')
                     continue
                 elif b.sum()==1:
-                    print '!!!'
+                    print('!!!')
                     val = self._highest_parent_.param_array[b0][b][0]
                     self._highest_parent_.param_array[b0] = val
                 else:
-                    print '@@@'
+                    print('@@@')
                     self._highest_parent_.param_array[b0] = self.tied_param[i]
                 changed = True
         return changed

@@ -72,7 +72,7 @@ class Logexp(Transformation):
         return np.einsum('i,i->i', df, np.where(f>_lim_val, 1., 1. - np.exp(-f)))
     def initialize(self, f):
         if np.any(f < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
     def __str__(self):
         return '+ve'

@@ -130,7 +130,7 @@ class NormalTheta(Transformation):

     def initialize(self, f):
         if np.any(f[self.var_indices] < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
             f[self.var_indices] = np.abs(f[self.var_indices])
         return f

@@ -177,7 +177,7 @@ class NormalNaturalAntti(NormalTheta):

     def initialize(self, f):
         if np.any(f[self.var_indices] < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
             f[self.var_indices] = np.abs(f[self.var_indices])
         return f

@@ -220,7 +220,7 @@ class NormalEta(Transformation):

     def initialize(self, f):
         if np.any(f[self.var_indices] < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
             f[self.var_indices] = np.abs(f[self.var_indices])
         return f

@@ -360,7 +360,7 @@ class LogexpNeg(Transformation):
         return np.einsum('i,i->i', df, np.where(f>_lim_val, -1, -1 + np.exp(-f)))
     def initialize(self, f):
         if np.any(f < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
     def __str__(self):
         return '+ve'

@@ -412,7 +412,7 @@ class LogexpClipped(Logexp):
         return np.einsum('i,i->i', df, gf) # np.where(f < self.lower, 0, gf)
     def initialize(self, f):
         if np.any(f < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
     def __str__(self):
         return '+ve_c'

@@ -428,7 +428,7 @@ class Exponent(Transformation):
         return np.einsum('i,i->i', df, f)
     def initialize(self, f):
         if np.any(f < 0.):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
         return np.abs(f)
     def __str__(self):
         return '+ve'

@@ -486,7 +486,7 @@ class Logistic(Transformation):
         return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference)
     def initialize(self, f):
         if np.any(np.logical_or(f < self.lower, f > self.upper)):
-            print "Warning: changing parameters to satisfy constraints"
+            print("Warning: changing parameters to satisfy constraints")
             #return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
             #FIXME: Max, zeros_like right?
         return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)

@@ -36,7 +36,7 @@ class Updateable(Observable):
         self.trigger_update()

     def toggle_update(self):
-        print "deprecated: toggle_update was renamed to update_toggle for easier access"
+        print("deprecated: toggle_update was renamed to update_toggle for easier access")
         self.update_toggle()
     def update_toggle(self):
         self.update_model(not self.update_model())

@@ -49,7 +49,7 @@ class SparseGP(GP):
         else:
             #inference_method = ??
             raise NotImplementedError, "what to do what to do?"
-        print "defaulting to ", inference_method, "for latent function inference"
+        print("defaulting to ", inference_method, "for latent function inference")

         self.Z = Param('inducing inputs', Z)
         self.num_inducing = Z.shape[0]

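The untouched context line `raise NotImplementedError, "what to do what to do?"` uses the comma form of raise, which Python 3 rejects outright; a full port would need the call form, valid on both 2 and 3, roughly:

# Python 2-only spelling, as left in the diff:
#     raise NotImplementedError, "what to do what to do?"
# Version-neutral spelling:
try:
    raise NotImplementedError("what to do what to do?")
except NotImplementedError as e:
    print(e)  # -> what to do what to do?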
@@ -56,7 +56,7 @@ class SparseGP_MPI(SparseGP):
             self.N_range = (N_start, N_end)
             self.N_list = np.array(N_list)
             self.Y_local = self.Y[N_start:N_end]
-            print 'MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range)
+            print('MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range))
             mpi_comm.Bcast(self.param_array, root=0)
         self.update_model(True)

@@ -1,7 +1,7 @@
 # Copyright (c) 2012-2014, Max Zwiessele.
 # Licensed under the BSD 3-clause license (see LICENSE.txt)

-
+from __future__ import print_function
 import numpy as np
 import sys
 import time

@@ -65,8 +65,8 @@ class VerboseOptimization(object):
                 #self.progress.add_class('box-flex1')
             else:
                 self.exps = exponents(self.fnow, self.current_gradient)
-                print 'Running {} Code:'.format(self.opt_name)
-                print ' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters)
+                print('Running {} Code:'.format(self.opt_name))
+                print(' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters))

     def __enter__(self):
         self.start = time.time()

@@ -107,11 +107,11 @@ class VerboseOptimization(object):
             b = np.any(n_exps < self.exps)
             if a or b:
                 self.p_iter = self.iteration
-                print ''
+                print('')
             if b:
                 self.exps = n_exps
-            print '\r',
-            print '{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
+            print('\r', end=' ')
+            print('{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
             sys.stdout.flush()

     def print_status(self, me, which=None):

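The two `end=' '` conversions above follow the standard 2to3 mapping of a trailing comma. The match is close but not exact: in Python 2 the trailing comma suppressed the newline and let the interpreter's softspace logic decide whether a space followed, whereas `end=' '` always emits one. A hedged sketch of the behaviour:

from __future__ import print_function
import sys

# 2to3 maps the Python 2 statement
#     print '\r',
# to the call below; the newline is suppressed and a space is always
# appended, which the original softspace rule only did sometimes.
print('\r', end=' ')
print('iteration 42', end=' ')
sys.stdout.flush()  # needed because nothing ends the line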
@@ -140,6 +140,6 @@ class VerboseOptimization(object):
         self.print_out()

         if not self.ipython_notebook:
-            print
-            print 'Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)
-            print
+            print()
+            print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start))
+            print()