Convert print to function for Python 3 compatibility. This breaks compatibility for versions of Python < 2.6

This commit is contained in:
Mike Croucher 2015-02-26 08:58:05 +00:00
parent 2a43324428
commit 4512964f09
2 changed files with 11 additions and 10 deletions

View file

@@ -30,7 +30,7 @@ class OneVsAllSparseClassification(object):
self.results = {} self.results = {}
for yj in labels: for yj in labels:
print 'Class %s vs all' %yj print('Class %s vs all' %yj)
Ynew = Y.copy() Ynew = Y.copy()
Ynew[Y.flatten()!=yj] = 0 Ynew[Y.flatten()!=yj] = 0
Ynew[Y.flatten()==yj] = 1 Ynew[Y.flatten()==yj] = 1

View file

@@ -1,6 +1,7 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
import numpy as np import numpy as np
from ..core.parameterization.param import Param from ..core.parameterization.param import Param
from ..core.sparse_gp import SparseGP from ..core.sparse_gp import SparseGP
@@ -50,7 +51,7 @@ class SparseGPMiniBatch(SparseGP):
else: else:
#inference_method = ?? #inference_method = ??
raise NotImplementedError, "what to do what to do?" raise NotImplementedError, "what to do what to do?"
print "defaulting to ", inference_method, "for latent function inference" print("defaulting to ", inference_method, "for latent function inference")
self.kl_factr = 1. self.kl_factr = 1.
self.Z = Param('inducing inputs', Z) self.Z = Param('inducing inputs', Z)
@@ -80,13 +81,13 @@
overall = self.Y_normalized.shape[1] overall = self.Y_normalized.shape[1]
m_f = lambda i: "Precomputing Y for missing data: {: >7.2%}".format(float(i+1)/overall) m_f = lambda i: "Precomputing Y for missing data: {: >7.2%}".format(float(i+1)/overall)
message = m_f(-1) message = m_f(-1)
print message, print(message, end=' ')
for d in xrange(overall): for d in xrange(overall):
self.Ylist.append(self.Y_normalized[self.ninan[:, d], d][:, None]) self.Ylist.append(self.Y_normalized[self.ninan[:, d], d][:, None])
print ' '*(len(message)+1) + '\r', print(' '*(len(message)+1) + '\r', end=' ')
message = m_f(d) message = m_f(d)
print message, print(message, end=' ')
print '' print('')
self.posterior = None self.posterior = None
@@ -241,15 +242,15 @@
if not self.stochastics: if not self.stochastics:
m_f = lambda i: "Inference with missing_data: {: >7.2%}".format(float(i+1)/self.output_dim) m_f = lambda i: "Inference with missing_data: {: >7.2%}".format(float(i+1)/self.output_dim)
message = m_f(-1) message = m_f(-1)
print message, print(message, end=' ')
for d in self.stochastics.d: for d in self.stochastics.d:
ninan = self.ninan[:, d] ninan = self.ninan[:, d]
if not self.stochastics: if not self.stochastics:
print ' '*(len(message)) + '\r', print(' '*(len(message)) + '\r', end=' ')
message = m_f(d) message = m_f(d)
print message, print(message, end=' ')
posterior, log_marginal_likelihood, \ posterior, log_marginal_likelihood, \
grad_dict, current_values, value_indices = self._inner_parameters_changed( grad_dict, current_values, value_indices = self._inner_parameters_changed(
@@ -268,7 +269,7 @@ class SparseGPMiniBatch(SparseGP):
woodbury_vector[:, d:d+1] = posterior.woodbury_vector woodbury_vector[:, d:d+1] = posterior.woodbury_vector
self._log_marginal_likelihood += log_marginal_likelihood self._log_marginal_likelihood += log_marginal_likelihood
if not self.stochastics: if not self.stochastics:
print '' print('')
if self.posterior is None: if self.posterior is None:
self.posterior = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, self.posterior = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector,