diff --git a/.travis.yml b/.travis.yml
index d7e3f7cf..093fc49a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,7 +18,8 @@ before_install:
install:
- conda install --yes python=$TRAVIS_PYTHON_VERSION atlas numpy=1.7 scipy=0.12 matplotlib nose sphinx pip nose
- - pip install .
+ #- pip install .
+ - python setup.py build_ext --inplace
#--use-mirrors
#
# command to run tests, e.g. python setup.py test
diff --git a/GPy/__init__.py b/GPy/__init__.py
index 5e091170..26713406 100644
--- a/GPy/__init__.py
+++ b/GPy/__init__.py
@@ -3,23 +3,23 @@
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
-import core
-from core.parameterization import transformations, priors
+from . import core
+from .core.parameterization import transformations, priors
constraints = transformations
-import models
-import mappings
-import inference
-import util
-import examples
-import likelihoods
-import testing
+from . import models
+from . import mappings
+from . import inference
+from . import util
+from . import examples
+from . import likelihoods
+from . import testing
from numpy.testing import Tester
-import kern
-import plotting
+from . import kern
+from . import plotting
# Direct imports for convenience:
-from core import Model
-from core.parameterization import Param, Parameterized, ObsAr
+from .core import Model
+from .core.parameterization import Param, Parameterized, ObsAr
#@nottest
try:
diff --git a/GPy/core/__init__.py b/GPy/core/__init__.py
index ebed29bb..142eccbf 100644
--- a/GPy/core/__init__.py
+++ b/GPy/core/__init__.py
@@ -1,12 +1,12 @@
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from model import *
-from parameterization.parameterized import adjust_name_for_printing, Parameterizable
-from parameterization.param import Param, ParamConcatenation
-from parameterization.observable_array import ObsAr
+from .model import *
+from .parameterization.parameterized import adjust_name_for_printing, Parameterizable
+from .parameterization.param import Param, ParamConcatenation
+from .parameterization.observable_array import ObsAr
-from gp import GP
-from svgp import SVGP
-from sparse_gp import SparseGP
-from mapping import *
+from .gp import GP
+from .svgp import SVGP
+from .sparse_gp import SparseGP
+from .mapping import *
diff --git a/GPy/core/gp.py b/GPy/core/gp.py
index 3252ac08..4b6231af 100644
--- a/GPy/core/gp.py
+++ b/GPy/core/gp.py
@@ -4,13 +4,15 @@
import numpy as np
import sys
from .. import kern
-from model import Model
-from parameterization import ObsAr
+from .model import Model
+from .parameterization import ObsAr
+from .mapping import Mapping
from .. import likelihoods
from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation
-from parameterization.variational import VariationalPosterior
+from .parameterization.variational import VariationalPosterior
import logging
+import warnings
from GPy.util.normalizer import MeanNorm
logger = logging.getLogger("GP")
@@ -34,7 +36,7 @@ class GP(Model):
"""
- def __init__(self, X, Y, kernel, likelihood, inference_method=None, name='gp', Y_metadata=None, normalizer=False):
+ def __init__(self, X, Y, kernel, likelihood, mean_function=None, inference_method=None, name='gp', Y_metadata=None, normalizer=False):
super(GP, self).__init__(name)
assert X.ndim == 2
@@ -62,10 +64,14 @@ class GP(Model):
self.Y = ObsAr(Y)
self.Y_normalized = self.Y
- assert Y.shape[0] == self.num_data
+ if Y.shape[0] != self.num_data:
+            #There can be cases where we want more inputs than outputs, for example if we have multiple latent
+ #function values
+            warnings.warn("The number of rows in your input data X does not match \
+                the number of rows in your output data Y; be VERY sure this is what you want")
_, self.output_dim = self.Y.shape
- #TODO: check the type of this is okay?
+ assert ((Y_metadata is None) or isinstance(Y_metadata, dict))
self.Y_metadata = Y_metadata
assert isinstance(kernel, kern.Kern)
@@ -75,6 +81,15 @@ class GP(Model):
assert isinstance(likelihood, likelihoods.Likelihood)
self.likelihood = likelihood
+ #handle the mean function
+ self.mean_function = mean_function
+ if mean_function is not None:
+ assert isinstance(self.mean_function, Mapping)
+ assert mean_function.input_dim == self.input_dim
+ assert mean_function.output_dim == self.output_dim
+ self.link_parameter(mean_function)
+
+
#find a sensible inference method
logger.info("initializing inference method")
if inference_method is None:
@@ -82,14 +97,16 @@ class GP(Model):
inference_method = exact_gaussian_inference.ExactGaussianInference()
else:
inference_method = expectation_propagation.EP()
- print "defaulting to ", inference_method, "for latent function inference"
+            print("defaulting to %s for latent function inference" % inference_method)
self.inference_method = inference_method
logger.info("adding kernel and likelihood as parameters")
self.link_parameter(self.kern)
self.link_parameter(self.likelihood)
+ self.posterior = None
- def set_XY(self, X=None, Y=None):
+
+ def set_XY(self, X=None, Y=None, trigger_update=True):
"""
Set the input / output data of the model
This is useful if we wish to change our existing data but maintain the same model
@@ -99,7 +116,7 @@ class GP(Model):
:param Y: output observations
:type Y: np.ndarray
"""
- self.update_model(False)
+ if trigger_update: self.update_model(False)
if Y is not None:
if self.normalizer is not None:
self.normalizer.scale_by(Y)
@@ -123,26 +140,26 @@ class GP(Model):
self.link_parameters(self.X)
else:
self.X = ObsAr(X)
- self.update_model(True)
- self._trigger_params_changed()
+ if trigger_update: self.update_model(True)
+ if trigger_update: self._trigger_params_changed()
- def set_X(self,X):
+ def set_X(self,X, trigger_update=True):
"""
Set the input data of the model
:param X: input observations
:type X: np.ndarray
"""
- self.set_XY(X=X)
+ self.set_XY(X=X, trigger_update=trigger_update)
- def set_Y(self,Y):
+ def set_Y(self,Y, trigger_update=True):
"""
Set the output data of the model
:param X: output observations
:type X: np.ndarray
"""
- self.set_XY(Y=Y)
+ self.set_XY(Y=Y, trigger_update=trigger_update)
def parameters_changed(self):
"""
@@ -153,9 +170,11 @@ class GP(Model):
This method is not designed to be called manually, the framework is set up to automatically call this method upon changes to parameters, if you call
this method yourself, there may be unexpected consequences.
"""
- self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y_normalized, self.Y_metadata)
+ self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y_normalized, self.mean_function, self.Y_metadata)
self.likelihood.update_gradients(self.grad_dict['dL_dthetaL'])
self.kern.update_gradients_full(self.grad_dict['dL_dK'], self.X)
+ if self.mean_function is not None:
+ self.mean_function.update_gradients(self.grad_dict['dL_dm'], self.X)
def log_likelihood(self):
"""
@@ -192,6 +211,10 @@ class GP(Model):
#force mu to be a column vector
if len(mu.shape)==1: mu = mu[:,None]
+
+ #add the mean function in
+        if self.mean_function is not None:
+ mu += self.mean_function.f(_Xnew)
return mu, var
def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None):
@@ -241,12 +264,14 @@ class GP(Model):
def predictive_gradients(self, Xnew):
"""
- Compute the derivatives of the latent function with respect to X*
+ Compute the derivatives of the predicted latent function with respect to X*
Given a set of points at which to predict X* (size [N*,Q]), compute the
derivatives of the mean and variance. Resulting arrays are sized:
dmu_dX* -- [N*, Q ,D], where D is the number of output in this GP (usually one).
+ Note that this is not the same as computing the mean and variance of the derivative of the function!
+
dv_dX* -- [N*, Q], (since all outputs have the same variance)
:param X: The points at which to get the predictive gradients
:type X: np.ndarray (Xnew x self.input_dim)
@@ -276,7 +301,7 @@ class GP(Model):
:type size: int.
:param full_cov: whether to return the full covariance matrix, or just the diagonal.
:type full_cov: bool.
- :returns: Ysim: set of simulations
+ :returns: fsim: set of simulations
:rtype: np.ndarray (N x samples)
"""
m, v = self._raw_predict(X, full_cov=full_cov)
@@ -284,11 +309,11 @@ class GP(Model):
m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)
v = v.reshape(m.size,-1) if len(v.shape)==3 else v
if not full_cov:
- Ysim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
+ fsim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
else:
- Ysim = np.random.multivariate_normal(m.flatten(), v, size).T
+ fsim = np.random.multivariate_normal(m.flatten(), v, size).T
- return Ysim
+ return fsim
def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None):
"""
@@ -304,16 +329,16 @@ class GP(Model):
:type noise_model: integer.
:returns: Ysim: set of simulations, a Numpy array (N x samples).
"""
- Ysim = self.posterior_samples_f(X, size, full_cov=full_cov)
- Ysim = self.likelihood.samples(Ysim, Y_metadata)
-
+ fsim = self.posterior_samples_f(X, size, full_cov=full_cov)
+ Ysim = self.likelihood.samples(fsim, Y_metadata)
return Ysim
def plot_f(self, plot_limits=None, which_data_rows='all',
which_data_ycols='all', fixed_inputs=[],
levels=20, samples=0, fignum=None, ax=None, resolution=None,
plot_raw=True,
- linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx'):
+ linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx',
+ apply_link=False):
"""
Plot the GP's view of the world, where the data is normalized and before applying a likelihood.
This is a call to plot with plot_raw=True.
@@ -350,6 +375,8 @@ class GP(Model):
:type Y_metadata: dict
:param data_symbol: symbol as used matplotlib, by default this is a black cross ('kx')
:type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib.
+ :param apply_link: if there is a link function of the likelihood, plot the link(f*) rather than f*
+ :type apply_link: boolean
"""
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ..plotting.matplot_dep import models_plots
@@ -362,13 +389,13 @@ class GP(Model):
which_data_ycols, fixed_inputs,
levels, samples, fignum, ax, resolution,
plot_raw=plot_raw, Y_metadata=Y_metadata,
- data_symbol=data_symbol, **kw)
+ data_symbol=data_symbol, apply_link=apply_link, **kw)
def plot(self, plot_limits=None, which_data_rows='all',
which_data_ycols='all', fixed_inputs=[],
levels=20, samples=0, fignum=None, ax=None, resolution=None,
plot_raw=False,
- linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx'):
+ linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx', predict_kw=None):
"""
Plot the posterior of the GP.
- In one dimension, the function is plotted with a shaded region identifying two standard deviations.
@@ -417,7 +444,7 @@ class GP(Model):
which_data_ycols, fixed_inputs,
levels, samples, fignum, ax, resolution,
plot_raw=plot_raw, Y_metadata=Y_metadata,
- data_symbol=data_symbol, **kw)
+ data_symbol=data_symbol, predict_kw=predict_kw, **kw)
def input_sensitivity(self, summarize=True):
"""
@@ -441,7 +468,7 @@ class GP(Model):
try:
super(GP, self).optimize(optimizer, start, **kwargs)
except KeyboardInterrupt:
- print "KeyboardInterrupt caught, calling on_optimization_end() to round things up"
+ print("KeyboardInterrupt caught, calling on_optimization_end() to round things up")
self.inference_method.on_optimization_end()
raise
@@ -458,3 +485,38 @@ class GP(Model):
"""
from ..inference.latent_function_inference.inferenceX import infer_newX
return infer_newX(self, Y_new, optimize=optimize)
+
+ def log_predictive_density(self, x_test, y_test, Y_metadata=None):
+ """
+ Calculation of the log predictive density
+
+ .. math:
+        p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\\mu_{*}\\sigma^{2}_{*})
+
+ :param x_test: test locations (x_{*})
+ :type x_test: (Nx1) array
+ :param y_test: test observations (y_{*})
+ :type y_test: (Nx1) array
+ :param Y_metadata: metadata associated with the test points
+ """
+ mu_star, var_star = self._raw_predict(x_test)
+ return self.likelihood.log_predictive_density(y_test, mu_star, var_star, Y_metadata=Y_metadata)
+
+ def log_predictive_density_sampling(self, x_test, y_test, Y_metadata=None, num_samples=1000):
+ """
+ Calculation of the log predictive density by sampling
+
+ .. math:
+        p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\\mu_{*}\\sigma^{2}_{*})
+
+ :param x_test: test locations (x_{*})
+ :type x_test: (Nx1) array
+ :param y_test: test observations (y_{*})
+ :type y_test: (Nx1) array
+ :param Y_metadata: metadata associated with the test points
+ :param num_samples: number of samples to use in monte carlo integration
+ :type num_samples: int
+ """
+ mu_star, var_star = self._raw_predict(x_test)
+ return self.likelihood.log_predictive_density_sampling(y_test, mu_star, var_star, Y_metadata=Y_metadata, num_samples=num_samples)
+
diff --git a/GPy/core/mapping.py b/GPy/core/mapping.py
index 111fec6f..30614384 100644
--- a/GPy/core/mapping.py
+++ b/GPy/core/mapping.py
@@ -1,13 +1,14 @@
# Copyright (c) 2013,2014, GPy authors (see AUTHORS.txt).
+# Copyright (c) 2015, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import sys
-from parameterization import Parameterized
+from .parameterization import Parameterized
import numpy as np
class Mapping(Parameterized):
"""
- Base model for shared behavior between models that can act like a mapping.
+ Base model for shared mapping behaviours
"""
def __init__(self, input_dim, output_dim, name='mapping'):
@@ -18,49 +19,12 @@ class Mapping(Parameterized):
def f(self, X):
raise NotImplementedError
- def df_dX(self, dL_df, X):
- """Evaluate derivatives of mapping outputs with respect to inputs.
-
- :param dL_df: gradient of the objective with respect to the function.
- :type dL_df: ndarray (num_data x output_dim)
- :param X: the input locations where derivatives are to be evaluated.
- :type X: ndarray (num_data x input_dim)
- :returns: matrix containing gradients of the function with respect to the inputs.
- """
+ def gradients_X(self, dL_dF, X):
raise NotImplementedError
- def df_dtheta(self, dL_df, X):
- """The gradient of the outputs of the mapping with respect to each of the parameters.
-
- :param dL_df: gradient of the objective with respect to the function.
- :type dL_df: ndarray (num_data x output_dim)
- :param X: input locations where the function is evaluated.
- :type X: ndarray (num_data x input_dim)
- :returns: Matrix containing gradients with respect to parameters of each output for each input data.
- :rtype: ndarray (num_params length)
- """
-
+ def update_gradients(self, dL_dF, X):
raise NotImplementedError
- def plot(self, *args):
- """
- Plots the mapping associated with the model.
- - In one dimension, the function is plotted.
- - In two dimensions, a contour-plot shows the function
- - In higher dimensions, we've not implemented this yet !TODO!
-
- Can plot only part of the data and part of the posterior functions
- using which_data and which_functions
-
- This is a convenience function: arguments are passed to
- GPy.plotting.matplot_dep.models_plots.plot_mapping
- """
-
- if "matplotlib" in sys.modules:
- from ..plotting.matplot_dep import models_plots
- mapping_plots.plot_mapping(self,*args)
- else:
- raise NameError, "matplotlib package has not been imported."
class Bijective_mapping(Mapping):
"""
@@ -74,72 +38,4 @@ class Bijective_mapping(Mapping):
"""Inverse mapping from output domain of the function to the inputs."""
raise NotImplementedError
-from model import Model
-
-class Mapping_check_model(Model):
- """
- This is a dummy model class used as a base class for checking that the
- gradients of a given mapping are implemented correctly. It enables
- checkgradient() to be called independently on each mapping.
- """
- def __init__(self, mapping=None, dL_df=None, X=None):
- num_samples = 20
- if mapping==None:
- mapping = GPy.mapping.linear(1, 1)
- if X==None:
- X = np.random.randn(num_samples, mapping.input_dim)
- if dL_df==None:
- dL_df = np.ones((num_samples, mapping.output_dim))
-
- self.mapping=mapping
- self.X = X
- self.dL_df = dL_df
- self.num_params = self.mapping.num_params
- Model.__init__(self)
-
-
- def _get_params(self):
- return self.mapping._get_params()
-
- def _get_param_names(self):
- return self.mapping._get_param_names()
-
- def _set_params(self, x):
- self.mapping._set_params(x)
-
- def log_likelihood(self):
- return (self.dL_df*self.mapping.f(self.X)).sum()
-
- def _log_likelihood_gradients(self):
- raise NotImplementedError, "This needs to be implemented to use the Mapping_check_model class."
-
-class Mapping_check_df_dtheta(Mapping_check_model):
- """This class allows gradient checks for the gradient of a mapping with respect to parameters. """
- def __init__(self, mapping=None, dL_df=None, X=None):
- Mapping_check_model.__init__(self,mapping=mapping,dL_df=dL_df, X=X)
-
- def _log_likelihood_gradients(self):
- return self.mapping.df_dtheta(self.dL_df, self.X)
-
-
-class Mapping_check_df_dX(Mapping_check_model):
- """This class allows gradient checks for the gradient of a mapping with respect to X. """
- def __init__(self, mapping=None, dL_df=None, X=None):
- Mapping_check_model.__init__(self,mapping=mapping,dL_df=dL_df, X=X)
-
- if dL_df==None:
- dL_df = np.ones((self.X.shape[0],self.mapping.output_dim))
- self.num_params = self.X.shape[0]*self.mapping.input_dim
-
- def _log_likelihood_gradients(self):
- return self.mapping.df_dX(self.dL_df, self.X).flatten()
-
- def _get_param_names(self):
- return ['X_' +str(i) + ','+str(j) for j in range(self.X.shape[1]) for i in range(self.X.shape[0])]
-
- def _get_params(self):
- return self.X.flatten()
-
- def _set_params(self, x):
- self.X=x.reshape(self.X.shape)
diff --git a/GPy/core/model.py b/GPy/core/model.py
index c5d318e7..937d30e5 100644
--- a/GPy/core/model.py
+++ b/GPy/core/model.py
@@ -5,7 +5,7 @@
from .. import likelihoods
from ..inference import optimization
from ..util.misc import opt_wrapper
-from parameterization import Parameterized
+from .parameterization import Parameterized
import multiprocessing as mp
import numpy as np
from numpy.linalg.linalg import LinAlgError
@@ -13,6 +13,7 @@ import itertools
import sys
from .verbose_optimization import VerboseOptimization
# import numdifftools as ndt
+from functools import reduce
class Model(Parameterized):
_fail_count = 0 # Count of failed optimization steps (see objective)
@@ -30,7 +31,7 @@ class Model(Parameterized):
self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500)
def log_likelihood(self):
- raise NotImplementedError, "this needs to be implemented to use the model class"
+ raise NotImplementedError("this needs to be implemented to use the model class")
def _log_likelihood_gradients(self):
return self.gradient.copy()
@@ -82,7 +83,7 @@ class Model(Parameterized):
pool.close() # signal that no more data coming in
pool.join() # wait for all the tasks to complete
except KeyboardInterrupt:
- print "Ctrl+c received, terminating and joining pool."
+ print("Ctrl+c received, terminating and joining pool.")
pool.terminate()
pool.join()
@@ -95,10 +96,10 @@ class Model(Parameterized):
self.optimization_runs.append(jobs[i].get())
if verbose:
- print("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt))
+ print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)))
except Exception as e:
if robust:
- print("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts))
+ print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)))
else:
raise e
@@ -119,7 +120,7 @@ class Model(Parameterized):
DEPRECATED.
"""
- raise DeprecationWarning, 'parameters now have default constraints'
+ raise DeprecationWarning('parameters now have default constraints')
def objective_function(self):
"""
@@ -213,14 +214,14 @@ class Model(Parameterized):
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e10, 1e10)
return obj_f, self.obj_grads
- def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, **kwargs):
+ def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):
"""
Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.
kwargs are passed to the optimizer. They can be:
- :param max_f_eval: maximum number of function evaluations
- :type max_f_eval: int
+ :param max_iters: maximum number of function evaluations
+ :type max_iters: int
:messages: True: Display messages during optimisation, "ipython_notebook":
:type messages: bool"string
:param optimizer: which optimizer to use (defaults to self.preferred optimizer)
@@ -237,10 +238,10 @@ class Model(Parameterized):
"""
if self.is_fixed or self.size == 0:
- print 'nothing to optimize'
+ print('nothing to optimize')
if not self.update_model():
- print "updates were off, setting updates on again"
+ print("updates were off, setting updates on again")
self.update_model(True)
if start == None:
@@ -255,7 +256,7 @@ class Model(Parameterized):
else:
optimizer = optimization.get_optimizer(optimizer)
opt = optimizer(start, model=self, max_iters=max_iters, **kwargs)
-
+
with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook) as vo:
opt.run(f_fp=self._objective_grads, f=self._objective, fp=self._grads)
vo.finish(opt)
@@ -305,7 +306,7 @@ class Model(Parameterized):
transformed_index = (indices - (~self._fixes_).cumsum())[transformed_index[which[0]]]
if transformed_index.size == 0:
- print "No free parameters to check"
+ print("No free parameters to check")
return
# just check the global ratio
@@ -340,9 +341,9 @@ class Model(Parameterized):
cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
cols = np.array(cols) + 5
header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
- header_string = map(lambda x: '|'.join(x), [header_string])
+ header_string = list(map(lambda x: '|'.join(x), [header_string]))
separator = '-' * len(header_string[0])
- print '\n'.join([header_string[0], separator])
+ print('\n'.join([header_string[0], separator]))
if target_param is None:
param_index = range(len(x))
transformed_index = param_index
@@ -358,19 +359,24 @@ class Model(Parameterized):
transformed_index = param_index
if param_index.size == 0:
- print "No free parameters to check"
+ print("No free parameters to check")
return
gradient = self._grads(x).copy()
np.where(gradient == 0, 1e-312, gradient)
ret = True
- for nind, xind in itertools.izip(param_index, transformed_index):
+ for nind, xind in zip(param_index, transformed_index):
xx = x.copy()
xx[xind] += step
f1 = self._objective(xx)
xx[xind] -= 2.*step
f2 = self._objective(xx)
- df_ratio = np.abs((f1 - f2) / min(f1, f2))
+            #Avoid divide by zero: unless one of the values is larger than 1e-15 in magnitude, both values are essentially
+ #the same
+ if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
+ df_ratio = np.abs((f1 - f2) / min(f1, f2))
+ else:
+ df_ratio = 1.0
df_unstable = df_ratio < df_tolerance
numerical_gradient = (f1 - f2) / (2 * step)
if np.all(gradient[xind] == 0): ratio = (f1 - f2) == gradient[xind]
@@ -392,7 +398,7 @@ class Model(Parameterized):
ng = '%.6f' % float(numerical_gradient)
df = '%1.e' % float(df_ratio)
grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
- print grad_string
+ print(grad_string)
self.optimizer_array = x
return ret
@@ -402,6 +408,7 @@ class Model(Parameterized):
model_details = [['Model', self.name + ' '],
['Log-likelihood', '{} '.format(float(self.log_likelihood()))],
["Number of Parameters", '{} '.format(self.size)],
+ ["Number of Optimization Parameters", '{} '.format(self._size_transformed())],
["Updates", '{} '.format(self._update_on)],
]
from operator import itemgetter
@@ -419,6 +426,7 @@ class Model(Parameterized):
model_details = [['Name', self.name],
['Log-likelihood', '{}'.format(float(self.log_likelihood()))],
["Number of Parameters", '{}'.format(self.size)],
+ ["Number of Optimization Parameters", '{}'.format(self._size_transformed())],
["Updates", '{}'.format(self._update_on)],
]
from operator import itemgetter
diff --git a/GPy/core/parameterization/__init__.py b/GPy/core/parameterization/__init__.py
index 8e9aa094..de736671 100644
--- a/GPy/core/parameterization/__init__.py
+++ b/GPy/core/parameterization/__init__.py
@@ -1,5 +1,5 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from param import Param, ObsAr
-from parameterized import Parameterized
+from .param import Param, ObsAr
+from .parameterized import Parameterized
diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py
index 61c82da1..5c3e350f 100644
--- a/GPy/core/parameterization/index_operations.py
+++ b/GPy/core/parameterization/index_operations.py
@@ -3,7 +3,9 @@
import numpy
from numpy.lib.function_base import vectorize
-from lists_and_dicts import IntArrayDict
+from .lists_and_dicts import IntArrayDict
+from functools import reduce
+from .transformations import Transformation
def extract_properties_to_index(index, props):
prop_index = dict()
@@ -62,12 +64,15 @@ class ParameterIndexOperations(object):
def __init__(self, constraints=None):
self._properties = IntArrayDict()
if constraints is not None:
- for t, i in constraints.iteritems():
+ #python 3 fix
+ #for t, i in constraints.iteritems():
+ for t, i in constraints.items():
self.add(t, i)
- def iteritems(self):
- return self._properties.iteritems()
-
+ #iteritems has gone in python 3
+ #def iteritems(self):
+ # return self._properties.iteritems()
+
def items(self):
return self._properties.items()
@@ -75,7 +80,7 @@ class ParameterIndexOperations(object):
return self._properties.keys()
def iterproperties(self):
- return self._properties.iterkeys()
+ return iter(self._properties)
def shift_right(self, start, size):
for ind in self.iterindices():
@@ -83,7 +88,7 @@ class ParameterIndexOperations(object):
ind[toshift] += size
def shift_left(self, start, size):
- for v, ind in self.items():
+ for v, ind in list(self.items()):
todelete = (ind>=start) * (ind= self._offset) * (ind < (self._offset + self._size))] - self._offset
-
- def iteritems(self):
- for i, ind in self._param_index_ops.iteritems():
+ #iteritems has gone in python 3. It has been renamed items()
+ def items(self):
+ _items_list = list(self._param_index_ops.items())
+ for i, ind in _items_list:
ind2 = self._filter_index(ind)
if ind2.size > 0:
yield i, ind2
-
- def items(self):
- return [[i,v] for i,v in self.iteritems()]
+
+ #Python 3 items() is now implemented as per py2 iteritems
+ #def items(self):
+ # return [[i,v] for i,v in self.iteritems()]
def properties(self):
return [i for i in self.iterproperties()]
def iterproperties(self):
- for i, _ in self.iteritems():
+ #py3 fix
+ #for i, _ in self.iteritems():
+ for i, _ in self.items():
yield i
@@ -230,7 +247,9 @@ class ParameterIndexOperationsView(object):
def iterindices(self):
- for _, ind in self.iteritems():
+ #py3 fix
+ #for _, ind in self.iteritems():
+ for _, ind in self.items():
yield ind
@@ -286,10 +305,14 @@ class ParameterIndexOperationsView(object):
def __str__(self, *args, **kwargs):
import pprint
- return pprint.pformat(dict(self.iteritems()))
+ #py3 fixes
+ #return pprint.pformat(dict(self.iteritems()))
+ return pprint.pformat(dict(self.items()))
def update(self, parameter_index_view, offset=0):
- for i, v in parameter_index_view.iteritems():
+ #py3 fixes
+ #for i, v in parameter_index_view.iteritems():
+ for i, v in parameter_index_view.items():
self.add(i, v+offset)
@@ -297,6 +320,8 @@ class ParameterIndexOperationsView(object):
return self.__deepcopy__(None)
def __deepcopy__(self, memo):
- return ParameterIndexOperations(dict(self.iteritems()))
+ #py3 fix
+ #return ParameterIndexOperations(dict(self.iteritems()))
+ return ParameterIndexOperations(dict(self.items()))
pass
diff --git a/GPy/core/parameterization/lists_and_dicts.py b/GPy/core/parameterization/lists_and_dicts.py
index 5afbb8ed..2d774a76 100644
--- a/GPy/core/parameterization/lists_and_dicts.py
+++ b/GPy/core/parameterization/lists_and_dicts.py
@@ -32,7 +32,7 @@ class ArrayList(list):
if el is item:
return index
index += 1
- raise ValueError, "{} is not in list".format(item)
+ raise ValueError("{} is not in list".format(item))
pass
class ObserverList(object):
@@ -75,7 +75,7 @@ class ObserverList(object):
def __str__(self):
from . import ObsAr, Param
- from parameter_core import Parameterizable
+ from .parameter_core import Parameterizable
ret = []
curr_p = None
diff --git a/GPy/core/parameterization/observable.py b/GPy/core/parameterization/observable.py
index 8a85c6ca..0836b5d6 100644
--- a/GPy/core/parameterization/observable.py
+++ b/GPy/core/parameterization/observable.py
@@ -12,7 +12,7 @@ class Observable(object):
"""
def __init__(self, *args, **kwargs):
super(Observable, self).__init__()
- from lists_and_dicts import ObserverList
+ from .lists_and_dicts import ObserverList
self.observers = ObserverList()
self._update_on = True
diff --git a/GPy/core/parameterization/observable_array.py b/GPy/core/parameterization/observable_array.py
index 271fe7b9..c6fea497 100644
--- a/GPy/core/parameterization/observable_array.py
+++ b/GPy/core/parameterization/observable_array.py
@@ -3,8 +3,8 @@
import numpy as np
-from parameter_core import Pickleable
-from observable import Observable
+from .parameter_core import Pickleable
+from .observable import Observable
class ObsAr(np.ndarray, Pickleable, Observable):
"""
@@ -39,7 +39,7 @@ class ObsAr(np.ndarray, Pickleable, Observable):
return self.view(np.ndarray)
def copy(self):
- from lists_and_dicts import ObserverList
+ from .lists_and_dicts import ObserverList
memo = {}
memo[id(self)] = self
memo[id(self.observers)] = ObserverList()
diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py
index 1246bc18..1838f2bf 100644
--- a/GPy/core/parameterization/param.py
+++ b/GPy/core/parameterization/param.py
@@ -4,8 +4,9 @@
import itertools
import numpy
np = numpy
-from parameter_core import Parameterizable, adjust_name_for_printing, Pickleable
-from observable_array import ObsAr
+from .parameter_core import Parameterizable, adjust_name_for_printing, Pickleable
+from .observable_array import ObsAr
+from functools import reduce
###### printing
__constraints_name__ = "Constraint"
@@ -156,7 +157,7 @@ class Param(Parameterizable, ObsAr):
#===========================================================================
@property
def is_fixed(self):
- from transformations import __fixed__
+ from .transformations import __fixed__
return self.constraints[__fixed__].size == self.size
def _get_original(self, param):
@@ -207,10 +208,14 @@ class Param(Parameterizable, ObsAr):
return 0
@property
def _constraints_str(self):
- return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.iteritems()))]
+ #py3 fix
+ #return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.iteritems()))]
+ return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.items()))]
@property
def _priors_str(self):
- return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.iteritems()))]
+ #py3 fix
+ #return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.iteritems()))]
+ return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.items()))]
@property
def _ties_str(self):
return ['']
@@ -279,7 +284,7 @@ class Param(Parameterizable, ObsAr):
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
-"""] + ['
'] + [header] + ["
{i}
{x}
{c}
{p}
{t}
".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in itertools.izip(indices, vals, constr_matrix, ties, prirs)] + ["
"])
+"""] + ['
'] + [header] + ["
{i}
{x}
{c}
{p}
{t}
".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in zip(indices, vals, constr_matrix, ties, prirs)] + ["
"])
def __str__(self, constr_matrix=None, indices=None, prirs=None, ties=None, lc=None, lx=None, li=None, lp=None, lt=None, only_name=False):
filter_ = self._current_slice_
@@ -300,7 +305,7 @@ class Param(Parameterizable, ObsAr):
if only_name: header = header_format.format(lc, lx, li, lt, lp, ' ', x=self.hierarchy_name(), c=sep*lc, i=sep*li, t=sep*lt, p=sep*lp) # nice header for printing
else: header = header_format.format(lc, lx, li, lt, lp, ' ', x=self.hierarchy_name(), c=__constraints_name__, i=__index_name__, t=__tie_name__, p=__priors_name__) # nice header for printing
if not ties: ties = itertools.cycle([''])
- return "\n".join([header] + [" {i!s:^{3}s} | {x: >{1}.{2}g} | {c:^{0}s} | {p:^{5}s} | {t:^{4}s} ".format(lc, lx, __precision__, li, lt, lp, x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in itertools.izip(indices, vals, constr_matrix, ties, prirs)]) # return all the constraints with right indices
+ return "\n".join([header] + [" {i!s:^{3}s} | {x: >{1}.{2}g} | {c:^{0}s} | {p:^{5}s} | {t:^{4}s} ".format(lc, lx, __precision__, li, lt, lp, x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in zip(indices, vals, constr_matrix, ties, prirs)]) # return all the constraints with right indices
# except: return super(Param, self).__str__()
class ParamConcatenation(object):
@@ -313,7 +318,7 @@ class ParamConcatenation(object):
See :py:class:`GPy.core.parameter.Param` for more details on constraining.
"""
# self.params = params
- from lists_and_dicts import ArrayList
+ from .lists_and_dicts import ArrayList
self.params = ArrayList([])
for p in params:
for p in p.flattened_parameters:
@@ -336,7 +341,9 @@ class ParamConcatenation(object):
level += 1
parent = parent._parent_
import operator
- self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1)))
+ #py3 fix
+ #self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1)))
+        self.parents = [x[0] for x in sorted(parents.items(), key=operator.itemgetter(1))]
#===========================================================================
# Get/set items, enable broadcasting
#===========================================================================
@@ -429,14 +436,14 @@ class ParamConcatenation(object):
params = self.params
constr_matrices, ties_matrices, prior_matrices = zip(*map(f, params))
indices = [p._indices() for p in params]
- lc = max([p._max_len_names(cm, __constraints_name__) for p, cm in itertools.izip(params, constr_matrices)])
+ lc = max([p._max_len_names(cm, __constraints_name__) for p, cm in zip(params, constr_matrices)])
lx = max([p._max_len_values() for p in params])
- li = max([p._max_len_index(i) for p, i in itertools.izip(params, indices)])
- lt = max([p._max_len_names(tm, __tie_name__) for p, tm in itertools.izip(params, ties_matrices)])
- lp = max([p._max_len_names(pm, __constraints_name__) for p, pm in itertools.izip(params, prior_matrices)])
+ li = max([p._max_len_index(i) for p, i in zip(params, indices)])
+ lt = max([p._max_len_names(tm, __tie_name__) for p, tm in zip(params, ties_matrices)])
+ lp = max([p._max_len_names(pm, __constraints_name__) for p, pm in zip(params, prior_matrices)])
strings = []
start = True
- for p, cm, i, tm, pm in itertools.izip(params,constr_matrices,indices,ties_matrices,prior_matrices):
+ for p, cm, i, tm, pm in zip(params,constr_matrices,indices,ties_matrices,prior_matrices):
strings.append(p.__str__(constr_matrix=cm, indices=i, prirs=pm, ties=tm, lc=lc, lx=lx, li=li, lp=lp, lt=lt, only_name=(1-start)))
start = False
return "\n".join(strings)
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index bee160b2..1bc6a29e 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -13,11 +13,12 @@ Observable Pattern for patameterization
"""
-from transformations import Transformation,Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED
+from .transformations import Transformation,Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED
import numpy as np
import re
import logging
-from updateable import Updateable
+from .updateable import Updateable
+from functools import reduce
class HierarchyError(Exception):
"""
@@ -36,7 +37,7 @@ def adjust_name_for_printing(name):
name = name.replace("/", "_l_").replace("@", '_at_')
name = name.replace("(", "_of_").replace(")", "")
if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:
- raise NameError, "name {} converted to {} cannot be further converted to valid python variable name!".format(name2, name)
+ raise NameError("name {} converted to {} cannot be further converted to valid python variable name!".format(name2, name))
return name
return ''
@@ -65,13 +66,13 @@ class Parentable(object):
Gets called, when the parent changed, so we can adjust our
inner attributes according to the new parent.
"""
- raise NotImplementedError, "shouldnt happen, Parentable objects need to be able to change their parent"
+ raise NotImplementedError("shouldnt happen, Parentable objects need to be able to change their parent")
def _disconnect_parent(self, *args, **kw):
"""
Disconnect this object from its parent
"""
- raise NotImplementedError, "Abstract superclass"
+ raise NotImplementedError("Abstract superclass")
@property
def _highest_parent_(self):
@@ -109,7 +110,10 @@ class Pickleable(object):
it properly.
:param protocol: pickling protocol to use, python-pickle for details.
"""
- import cPickle as pickle
+ try: #Py2
+ import cPickle as pickle
+ except ImportError: #Py3
+ import pickle
if isinstance(f, str):
with open(f, 'wb') as f:
pickle.dump(self, f, protocol)
@@ -138,9 +142,9 @@ class Pickleable(object):
which = self
which.traverse_parents(parents.append) # collect parents
for p in parents:
- if not memo.has_key(id(p)):memo[id(p)] = None # set all parents to be None, so they will not be copied
- if not memo.has_key(id(self.gradient)):memo[id(self.gradient)] = None # reset the gradient
- if not memo.has_key(id(self._fixes_)):memo[id(self._fixes_)] = None # fixes have to be reset, as this is now highest parent
+            if id(p) not in memo: memo[id(p)] = None # set all parents to be None, so they will not be copied
+        if id(self.gradient) not in memo: memo[id(self.gradient)] = None # reset the gradient
+        if id(self._fixes_) not in memo: memo[id(self._fixes_)] = None # fixes have to be reset, as this is now highest parent
copy = copy.deepcopy(self, memo) # and start the copy
copy._parent_index_ = None
copy._trigger_params_changed()
@@ -163,14 +167,16 @@ class Pickleable(object):
'_Cacher_wrap__cachers', # never pickle cachers
]
dc = dict()
- for k,v in self.__dict__.iteritems():
+ #py3 fix
+ #for k,v in self.__dict__.iteritems():
+ for k,v in self.__dict__.items():
if k not in ignore_list:
dc[k] = v
return dc
def __setstate__(self, state):
self.__dict__.update(state)
- from lists_and_dicts import ObserverList
+ from .lists_and_dicts import ObserverList
self.observers = ObserverList()
self._setup_observers()
self._optimizer_copy_transformed = False
@@ -214,7 +220,7 @@ class Gradcheckable(Pickleable, Parentable):
Perform the checkgrad on the model.
TODO: this can be done more efficiently, when doing it inside here
"""
- raise HierarchyError, "This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!"
+ raise HierarchyError("This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!")
class Nameable(Gradcheckable):
"""
@@ -268,7 +274,7 @@ class Indexable(Nameable, Updateable):
def __init__(self, name, default_constraint=None, *a, **kw):
super(Indexable, self).__init__(name=name, *a, **kw)
self._default_constraint_ = default_constraint
- from index_operations import ParameterIndexOperations
+ from .index_operations import ParameterIndexOperations
self.constraints = ParameterIndexOperations()
self.priors = ParameterIndexOperations()
if self._default_constraint_ is not None:
@@ -310,7 +316,7 @@ class Indexable(Nameable, Updateable):
that is an int array, containing the indexes for the flattened
param inside this parameterized logic.
"""
- from param import ParamConcatenation
+ from .param import ParamConcatenation
if isinstance(param, ParamConcatenation):
-            return np.hstack((self._raveled_index_for(p) for p in param.params))
+            return np.hstack([self._raveled_index_for(p) for p in param.params])
return param._raveled_index() + self._offset_for(param)
@@ -407,7 +413,7 @@ class Indexable(Nameable, Updateable):
repriorized = self.unset_priors()
self._add_to_index_operations(self.priors, repriorized, prior, warning)
- from domains import _REAL, _POSITIVE, _NEGATIVE
+ from .domains import _REAL, _POSITIVE, _NEGATIVE
if prior.domain is _POSITIVE:
self.constrain_positive(warning)
elif prior.domain is _NEGATIVE:
@@ -426,7 +432,9 @@ class Indexable(Nameable, Updateable):
"""evaluate the prior"""
if self.priors.size > 0:
x = self.param_array
- return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()), 0)
+ #py3 fix
+ #return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()), 0)
+ return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.items()), 0)
return 0.
def _log_prior_gradients(self):
@@ -434,7 +442,9 @@ class Indexable(Nameable, Updateable):
if self.priors.size > 0:
x = self.param_array
ret = np.zeros(x.size)
- [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()]
+ #py3 fix
+ #[np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()]
+ [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]
return ret
return 0.
@@ -536,7 +546,7 @@ class Indexable(Nameable, Updateable):
update the constraints and priors view, so that
constraining is automized for the parent.
"""
- from index_operations import ParameterIndexOperationsView
+ from .index_operations import ParameterIndexOperationsView
#if getattr(self, "_in_init_"):
#import ipdb;ipdb.set_trace()
#self.constraints.update(param.constraints, start)
@@ -558,7 +568,7 @@ class Indexable(Nameable, Updateable):
"""
if warning and reconstrained.size > 0:
# TODO: figure out which parameters have changed and only print those
- print "WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)
+ print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
index = self._raveled_index()
which.add(what, index)
return index
@@ -571,7 +581,7 @@ class Indexable(Nameable, Updateable):
if len(transforms) == 0:
transforms = which.properties()
removed = np.empty((0,), dtype=int)
- for t in transforms:
+ for t in list(transforms):
unconstrained = which.remove(t, self._raveled_index())
removed = np.union1d(removed, unconstrained)
if t is __fixed__:
@@ -612,7 +622,9 @@ class OptimizationHandlable(Indexable):
if not self._optimizer_copy_transformed:
self._optimizer_copy_.flat = self.param_array.flat
- [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
+ #py3 fix
+ #[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
+ [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__]
if self.has_parent() and (self.constraints[__fixed__].size != 0 or self._has_ties()):
fixes = np.ones(self.size).astype(bool)
fixes[self.constraints[__fixed__]] = FIXED
@@ -641,21 +653,25 @@ class OptimizationHandlable(Indexable):
if f is None:
self.param_array.flat = p
[np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))
- for c, ind in self.constraints.iteritems() if c != __fixed__]
+ #py3 fix
+ #for c, ind in self.constraints.iteritems() if c != __fixed__]
+ for c, ind in self.constraints.items() if c != __fixed__]
else:
self.param_array.flat[f] = p
[np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))
- for c, ind in self.constraints.iteritems() if c != __fixed__]
+ #py3 fix
+ #for c, ind in self.constraints.iteritems() if c != __fixed__]
+ for c, ind in self.constraints.items() if c != __fixed__]
#self._highest_parent_.tie.propagate_val()
self._optimizer_copy_transformed = False
self.trigger_update()
def _get_params_transformed(self):
- raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!"
+ raise DeprecationWarning("_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!")
#
def _set_params_transformed(self, p):
- raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!"
+ raise DeprecationWarning("_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!")
def _trigger_params_changed(self, trigger_parent=True):
"""
@@ -680,7 +696,9 @@ class OptimizationHandlable(Indexable):
constraint to it.
"""
self._highest_parent_.tie.collate_gradient()
- [np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+ #py3 fix
+ #[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+ [np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
if self._has_fixes(): return g[self._fixes_]
return g
@@ -690,7 +708,9 @@ class OptimizationHandlable(Indexable):
constraint to it.
"""
self._highest_parent_.tie.collate_gradient()
- [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+ #py3 fix
+ #[np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+ [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
if self._has_fixes(): return g[self._fixes_]
return g
@@ -701,7 +721,7 @@ class OptimizationHandlable(Indexable):
Return the number of parameters of this parameter_handle.
Param objects will always return 0.
"""
- raise NotImplemented, "Abstract, please implement in respective classes"
+        raise NotImplementedError("Abstract, please implement in respective classes")
def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True):
"""
@@ -750,7 +770,9 @@ class OptimizationHandlable(Indexable):
self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)
# now draw from prior where possible
x = self.param_array.copy()
- [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
+ #Py3 fix
+ #[np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
+ [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.items() if not p is None]
unfixlist = np.ones((self.size,),dtype=np.bool)
unfixlist[self.constraints[__fixed__]] = False
self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
@@ -947,7 +969,7 @@ class Parameterizable(OptimizationHandlable):
self._add_parameter_name(param, ignore_added_names)
# and makes sure to not delete programmatically added parameters
for other in self.parameters[::-1]:
- if other is not param and other.name.startswith(param.name):
+ if other is not param and other.name == param.name:
warn_and_retry(param, _name_digit.match(other.name))
return
if pname not in dir(self):
diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py
index 44173f58..d2d06fe3 100644
--- a/GPy/core/parameterization/parameterized.py
+++ b/GPy/core/parameterization/parameterized.py
@@ -1,15 +1,15 @@
# Copyright (c) 2014, Max Zwiessele, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
+import six # For metaclass support in Python 2 and 3 simultaneously
import numpy; np = numpy
import itertools
from re import compile, _pattern_type
-from param import ParamConcatenation
+from .param import ParamConcatenation
-from parameter_core import HierarchyError, Parameterizable, adjust_name_for_printing
+from .parameter_core import HierarchyError, Parameterizable, adjust_name_for_printing
import logging
-from GPy.core.parameterization.index_operations import ParameterIndexOperationsView
+from .index_operations import ParameterIndexOperationsView
logger = logging.getLogger("parameters changed meta")
class ParametersChangedMeta(type):
@@ -27,6 +27,7 @@ class ParametersChangedMeta(type):
self.parameters_changed()
return self
+@six.add_metaclass(ParametersChangedMeta)
class Parameterized(Parameterizable):
"""
Parameterized class
@@ -73,7 +74,9 @@ class Parameterized(Parameterizable):
# Metaclass for parameters changed after init.
# This makes sure, that parameters changed will always be called after __init__
# **Never** call parameters_changed() yourself
- __metaclass__ = ParametersChangedMeta
+ #This is ignored in Python 3 -- you need to put the meta class in the function definition.
+ #__metaclass__ = ParametersChangedMeta
+ #The six module is used to support both Python 2 and 3 simultaneously
#===========================================================================
def __init__(self, name=None, parameters=[], *a, **kw):
super(Parameterized, self).__init__(name=name, *a, **kw)
@@ -131,7 +134,7 @@ class Parameterized(Parameterizable):
if param.has_parent():
def visit(parent, self):
if parent is self:
- raise HierarchyError, "You cannot add a parameter twice into the hierarchy"
+ raise HierarchyError("You cannot add a parameter twice into the hierarchy")
param.traverse_parents(visit, self)
param._parent_.unlink_parameter(param)
# make sure the size is set
@@ -173,7 +176,7 @@ class Parameterized(Parameterizable):
self._highest_parent_._connect_fixes()
else:
- raise HierarchyError, """Parameter exists already, try making a copy"""
+ raise HierarchyError("""Parameter exists already, try making a copy""")
def link_parameters(self, *parameters):
@@ -189,9 +192,9 @@ class Parameterized(Parameterizable):
"""
if not param in self.parameters:
try:
- raise RuntimeError, "{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name)
+ raise RuntimeError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
except AttributeError:
- raise RuntimeError, "{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param))
+ raise RuntimeError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
start = sum([p.size for p in self.parameters[:param._parent_index_]])
self._remove_parameter_name(param)
@@ -215,9 +218,9 @@ class Parameterized(Parameterizable):
self._highest_parent_._notify_parent_change()
def add_parameter(self, *args, **kwargs):
- raise DeprecationWarning, "add_parameter was renamed to link_parameter to avoid confusion of setting variables, use link_parameter instead"
+ raise DeprecationWarning("add_parameter was renamed to link_parameter to avoid confusion of setting variables, use link_parameter instead")
def remove_parameter(self, *args, **kwargs):
- raise DeprecationWarning, "remove_parameter was renamed to unlink_parameter to avoid confusion of setting variables, use unlink_parameter instead"
+ raise DeprecationWarning("remove_parameter was renamed to unlink_parameter to avoid confusion of setting variables, use unlink_parameter instead")
def _connect_parameters(self, ignore_added_names=False):
# connect parameterlist to this parameterized object
@@ -237,7 +240,7 @@ class Parameterized(Parameterizable):
self._param_slices_ = []
for i, p in enumerate(self.parameters):
if not p.param_array.flags['C_CONTIGUOUS']:
- raise ValueError, "This should not happen! Please write an email to the developers with the code, which reproduces this error. All parameter arrays must be C_CONTIGUOUS"
+ raise ValueError("This should not happen! Please write an email to the developers with the code, which reproduces this error. All parameter arrays must be C_CONTIGUOUS")
p._parent_ = self
p._parent_index_ = i
@@ -268,7 +271,7 @@ class Parameterized(Parameterizable):
"""
if not isinstance(regexp, _pattern_type): regexp = compile(regexp)
found_params = []
- for n, p in itertools.izip(self.parameter_names(False, False, True), self.flattened_parameters):
+ for n, p in zip(self.parameter_names(False, False, True), self.flattened_parameters):
if regexp.match(n) is not None:
found_params.append(p)
return found_params
@@ -279,7 +282,7 @@ class Parameterized(Parameterizable):
else:
if paramlist is None:
paramlist = self.grep_param_names(name)
- if len(paramlist) < 1: raise AttributeError, name
+ if len(paramlist) < 1: raise AttributeError(name)
if len(paramlist) == 1:
if isinstance(paramlist[-1], Parameterized):
paramlist = paramlist[-1].flattened_parameters
@@ -295,7 +298,7 @@ class Parameterized(Parameterizable):
try:
self.param_array[name] = value
except:
- raise ValueError, "Setting by slice or index only allowed with array-like"
+ raise ValueError("Setting by slice or index only allowed with array-like")
self.trigger_update()
else:
try: param = self.__getitem__(name, paramlist)
@@ -325,7 +328,7 @@ class Parameterized(Parameterizable):
self._notify_parent_change()
self.parameters_changed()
except Exception as e:
- print "WARNING: caught exception {!s}, trying to continue".format(e)
+ print("WARNING: caught exception {!s}, trying to continue".format(e))
def copy(self, memo=None):
if memo is None:
@@ -379,7 +382,7 @@ class Parameterized(Parameterizable):
pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]])
format_spec = "
{{name:<{0}s}}
{{desc:>{1}s}}
{{const:^{2}s}}
{{pri:^{3}s}}
{{t:^{4}s}}
".format(nl, sl, cl, pl, tl)
to_print = []
- for n, d, c, t, p in itertools.izip(names, desc, constrs, ts, prirs):
+ for n, d, c, t, p in zip(names, desc, constrs, ts, prirs):
to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))
sep = '-' * (nl + sl + cl + + pl + tl + 8 * 2 + 3)
if header:
@@ -414,7 +417,7 @@ class Parameterized(Parameterizable):
pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]])
format_spec = " \033[1m{{name:<{0}s}}\033[0;0m | {{desc:>{1}s}} | {{const:^{2}s}} | {{pri:^{3}s}} | {{t:^{4}s}}".format(nl, sl, cl, pl, tl)
to_print = []
- for n, d, c, t, p in itertools.izip(names, desc, constrs, ts, prirs):
+ for n, d, c, t, p in zip(names, desc, constrs, ts, prirs):
to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))
sep = '-' * (nl + sl + cl + + pl + tl + 8 * 2 + 3)
if header:
diff --git a/GPy/core/parameterization/priors.py b/GPy/core/parameterization/priors.py
index 20a78691..3a213fcd 100644
--- a/GPy/core/parameterization/priors.py
+++ b/GPy/core/parameterization/priors.py
@@ -5,7 +5,7 @@
import numpy as np
from scipy.special import gammaln, digamma
from ...util.linalg import pdinv
-from domains import _REAL, _POSITIVE
+from .domains import _REAL, _POSITIVE
import warnings
import weakref
@@ -15,8 +15,12 @@ class Prior(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance or cls._instance.__class__ is not cls:
- cls._instance = super(Prior, cls).__new__(cls, *args, **kwargs)
- return cls._instance
+ newfunc = super(Prior, cls).__new__
+ if newfunc is object.__new__:
+ cls._instance = newfunc(cls)
+ else:
+ cls._instance = newfunc(cls, *args, **kwargs)
+ return cls._instance
def pdf(self, x):
return np.exp(self.lnpdf(x))
@@ -52,7 +56,11 @@ class Gaussian(Prior):
for instance in cls._instances:
if instance().mu == mu and instance().sigma == sigma:
return instance()
- o = super(Prior, cls).__new__(cls, mu, sigma)
+ newfunc = super(Prior, cls).__new__
+ if newfunc is object.__new__:
+ o = newfunc(cls)
+ else:
+ o = newfunc(cls, mu, sigma)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
@@ -140,7 +148,11 @@ class LogGaussian(Gaussian):
for instance in cls._instances:
if instance().mu == mu and instance().sigma == sigma:
return instance()
- o = super(Prior, cls).__new__(cls, mu, sigma)
+ newfunc = super(Prior, cls).__new__
+ if newfunc is object.__new__:
+ o = newfunc(cls)
+ else:
+ o = newfunc(cls, mu, sigma)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
@@ -258,7 +270,11 @@ class Gamma(Prior):
for instance in cls._instances:
if instance().a == a and instance().b == b:
return instance()
- o = super(Prior, cls).__new__(cls, a, b)
+ newfunc = super(Prior, cls).__new__
+ if newfunc is object.__new__:
+ o = newfunc(cls)
+ else:
+ o = newfunc(cls, a, b)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
@@ -398,7 +414,7 @@ class DGPLVM_KFDA(Prior):
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
- for j in xrange(self.datanum):
+ for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
@@ -532,6 +548,230 @@ class DGPLVM(Prior):
return idx
return -1
+ # This function assigns each data point to its own class
+ # and returns the dictionary which contains the class name and parameters.
+ def compute_cls(self, x):
+ cls = {}
+ # Appending each data point to its proper class
+ for j in range(self.datanum):
+ class_label = self.get_class_label(self.lbl[j])
+ if class_label not in cls:
+ cls[class_label] = []
+ cls[class_label].append(x[j])
+ return cls
+
+ # This function computes mean of each class. The mean is calculated through each dimension
+ def compute_Mi(self, cls):
+ M_i = np.zeros((self.classnum, self.dim))
+ for i in cls:
+ # Mean of each class
+ class_i = cls[i]
+ M_i[i] = np.mean(class_i, axis=0)
+ return M_i
+
+ # Adding data points as tuple to the dictionary so that we can access indices
+ def compute_indices(self, x):
+ data_idx = {}
+ for j in range(self.datanum):
+ class_label = self.get_class_label(self.lbl[j])
+ if class_label not in data_idx:
+ data_idx[class_label] = []
+ t = (j, x[j])
+ data_idx[class_label].append(t)
+ return data_idx
+
+ # Adding indices to the list so we can access whole the indices
+ def compute_listIndices(self, data_idx):
+ lst_idx = []
+ lst_idx_all = []
+ for i in data_idx:
+ if len(lst_idx) == 0:
+ pass
+ #Do nothing, because it is the first time list is created so is empty
+ else:
+ lst_idx = []
+ # Here we put indices of each class in to the list called lst_idx_all
+ for m in range(len(data_idx[i])):
+ lst_idx.append(data_idx[i][m][0])
+ lst_idx_all.append(lst_idx)
+ return lst_idx_all
+
+ # This function calculates between classes variances
+ def compute_Sb(self, cls, M_i, M_0):
+ Sb = np.zeros((self.dim, self.dim))
+ for i in cls:
+ B = (M_i[i] - M_0).reshape(self.dim, 1)
+ B_trans = B.transpose()
+ Sb += (float(len(cls[i])) / self.datanum) * B.dot(B_trans)
+ return Sb
+
+ # This function calculates within classes variances
+ def compute_Sw(self, cls, M_i):
+ Sw = np.zeros((self.dim, self.dim))
+ for i in cls:
+ N_i = float(len(cls[i]))
+ W_WT = np.zeros((self.dim, self.dim))
+ for xk in cls[i]:
+ W = (xk - M_i[i])
+ W_WT += np.outer(W, W)
+ Sw += (N_i / self.datanum) * ((1. / N_i) * W_WT)
+ return Sw
+
+ # Calculating beta and Bi for Sb
+ def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
+ # import pdb
+ # pdb.set_trace()
+ B_i = np.zeros((self.classnum, self.dim))
+ Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
+ for i in data_idx:
+ # pdb.set_trace()
+ # Calculating Bi
+ B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
+ for k in range(self.datanum):
+ for i in data_idx:
+ N_i = float(len(data_idx[i]))
+ if k in lst_idx_all[i]:
+ beta = (float(1) / N_i) - (float(1) / self.datanum)
+ Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
+ else:
+ beta = -(float(1) / self.datanum)
+ Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
+ Sig_beta_B_i_all = Sig_beta_B_i_all.transpose()
+ return Sig_beta_B_i_all
+
+
+ # Calculating W_j s separately so we can access all the W_j s anytime
+ def compute_wj(self, data_idx, M_i):
+ W_i = np.zeros((self.datanum, self.dim))
+ for i in data_idx:
+ N_i = float(len(data_idx[i]))
+ for tpl in data_idx[i]:
+ xj = tpl[1]
+ j = tpl[0]
+ W_i[j] = (xj - M_i[i])
+ return W_i
+
+ # Calculating alpha and Wj for Sw
+ def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
+ Sig_alpha_W_i = np.zeros((self.datanum, self.dim))
+ for i in data_idx:
+ N_i = float(len(data_idx[i]))
+ for tpl in data_idx[i]:
+ k = tpl[0]
+ for j in lst_idx_all[i]:
+ if k == j:
+ alpha = 1 - (float(1) / N_i)
+ Sig_alpha_W_i[k] += (alpha * W_i[j])
+ else:
+ alpha = 0 - (float(1) / N_i)
+ Sig_alpha_W_i[k] += (alpha * W_i[j])
+ Sig_alpha_W_i = (1. / self.datanum) * np.transpose(Sig_alpha_W_i)
+ return Sig_alpha_W_i
+
+ # This function calculates log of our prior
+ def lnpdf(self, x):
+ x = x.reshape(self.x_shape)
+ cls = self.compute_cls(x)
+ M_0 = np.mean(x, axis=0)
+ M_i = self.compute_Mi(cls)
+ Sb = self.compute_Sb(cls, M_i, M_0)
+ Sw = self.compute_Sw(cls, M_i)
+ # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
+ #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
+ #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
+ Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
+ return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
+
+ # This function calculates derivative of the log of prior function
+ def lnpdf_grad(self, x):
+ x = x.reshape(self.x_shape)
+ cls = self.compute_cls(x)
+ M_0 = np.mean(x, axis=0)
+ M_i = self.compute_Mi(cls)
+ Sb = self.compute_Sb(cls, M_i, M_0)
+ Sw = self.compute_Sw(cls, M_i)
+ data_idx = self.compute_indices(x)
+ lst_idx_all = self.compute_listIndices(data_idx)
+ Sig_beta_B_i_all = self.compute_sig_beta_Bi(data_idx, M_i, M_0, lst_idx_all)
+ W_i = self.compute_wj(data_idx, M_i)
+ Sig_alpha_W_i = self.compute_sig_alpha_W(data_idx, lst_idx_all, W_i)
+
+ # Calculating inverse of Sb and its transpose and minus
+ # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
+ #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
+ #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
+ Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
+ Sb_inv_N_trans = np.transpose(Sb_inv_N)
+ Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
+ Sw_trans = np.transpose(Sw)
+
+ # Calculating DJ/DXk
+ DJ_Dxk = 2 * (
+ Sb_inv_N_trans_minus.dot(Sw_trans).dot(Sb_inv_N_trans).dot(Sig_beta_B_i_all) + Sb_inv_N_trans.dot(
+ Sig_alpha_W_i))
+ # Calculating derivative of the log of the prior
+ DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
+ return DPx_Dx.T
+
+ # def frb(self, x):
+ # from functools import partial
+ # from GPy.models import GradientChecker
+ # f = partial(self.lnpdf)
+ # df = partial(self.lnpdf_grad)
+ # grad = GradientChecker(f, df, x, 'X')
+ # grad.checkgrad(verbose=1)
+
+ def rvs(self, n):
+ return np.random.rand(n) # A WRONG implementation
+
+ def __str__(self):
+ return 'DGPLVM_prior_Raq'
+
+
+# ******************************************
+
+from .. import Parameterized
+from .. import Param
+class DGPLVM_Lamda(Prior, Parameterized):
+ """
+ Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.
+
+ :param sigma2: constant
+
+ .. Note:: DGPLVM for Classification paper implementation
+
+ """
+ domain = _REAL
+ # _instances = []
+ # def __new__(cls, mu, sigma): # Singleton:
+ # if cls._instances:
+ # cls._instances[:] = [instance for instance in cls._instances if instance()]
+ # for instance in cls._instances:
+ # if instance().mu == mu and instance().sigma == sigma:
+ # return instance()
+ # o = super(Prior, cls).__new__(cls, mu, sigma)
+ # cls._instances.append(weakref.ref(o))
+ # return cls._instances[-1]()
+
+ def __init__(self, sigma2, lbl, x_shape, lamda, name='DP_prior'):
+ super(DGPLVM_Lamda, self).__init__(name=name)
+ self.sigma2 = sigma2
+ # self.x = x
+ self.lbl = lbl
+ self.lamda = lamda
+ self.classnum = lbl.shape[1]
+ self.datanum = lbl.shape[0]
+ self.x_shape = x_shape
+ self.dim = x_shape[1]
+ self.lamda = Param('lamda', np.diag(lamda))
+ self.link_parameter(self.lamda)
+
+ def get_class_label(self, y):
+ for idx, v in enumerate(y):
+ if v == 1:
+ return idx
+ return -1
+
# This function assigns each data point to its own class
# and returns the dictionary which contains the class name and parameters.
def compute_cls(self, x):
@@ -603,7 +843,7 @@ class DGPLVM(Prior):
# Calculating beta and Bi for Sb
def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
- # import pdb
+ import pdb
# pdb.set_trace()
B_i = np.zeros((self.classnum, self.dim))
Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
@@ -655,6 +895,13 @@ class DGPLVM(Prior):
# This function calculates log of our prior
def lnpdf(self, x):
x = x.reshape(self.x_shape)
+
+        # NOTE(review): normalization of lamda (line below) is intentionally disabled — confirm before re-enabling.
+ #self.lamda.values[:] = self.lamda.values/self.lamda.values.sum()
+
+ xprime = x.dot(np.diagflat(self.lamda))
+ x = xprime
+ # print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
@@ -669,6 +916,9 @@ class DGPLVM(Prior):
# This function calculates derivative of the log of prior function
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
+ xprime = x.dot(np.diagflat(self.lamda))
+ x = xprime
+ # print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
@@ -695,7 +945,21 @@ class DGPLVM(Prior):
Sig_alpha_W_i))
# Calculating derivative of the log of the prior
DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
- return DPx_Dx.T
+
+ DPxprim_Dx = np.diagflat(self.lamda).dot(DPx_Dx)
+
+        # GPy uses denominator layout, so transpose the gradient to match the shape of our matrix.
+ DPxprim_Dx = DPxprim_Dx.T
+
+ DPxprim_Dlamda = DPx_Dx.dot(x)
+
+        # GPy uses denominator layout, so transpose the gradient to match the shape of our matrix.
+ DPxprim_Dlamda = DPxprim_Dlamda.T
+
+ self.lamda.gradient = np.diag(DPxprim_Dlamda)
+ # print DPxprim_Dx
+ return DPxprim_Dx
+
# def frb(self, x):
# from functools import partial
@@ -709,9 +973,9 @@ class DGPLVM(Prior):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
- return 'DGPLVM_prior_Raq'
-
+ return 'DGPLVM_prior_Raq_Lamda'
+# ******************************************
class DGPLVM_T(Prior):
"""
@@ -742,7 +1006,7 @@ class DGPLVM_T(Prior):
self.datanum = lbl.shape[0]
self.x_shape = x_shape
self.dim = x_shape[1]
- self.vec = vec
+ self.vec = vec
def get_class_label(self, y):
@@ -756,7 +1020,7 @@ class DGPLVM_T(Prior):
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
- for j in xrange(self.datanum):
+ for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
@@ -764,18 +1028,19 @@ class DGPLVM_T(Prior):
return cls
# This function computes mean of each class. The mean is calculated through each dimension
- def compute_Mi(self, cls, vec):
+ def compute_Mi(self, cls):
M_i = np.zeros((self.classnum, self.dim))
for i in cls:
# Mean of each class
- class_i = np.multiply(cls[i],vec)
+ # class_i = np.multiply(cls[i],vec)
+ class_i = cls[i]
M_i[i] = np.mean(class_i, axis=0)
return M_i
# Adding data points as tuple to the dictionary so that we can access indices
def compute_indices(self, x):
data_idx = {}
- for j in xrange(self.datanum):
+ for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in data_idx:
data_idx[class_label] = []
@@ -794,7 +1059,7 @@ class DGPLVM_T(Prior):
else:
lst_idx = []
# Here we put indices of each class in to the list called lst_idx_all
- for m in xrange(len(data_idx[i])):
+ for m in range(len(data_idx[i])):
lst_idx.append(data_idx[i][m][0])
lst_idx_all.append(lst_idx)
return lst_idx_all
@@ -830,7 +1095,7 @@ class DGPLVM_T(Prior):
# pdb.set_trace()
# Calculating Bi
B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
- for k in xrange(self.datanum):
+ for k in range(self.datanum):
for i in data_idx:
N_i = float(len(data_idx[i]))
if k in lst_idx_all[i]:
@@ -874,24 +1139,30 @@ class DGPLVM_T(Prior):
# This function calculates log of our prior
def lnpdf(self, x):
x = x.reshape(self.x_shape)
+ xprim = x.dot(self.vec)
+ x = xprim
+ # print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
- M_i = self.compute_Mi(cls, self.vec)
+ M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#print 'SB_inv: ', Sb_inv_N
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
- Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
+ Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
# This function calculates derivative of the log of prior function
def lnpdf_grad(self, x):
- x = x.reshape(self.x_shape)
- cls = self.compute_cls(x)
+ x = x.reshape(self.x_shape)
+ xprim = x.dot(self.vec)
+ x = xprim
+ # print x
+ cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
- M_i = self.compute_Mi(cls, self.vec)
+ M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
data_idx = self.compute_indices(x)
@@ -905,7 +1176,7 @@ class DGPLVM_T(Prior):
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#print 'SB_inv: ',Sb_inv_N
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
- Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
+ Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
Sb_inv_N_trans = np.transpose(Sb_inv_N)
Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
Sw_trans = np.transpose(Sw)
diff --git a/GPy/core/parameterization/ties_and_remappings.py b/GPy/core/parameterization/ties_and_remappings.py
index a81b8d61..527bc47c 100644
--- a/GPy/core/parameterization/ties_and_remappings.py
+++ b/GPy/core/parameterization/ties_and_remappings.py
@@ -2,8 +2,8 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
-from parameterized import Parameterized
-from param import Param
+from .parameterized import Parameterized
+from .param import Param
class Remapping(Parameterized):
def mapping(self):
@@ -98,7 +98,7 @@ class Tie(Parameterized):
if np.all(self.label_buf[idx]==0):
# None of p has been tied before.
tie_idx = self._expandTieParam(1)
- print tie_idx
+ print(tie_idx)
tie_id = self.label_buf.max()+1
self.label_buf[tie_idx] = tie_id
else:
@@ -185,18 +185,18 @@ class Tie(Parameterized):
def _check_change(self):
changed = False
if self.tied_param is not None:
- for i in xrange(self.tied_param.size):
+ for i in range(self.tied_param.size):
b0 = self.label_buf==self.label_buf[self.buf_idx[i]]
b = self._highest_parent_.param_array[b0]!=self.tied_param[i]
if b.sum()==0:
- print 'XXX'
+ print('XXX')
continue
elif b.sum()==1:
- print '!!!'
+ print('!!!')
val = self._highest_parent_.param_array[b0][b][0]
self._highest_parent_.param_array[b0] = val
else:
- print '@@@'
+ print('@@@')
self._highest_parent_.param_array[b0] = self.tied_param[i]
changed = True
return changed
@@ -212,11 +212,11 @@ class Tie(Parameterized):
if self.tied_param is not None:
self.tied_param.gradient = 0.
[np.put(self.tied_param.gradient, i, self._highest_parent_.gradient[self.label_buf==self.label_buf[self.buf_idx[i]]].sum())
- for i in xrange(self.tied_param.size)]
+ for i in range(self.tied_param.size)]
def propagate_val(self):
if self.tied_param is not None:
- for i in xrange(self.tied_param.size):
+ for i in range(self.tied_param.size):
self._highest_parent_.param_array[self.label_buf==self.label_buf[self.buf_idx[i]]] = self.tied_param[i]
diff --git a/GPy/core/parameterization/transformations.py b/GPy/core/parameterization/transformations.py
index d929b1d9..7e15cee9 100644
--- a/GPy/core/parameterization/transformations.py
+++ b/GPy/core/parameterization/transformations.py
@@ -3,7 +3,7 @@
import numpy as np
-from domains import _POSITIVE,_NEGATIVE, _BOUNDED
+from .domains import _POSITIVE,_NEGATIVE, _BOUNDED
import weakref
import sys
@@ -72,7 +72,7 @@ class Logexp(Transformation):
return np.einsum('i,i->i', df, np.where(f>_lim_val, 1., 1. - np.exp(-f)))
def initialize(self, f):
if np.any(f < 0.):
- print "Warning: changing parameters to satisfy constraints"
+ print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def __str__(self):
return '+ve'
@@ -130,7 +130,7 @@ class NormalTheta(Transformation):
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
- print "Warning: changing parameters to satisfy constraints"
+ print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
@@ -177,7 +177,7 @@ class NormalNaturalAntti(NormalTheta):
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
- print "Warning: changing parameters to satisfy constraints"
+ print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
@@ -220,7 +220,7 @@ class NormalEta(Transformation):
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
- print "Warning: changing parameters to satisfy constraints"
+ print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
@@ -360,7 +360,7 @@ class LogexpNeg(Transformation):
return np.einsum('i,i->i', df, np.where(f>_lim_val, -1, -1 + np.exp(-f)))
def initialize(self, f):
if np.any(f < 0.):
- print "Warning: changing parameters to satisfy constraints"
+ print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def __str__(self):
return '+ve'
@@ -412,7 +412,7 @@ class LogexpClipped(Logexp):
return np.einsum('i,i->i', df, gf) # np.where(f < self.lower, 0, gf)
def initialize(self, f):
if np.any(f < 0.):
- print "Warning: changing parameters to satisfy constraints"
+ print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def __str__(self):
return '+ve_c'
@@ -428,7 +428,7 @@ class Exponent(Transformation):
return np.einsum('i,i->i', df, f)
def initialize(self, f):
if np.any(f < 0.):
- print "Warning: changing parameters to satisfy constraints"
+ print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def __str__(self):
return '+ve'
@@ -468,7 +468,11 @@ class Logistic(Transformation):
for instance in cls._instances:
if instance().lower == lower and instance().upper == upper:
return instance()
- o = super(Transformation, cls).__new__(cls, lower, upper, *args, **kwargs)
+ newfunc = super(Transformation, cls).__new__
+ if newfunc is object.__new__:
+ o = newfunc(cls)
+ else:
+ o = newfunc(cls, lower, upper, *args, **kwargs)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, lower, upper):
@@ -486,7 +490,7 @@ class Logistic(Transformation):
return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference)
def initialize(self, f):
if np.any(np.logical_or(f < self.lower, f > self.upper)):
- print "Warning: changing parameters to satisfy constraints"
+ print("Warning: changing parameters to satisfy constraints")
#return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
#FIXME: Max, zeros_like right?
return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)
diff --git a/GPy/core/parameterization/updateable.py b/GPy/core/parameterization/updateable.py
index 379e92e1..07083ce0 100644
--- a/GPy/core/parameterization/updateable.py
+++ b/GPy/core/parameterization/updateable.py
@@ -3,7 +3,7 @@ Created on 11 Nov 2014
@author: maxz
'''
-from observable import Observable
+from .observable import Observable
class Updateable(Observable):
@@ -35,7 +35,7 @@ class Updateable(Observable):
self.trigger_update()
def toggle_update(self):
- print "deprecated: toggle_update was renamed to update_toggle for easier access"
+ print("deprecated: toggle_update was renamed to update_toggle for easier access")
self.update_toggle()
def update_toggle(self):
self.update_model(not self.update_model())
diff --git a/GPy/core/parameterization/variational.py b/GPy/core/parameterization/variational.py
index 7cc5c99a..ab196b98 100644
--- a/GPy/core/parameterization/variational.py
+++ b/GPy/core/parameterization/variational.py
@@ -5,9 +5,9 @@ Created on 6 Nov 2013
'''
import numpy as np
-from parameterized import Parameterized
-from param import Param
-from transformations import Logexp, Logistic,__fixed__
+from .parameterized import Parameterized
+from .param import Param
+from .transformations import Logexp, Logistic,__fixed__
from GPy.util.misc import param_to_array
from GPy.util.caching import Cache_this
@@ -16,13 +16,13 @@ class VariationalPrior(Parameterized):
super(VariationalPrior, self).__init__(name=name, **kw)
def KL_divergence(self, variational_posterior):
- raise NotImplementedError, "override this for variational inference of latent space"
+ raise NotImplementedError("override this for variational inference of latent space")
def update_gradients_KL(self, variational_posterior):
"""
updates the gradients for mean and variance **in place**
"""
- raise NotImplementedError, "override this for variational inference of latent space"
+ raise NotImplementedError("override this for variational inference of latent space")
class NormalPrior(VariationalPrior):
def KL_divergence(self, variational_posterior):
@@ -50,31 +50,29 @@ class SpikeAndSlabPrior(VariationalPrior):
def KL_divergence(self, variational_posterior):
mu = variational_posterior.mean
S = variational_posterior.variance
- gamma,gamma1 = variational_posterior.gamma_probabilities()
- log_gamma,log_gamma1 = variational_posterior.gamma_log_prob()
+ gamma = variational_posterior.gamma.values
if len(self.pi.shape)==2:
- idx = np.unique(gamma._raveled_index()/gamma.shape[-1])
+ idx = np.unique(variational_posterior.gamma._raveled_index()/gamma.shape[-1])
pi = self.pi[idx]
else:
pi = self.pi
var_mean = np.square(mu)/self.variance
var_S = (S/self.variance - np.log(S))
- var_gamma = (gamma*(log_gamma-np.log(pi))).sum()+(gamma1*(log_gamma1-np.log(1-pi))).sum()
+ var_gamma = (gamma*np.log(gamma/pi)).sum()+((1-gamma)*np.log((1-gamma)/(1-pi))).sum()
return var_gamma+ (gamma* (np.log(self.variance)-1. +var_mean + var_S)).sum()/2.
def update_gradients_KL(self, variational_posterior):
mu = variational_posterior.mean
S = variational_posterior.variance
- gamma,gamma1 = variational_posterior.gamma_probabilities()
- log_gamma,log_gamma1 = variational_posterior.gamma_log_prob()
+ gamma = variational_posterior.gamma.values
if len(self.pi.shape)==2:
- idx = np.unique(gamma._raveled_index()/gamma.shape[-1])
+ idx = np.unique(variational_posterior.gamma._raveled_index()/gamma.shape[-1])
pi = self.pi[idx]
else:
pi = self.pi
- variational_posterior.binary_prob.gradient -= (np.log((1-pi)/pi)+log_gamma-log_gamma1+((np.square(mu)+S)/self.variance-np.log(S)+np.log(self.variance)-1.)/2.)*gamma*gamma1
+ variational_posterior.binary_prob.gradient -= np.log((1-pi)/pi*gamma/(1.-gamma))+((np.square(mu)+S)/self.variance-np.log(S)+np.log(self.variance)-1.)/2.
mu.gradient -= gamma*mu/self.variance
S.gradient -= (1./self.variance - 1./S) * gamma /2.
if self.learnPi:
@@ -141,7 +139,7 @@ class NormalPosterior(VariationalPosterior):
holds the means and variances for a factorizing multivariate normal distribution
'''
- def plot(self, *args):
+ def plot(self, *args, **kwargs):
"""
Plot latent space X in 1D:
@@ -150,8 +148,7 @@ class NormalPosterior(VariationalPosterior):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ...plotting.matplot_dep import variational_plots
- import matplotlib
- return variational_plots.plot(self,*args)
+ return variational_plots.plot(self, *args, **kwargs)
class SpikeAndSlabPosterior(VariationalPosterior):
'''
@@ -162,24 +159,8 @@ class SpikeAndSlabPosterior(VariationalPosterior):
binary_prob : the probability of the distribution on the slab part.
"""
super(SpikeAndSlabPosterior, self).__init__(means, variances, name)
- self.gamma = Param("binary_prob",binary_prob)
+ self.gamma = Param("binary_prob",binary_prob,Logistic(0.,1.))
self.link_parameter(self.gamma)
-
- @Cache_this(limit=5)
- def gamma_probabilities(self):
- prob = np.zeros_like(param_to_array(self.gamma))
- prob[self.gamma>-710] = 1./(1.+np.exp(-self.gamma[self.gamma>-710]))
- prob1 = -np.zeros_like(param_to_array(self.gamma))
- prob1[self.gamma<710] = 1./(1.+np.exp(self.gamma[self.gamma<710]))
- return prob, prob1
-
- @Cache_this(limit=5)
- def gamma_log_prob(self):
- loggamma = param_to_array(self.gamma).copy()
- loggamma[loggamma>-40] = -np.log1p(np.exp(-loggamma[loggamma>-40]))
- loggamma1 = -param_to_array(self.gamma).copy()
- loggamma1[loggamma1>-40] = -np.log1p(np.exp(-loggamma1[loggamma1>-40]))
- return loggamma,loggamma1
def set_gradients(self, grad):
self.mean.gradient, self.variance.gradient, self.gamma.gradient = grad
diff --git a/GPy/core/sparse_gp.py b/GPy/core/sparse_gp.py
index 005ef2ac..0c5e1dd2 100644
--- a/GPy/core/sparse_gp.py
+++ b/GPy/core/sparse_gp.py
@@ -2,19 +2,15 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
-from gp import GP
-from parameterization.param import Param
+from .gp import GP
+from .parameterization.param import Param
from ..inference.latent_function_inference import var_dtc
from .. import likelihoods
-from parameterization.variational import VariationalPosterior, NormalPosterior
+from .parameterization.variational import VariationalPosterior, NormalPosterior
from ..util.linalg import mdot
import logging
-from GPy.inference.latent_function_inference.posterior import Posterior
-from GPy.inference.optimization.stochastics import SparseGPStochastics,\
- SparseGPMissing
-#no stochastics.py file added! from GPy.inference.optimization.stochastics import SparseGPStochastics,\
- #SparseGPMissing
+import itertools
logger = logging.getLogger("sparse gp")
class SparseGP(GP):
@@ -25,6 +21,10 @@ class SparseGP(GP):
(Gaussian likelihoods) as well as non-conjugate sparse methods based on
these.
+ This is not for missing data, as the implementation for missing data involves
+ some inefficient optimization routine decisions.
+    For missing data, see the SparseGP implementation in :py:class:`~GPy.models.sparse_gp_minibatch.SparseGPMiniBatch`.
+
:param X: inputs
:type X: np.ndarray (num_data x input_dim)
:param likelihood: a likelihood instance, containing the observed data
@@ -40,7 +40,7 @@ class SparseGP(GP):
"""
- def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None,
+ def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, inference_method=None,
name='sparse gp', Y_metadata=None, normalizer=False):
#pick a sensible inference method
if inference_method is None:
@@ -48,13 +48,13 @@ class SparseGP(GP):
inference_method = var_dtc.VarDTC(limit=1 if not self.missing_data else Y.shape[1])
else:
#inference_method = ??
- raise NotImplementedError, "what to do what to do?"
- print "defaulting to ", inference_method, "for latent function inference"
+ raise NotImplementedError("what to do what to do?")
+ print("defaulting to ", inference_method, "for latent function inference")
self.Z = Param('inducing inputs', Z)
self.num_inducing = Z.shape[0]
- GP.__init__(self, X, Y, kernel, likelihood, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=normalizer)
+ GP.__init__(self, X, Y, kernel, likelihood, mean_function, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=normalizer)
logger.info("Adding Z as parameter")
self.link_parameter(self.Z, index=0)
@@ -63,6 +63,14 @@ class SparseGP(GP):
def has_uncertain_inputs(self):
return isinstance(self.X, VariationalPosterior)
+ def set_Z(self, Z, trigger_update=True):
+ if trigger_update: self.update_model(False)
+ self.unlink_parameter(self.Z)
+ self.Z = Param('inducing inputs',Z)
+ self.link_parameter(self.Z, index=0)
+ if trigger_update: self.update_model(True)
+ if trigger_update: self._trigger_params_changed()
+
def parameters_changed(self):
self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y, self.Y_metadata)
@@ -103,15 +111,15 @@ class SparseGP(GP):
def _raw_predict(self, Xnew, full_cov=False, kern=None):
"""
- Make a prediction for the latent function values.
-
+ Make a prediction for the latent function values.
+
For certain inputs we give back a full_cov of shape NxN,
if there is missing data, each dimension has its own full_cov of shape NxNxD, and if full_cov is of,
we take only the diagonal elements across N.
For uncertain inputs, the SparseGP bound produces a full covariance structure across D, so for full_cov we
return a NxDxD matrix and in the not full_cov case, we return the diagonal elements across D (NxD).
- This is for both with and without missing data.
+        This is for both with and without missing data. For missing data, see the SparseGP implementation in :py:class:`~GPy.models.sparse_gp_minibatch.SparseGPMiniBatch`.
"""
if kern is None: kern = self.kern
@@ -124,15 +132,26 @@ class SparseGP(GP):
if self.posterior.woodbury_inv.ndim == 2:
var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
elif self.posterior.woodbury_inv.ndim == 3:
- var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
+ var = np.empty((Kxx.shape[0],Kxx.shape[1],self.posterior.woodbury_inv.shape[2]))
+ for i in range(var.shape[1]):
+ var[:, :, i] = (Kxx - mdot(Kx.T, self.posterior.woodbury_inv[:, :, i], Kx))
var = var
else:
Kxx = kern.Kdiag(Xnew)
- var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
+ if self.posterior.woodbury_inv.ndim == 2:
+ var = (Kxx - np.sum(np.dot(self.posterior.woodbury_inv.T, Kx) * Kx, 0))[:,None]
+ elif self.posterior.woodbury_inv.ndim == 3:
+ var = np.empty((Kxx.shape[0],self.posterior.woodbury_inv.shape[2]))
+ for i in range(var.shape[1]):
+ var[:, i] = (Kxx - (np.sum(np.dot(self.posterior.woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
+ var = var
+ #add in the mean function
+ if self.mean_function is not None:
+ mu += self.mean_function.f(Xnew)
else:
- psi0_star = self.kern.psi0(self.Z, Xnew)
- psi1_star = self.kern.psi1(self.Z, Xnew)
- #psi2_star = self.kern.psi2(self.Z, Xnew) # Only possible if we get NxMxM psi2 out of the code.
+ psi0_star = kern.psi0(self.Z, Xnew)
+ psi1_star = kern.psi1(self.Z, Xnew)
+ #psi2_star = kern.psi2(self.Z, Xnew) # Only possible if we get NxMxM psi2 out of the code.
la = self.posterior.woodbury_vector
mu = np.dot(psi1_star, la) # TODO: dimensions?
@@ -144,7 +163,7 @@ class SparseGP(GP):
for i in range(Xnew.shape[0]):
_mu, _var = Xnew.mean.values[[i]], Xnew.variance.values[[i]]
- psi2_star = self.kern.psi2(self.Z, NormalPosterior(_mu, _var))
+ psi2_star = kern.psi2(self.Z, NormalPosterior(_mu, _var))
tmp = (psi2_star[:, :] - psi1_star[[i]].T.dot(psi1_star[[i]]))
var_ = mdot(la.T, tmp, la)
@@ -158,4 +177,5 @@ class SparseGP(GP):
var[i] = var_
else:
var[i] = np.diag(var_)+p0-t2
+
return mu, var
diff --git a/GPy/core/sparse_gp_mpi.py b/GPy/core/sparse_gp_mpi.py
index 15d3ad76..28de3124 100644
--- a/GPy/core/sparse_gp_mpi.py
+++ b/GPy/core/sparse_gp_mpi.py
@@ -2,7 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
-from sparse_gp import SparseGP
+from .sparse_gp import SparseGP
from numpy.linalg.linalg import LinAlgError
from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch
@@ -56,7 +56,7 @@ class SparseGP_MPI(SparseGP):
self.N_range = (N_start, N_end)
self.N_list = np.array(N_list)
self.Y_local = self.Y[N_start:N_end]
- print 'MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range)
+ print('MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range))
mpi_comm.Bcast(self.param_array, root=0)
self.update_model(True)
diff --git a/GPy/core/svgp.py b/GPy/core/svgp.py
index 1966dbef..b8df625e 100644
--- a/GPy/core/svgp.py
+++ b/GPy/core/svgp.py
@@ -3,13 +3,13 @@
import numpy as np
from ..util import choleskies
-from sparse_gp import SparseGP
-from parameterization.param import Param
+from .sparse_gp import SparseGP
+from .parameterization.param import Param
from ..inference.latent_function_inference import SVGP as svgp_inf
class SVGP(SparseGP):
- def __init__(self, X, Y, Z, kernel, likelihood, name='SVGP', Y_metadata=None, batchsize=None):
+ def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, name='SVGP', Y_metadata=None, batchsize=None, num_latent_functions=None):
"""
Stochastic Variational GP.
@@ -38,33 +38,45 @@ class SVGP(SparseGP):
#create the SVI inference method
inf_method = svgp_inf()
- SparseGP.__init__(self, X_batch, Y_batch, Z, kernel, likelihood, inference_method=inf_method,
+ SparseGP.__init__(self, X_batch, Y_batch, Z, kernel, likelihood, mean_function=mean_function, inference_method=inf_method,
name=name, Y_metadata=Y_metadata, normalizer=False)
- self.m = Param('q_u_mean', np.zeros((self.num_inducing, Y.shape[1])))
- chol = choleskies.triang_to_flat(np.tile(np.eye(self.num_inducing)[:,:,None], (1,1,Y.shape[1])))
+ #assume the number of latent functions is one per col of Y unless specified
+ if num_latent_functions is None:
+ num_latent_functions = Y.shape[1]
+
+ self.m = Param('q_u_mean', np.zeros((self.num_inducing, num_latent_functions)))
+ chol = choleskies.triang_to_flat(np.tile(np.eye(self.num_inducing)[:,:,None], (1,1,num_latent_functions)))
self.chol = Param('q_u_chol', chol)
self.link_parameter(self.chol)
self.link_parameter(self.m)
def parameters_changed(self):
- self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.q_u_mean, self.q_u_chol, self.kern, self.X, self.Z, self.likelihood, self.Y, self.Y_metadata, KL_scale=1.0, batch_scale=float(self.X_all.shape[0])/float(self.X.shape[0]))
+ self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.q_u_mean, self.q_u_chol, self.kern, self.X, self.Z, self.likelihood, self.Y, self.mean_function, self.Y_metadata, KL_scale=1.0, batch_scale=float(self.X_all.shape[0])/float(self.X.shape[0]))
#update the kernel gradients
self.kern.update_gradients_full(self.grad_dict['dL_dKmm'], self.Z)
grad = self.kern.gradient.copy()
self.kern.update_gradients_full(self.grad_dict['dL_dKmn'], self.Z, self.X)
- grad += self.kern.gradient
+ grad += self.kern.gradient.copy()
self.kern.update_gradients_diag(self.grad_dict['dL_dKdiag'], self.X)
self.kern.gradient += grad
if not self.Z.is_fixed:# only compute these expensive gradients if we need them
self.Z.gradient = self.kern.gradients_X(self.grad_dict['dL_dKmm'], self.Z) + self.kern.gradients_X(self.grad_dict['dL_dKmn'], self.Z, self.X)
+
self.likelihood.update_gradients(self.grad_dict['dL_dthetaL'])
#update the variational parameter gradients:
self.m.gradient = self.grad_dict['dL_dm']
self.chol.gradient = self.grad_dict['dL_dchol']
+ if self.mean_function is not None:
+ self.mean_function.update_gradients(self.grad_dict['dL_dmfX'], self.X)
+ g = self.mean_function.gradient[:].copy()
+ self.mean_function.update_gradients(self.grad_dict['dL_dmfZ'], self.Z)
+ self.mean_function.gradient[:] += g
+ self.Z.gradient[:] += self.mean_function.gradients_X(self.grad_dict['dL_dmfZ'], self.Z)
+
def set_data(self, X, Y):
"""
Set the data without calling parameters_changed to avoid wasted computation
diff --git a/GPy/core/symbolic.py b/GPy/core/symbolic.py
index ed3a9d59..4a9fcb76 100644
--- a/GPy/core/symbolic.py
+++ b/GPy/core/symbolic.py
@@ -223,7 +223,7 @@ class Symbolic_core():
def code_gradients_cacheable(self, function, variable):
if variable not in self.cacheable:
- raise RuntimeError, variable + ' must be a cacheable.'
+ raise RuntimeError(variable + ' must be a cacheable.')
lcode = 'gradients_' + variable + ' = np.zeros_like(' + variable + ')\n'
lcode += 'self.update_cache(' + ', '.join(self.cacheable) + ')\n'
for i, theta in enumerate(self.variables[variable]):
diff --git a/GPy/core/verbose_optimization.py b/GPy/core/verbose_optimization.py
index 1a87b3da..f882f228 100644
--- a/GPy/core/verbose_optimization.py
+++ b/GPy/core/verbose_optimization.py
@@ -1,7 +1,7 @@
# Copyright (c) 2012-2014, Max Zwiessele.
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-
+from __future__ import print_function
import numpy as np
import sys
import time
@@ -11,7 +11,7 @@ def exponents(fnow, current_grad):
return np.sign(exps) * np.log10(exps).astype(int)
class VerboseOptimization(object):
- def __init__(self, model, opt, maxiters, verbose=False, current_iteration=0, ipython_notebook=True):
+ def __init__(self, model, opt, maxiters, verbose=False, current_iteration=0, ipython_notebook=True, clear_after_finish=False):
self.verbose = verbose
if self.verbose:
self.model = model
@@ -22,55 +22,59 @@ class VerboseOptimization(object):
self.opt_name = opt.opt_name
self.model.add_observer(self, self.print_status)
self.status = 'running'
+ self.clear = clear_after_finish
self.update()
try:
from IPython.display import display
- from IPython.html.widgets import FloatProgressWidget, HTMLWidget, ContainerWidget
- self.text = HTMLWidget()
- self.progress = FloatProgressWidget()
- self.model_show = HTMLWidget()
+ from IPython.html.widgets import IntProgress, HTML, Box, VBox, HBox, FlexBox
+ self.text = HTML(width='100%')
+ self.progress = IntProgress(min=0, max=maxiters)
+ #self.progresstext = Text(width='100%', disabled=True, value='0/{}'.format(maxiters))
+ self.model_show = HTML()
self.ipython_notebook = ipython_notebook
except:
# Not in Ipython notebook
self.ipython_notebook = False
if self.ipython_notebook:
- self.text.set_css('width', '100%')
- #self.progress.set_css('width', '100%')
+ left_col = VBox(children=[self.progress, self.text], padding=2, width='40%')
+ right_col = Box(children=[self.model_show], padding=2, width='60%')
+ self.hor_align = FlexBox(children = [left_col, right_col], width='100%', orientation='horizontal')
- left_col = ContainerWidget(children = [self.progress, self.text])
- right_col = ContainerWidget(children = [self.model_show])
- hor_align = ContainerWidget(children = [left_col, right_col])
+ display(self.hor_align)
+
+ try:
+ self.text.set_css('width', '100%')
+ left_col.set_css({
+ 'padding': '2px',
+ 'width': "100%",
+ })
+
+ right_col.set_css({
+ 'padding': '2px',
+ })
+
+ self.hor_align.set_css({
+ 'width': "100%",
+ })
- display(hor_align)
+ self.hor_align.remove_class('vbox')
+ self.hor_align.add_class('hbox')
+
+ left_col.add_class("box-flex1")
+ right_col.add_class('box-flex0')
- left_col.set_css({
- 'padding': '2px',
- 'width': "100%",
- })
-
- right_col.set_css({
- 'padding': '2px',
- })
-
- hor_align.set_css({
- 'width': "100%",
- })
-
- hor_align.remove_class('vbox')
- hor_align.add_class('hbox')
-
- left_col.add_class("box-flex1")
- right_col.add_class('box-flex0')
+ except:
+ pass
#self.text.add_class('box-flex2')
#self.progress.add_class('box-flex1')
else:
self.exps = exponents(self.fnow, self.current_gradient)
- print 'Running {} Code:'.format(self.opt_name)
- print ' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters)
+ print('Running {} Code:'.format(self.opt_name))
+ print(' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters))
def __enter__(self):
self.start = time.time()
@@ -102,7 +106,8 @@ class VerboseOptimization(object):
html_body += "
{}
".format(val)
html_body += ""
self.text.value = html_begin + html_body + html_end
- self.progress.value = 100*(self.iteration+1)/self.maxiters
+ self.progress.value = (self.iteration+1)
+ #self.progresstext.value = '0/{}'.format((self.iteration+1))
self.model_show.value = self.model._repr_html_()
else:
n_exps = exponents(self.fnow, self.current_gradient)
@@ -111,11 +116,11 @@ class VerboseOptimization(object):
b = np.any(n_exps < self.exps)
if a or b:
self.p_iter = self.iteration
- print ''
+ print('')
if b:
self.exps = n_exps
- print '\r',
- print '{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
+ print('\r', end=' ')
+ print('{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
sys.stdout.flush()
def print_status(self, me, which=None):
@@ -136,6 +141,13 @@ class VerboseOptimization(object):
def finish(self, opt):
self.status = opt.status
+ if self.verbose and self.ipython_notebook:
+ if 'conv' in self.status.lower():
+ self.progress.bar_style = 'success'
+ elif self.iteration >= self.maxiters:
+ self.progress.bar_style = 'warning'
+ else:
+ self.progress.bar_style = 'danger'
def __exit__(self, type, value, traceback):
if self.verbose:
@@ -144,7 +156,9 @@ class VerboseOptimization(object):
self.print_out()
if not self.ipython_notebook:
- print ''
- print 'Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)
- print 'Optimization status: {0:.5g}'.format(self.status)
- print
+ print()
+ print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start))
+ print('Optimization status: {0}'.format(self.status))
+ print()
+ elif self.clear:
+ self.hor_align.close()
diff --git a/GPy/defaults.cfg b/GPy/defaults.cfg
index 306543ed..aa68a421 100644
--- a/GPy/defaults.cfg
+++ b/GPy/defaults.cfg
@@ -25,3 +25,6 @@ MKL = False
[weave]
#if true, try to use weave, and fall back to numpy. if false, just use numpy.
working = True
+
+[cython]
+working = True
diff --git a/GPy/examples/__init__.py b/GPy/examples/__init__.py
index 968333e0..4e9e984e 100644
--- a/GPy/examples/__init__.py
+++ b/GPy/examples/__init__.py
@@ -1,7 +1,7 @@
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-import classification
-import regression
-import dimensionality_reduction
-import non_gaussian
+from . import classification
+from . import regression
+from . import dimensionality_reduction
+from . import non_gaussian
diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py
index b3780073..d4518f24 100644
--- a/GPy/examples/classification.py
+++ b/GPy/examples/classification.py
@@ -15,7 +15,7 @@ def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
"""
try:import pods
- except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
data = pods.datasets.oil()
X = data['X']
Xtest = data['Xtest']
@@ -52,7 +52,7 @@ def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
"""
try:import pods
- except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
data = pods.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
@@ -75,7 +75,7 @@ def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
- print m
+ print(m)
return m
def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True):
@@ -88,7 +88,7 @@ def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=
"""
try:import pods
- except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
data = pods.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
@@ -114,7 +114,7 @@ def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
- print m
+ print(m)
return m
def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, optimize=True, plot=True):
@@ -127,7 +127,7 @@ def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, opti
"""
try:import pods
- except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
data = pods.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
@@ -147,7 +147,7 @@ def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, opti
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
- print m
+ print(m)
return m
def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
@@ -160,7 +160,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
"""
try:import pods
- except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
data = pods.datasets.toy_linear_1d_classification(seed=seed)
Y = data['Y'][:, 0:1]
Y[Y.flatten() == -1] = 0
@@ -177,7 +177,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
# Parameters optimization:
for _ in range(5):
m.optimize(max_iters=int(max_iters/5))
- print m
+ print(m)
# Plot
if plot:
@@ -186,7 +186,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
m.plot_f(ax=axes[0])
m.plot(ax=axes[1])
- print m
+ print(m)
return m
def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True):
@@ -202,7 +202,7 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=
:type kernel: a GPy kernel
"""
try:import pods
- except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
data = pods.datasets.crescent_data(seed=seed)
Y = data['Y']
Y[Y.flatten()==-1] = 0
@@ -224,5 +224,5 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=
if plot:
m.plot()
- print m
+ print(m)
return m
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index c14d6db5..9ae16be5 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -335,7 +335,7 @@ def bgplvm_simulation(optimize=True, verbose=1,
m.likelihood.variance = .1
if optimize:
- print "Optimizing model:"
+ print("Optimizing model:")
m.optimize('bfgs', messages=verbose, max_iters=max_iters,
gtol=.05)
if plot:
@@ -360,7 +360,7 @@ def ssgplvm_simulation(optimize=True, verbose=1,
m.likelihood.variance = .1
if optimize:
- print "Optimizing model:"
+ print("Optimizing model:")
m.optimize('scg', messages=verbose, max_iters=max_iters,
gtol=.05)
if plot:
@@ -390,7 +390,7 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1,
m.Yreal = Y
if optimize:
- print "Optimizing model:"
+ print("Optimizing model:")
m.optimize('bfgs', messages=verbose, max_iters=max_iters,
gtol=.05)
if plot:
@@ -414,7 +414,7 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
m['.*noise'] = [Y.var() / 40. for Y in Ylist]
if optimize:
- print "Optimizing Model:"
+ print("Optimizing Model:")
m.optimize(messages=verbose, max_iters=8e3)
if plot:
m.X.plot("MRD Latent Space 1D")
@@ -442,7 +442,7 @@ def mrd_simulation_missing_data(optimize=True, verbose=True, plot=True, plot_sim
initx="random", initz='permute', **kw)
if optimize:
- print "Optimizing Model:"
+ print("Optimizing Model:")
m.optimize('bfgs', messages=verbose, max_iters=8e3, gtol=.1)
if plot:
m.X.plot("MRD Latent Space 1D")
@@ -607,7 +607,7 @@ def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True):
try:
if optimize: m.optimize('bfgs', messages=verbose, max_iters=5e3, bfgs_factor=10)
except KeyboardInterrupt:
- print "Keyboard interrupt, continuing to plot and return"
+ print("Keyboard interrupt, continuing to plot and return")
if plot:
fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
@@ -658,7 +658,7 @@ def ssgplvm_simulation_linear():
def sample_X(Q, pi):
x = np.empty(Q)
dies = np.random.rand(Q)
- for q in xrange(Q):
+ for q in range(Q):
if dies[q] < pi:
x[q] = np.random.randn()
else:
@@ -668,7 +668,7 @@ def ssgplvm_simulation_linear():
Y = np.empty((N, D))
X = np.empty((N, Q))
# Generate data from random sampled weight matrices
- for n in xrange(N):
+ for n in range(N):
X[n] = sample_X(Q, pi)
w = np.random.randn(D, Q)
Y[n] = np.dot(w, X[n])
diff --git a/GPy/examples/non_gaussian.py b/GPy/examples/non_gaussian.py
index ddac8813..3652b4d3 100644
--- a/GPy/examples/non_gaussian.py
+++ b/GPy/examples/non_gaussian.py
@@ -37,7 +37,7 @@ def student_t_approx(optimize=True, plot=True):
#Add student t random noise to datapoints
deg_free = 1
- print "Real noise: ", real_std
+ print("Real noise: ", real_std)
initial_var_guess = 0.5
edited_real_sd = initial_var_guess
@@ -73,7 +73,7 @@ def student_t_approx(optimize=True, plot=True):
m4['.*t_scale2'].constrain_bounded(1e-6, 10.)
m4['.*white'].constrain_fixed(1e-5)
m4.randomize()
- print m4
+ print(m4)
debug=True
if debug:
m4.optimize(messages=1)
@@ -81,18 +81,18 @@ def student_t_approx(optimize=True, plot=True):
pb.plot(m4.X, m4.inference_method.f_hat)
pb.plot(m4.X, m4.Y, 'rx')
m4.plot()
- print m4
+ print(m4)
return m4
if optimize:
optimizer='scg'
- print "Clean Gaussian"
+ print("Clean Gaussian")
m1.optimize(optimizer, messages=1)
- print "Corrupt Gaussian"
+ print("Corrupt Gaussian")
m2.optimize(optimizer, messages=1)
- print "Clean student t"
+ print("Clean student t")
m3.optimize(optimizer, messages=1)
- print "Corrupt student t"
+ print("Corrupt student t")
m4.optimize(optimizer, messages=1)
if plot:
@@ -151,7 +151,7 @@ def boston_example(optimize=True, plot=True):
for n, (train, test) in enumerate(kf):
X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
- print "Fold {}".format(n)
+ print("Fold {}".format(n))
noise = 1e-1 #np.exp(-2)
rbf_len = 0.5
@@ -163,21 +163,21 @@ def boston_example(optimize=True, plot=True):
score_folds[0, n] = rmse(Y_test, np.mean(Y_train))
#Gaussian GP
- print "Gauss GP"
+ print("Gauss GP")
mgp = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelgp.copy())
mgp.constrain_fixed('.*white', 1e-5)
mgp['.*len'] = rbf_len
mgp['.*noise'] = noise
- print mgp
+ print(mgp)
if optimize:
mgp.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mgp.predict(X_test)
score_folds[1, n] = rmse(Y_test, Y_test_pred[0])
pred_density[1, n] = np.mean(mgp.log_predictive_density(X_test, Y_test))
- print mgp
- print pred_density
+ print(mgp)
+ print(pred_density)
- print "Gaussian Laplace GP"
+ print("Gaussian Laplace GP")
N, D = Y_train.shape
g_distribution = GPy.likelihoods.noise_model_constructors.gaussian(variance=noise, N=N, D=D)
g_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), g_distribution)
@@ -186,18 +186,18 @@ def boston_example(optimize=True, plot=True):
mg.constrain_fixed('.*white', 1e-5)
mg['rbf_len'] = rbf_len
mg['noise'] = noise
- print mg
+ print(mg)
if optimize:
mg.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mg.predict(X_test)
score_folds[2, n] = rmse(Y_test, Y_test_pred[0])
pred_density[2, n] = np.mean(mg.log_predictive_density(X_test, Y_test))
- print pred_density
- print mg
+ print(pred_density)
+ print(mg)
for stu_num, df in enumerate(degrees_freedoms):
#Student T
- print "Student-T GP {}df".format(df)
+ print("Student-T GP {}df".format(df))
t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=df, sigma2=noise)
stu_t_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), t_distribution)
mstu_t = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=stu_t_likelihood)
@@ -205,14 +205,14 @@ def boston_example(optimize=True, plot=True):
mstu_t.constrain_bounded('.*t_scale2', 0.0001, 1000)
mstu_t['rbf_len'] = rbf_len
mstu_t['.*t_scale2'] = noise
- print mstu_t
+ print(mstu_t)
if optimize:
mstu_t.optimize(optimizer=optimizer, messages=messages)
Y_test_pred = mstu_t.predict(X_test)
score_folds[3+stu_num, n] = rmse(Y_test, Y_test_pred[0])
pred_density[3+stu_num, n] = np.mean(mstu_t.log_predictive_density(X_test, Y_test))
- print pred_density
- print mstu_t
+ print(pred_density)
+ print(mstu_t)
if plot:
plt.figure()
@@ -230,8 +230,8 @@ def boston_example(optimize=True, plot=True):
plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
plt.title('Stu t {}df'.format(df))
- print "Average scores: {}".format(np.mean(score_folds, 1))
- print "Average pred density: {}".format(np.mean(pred_density, 1))
+ print("Average scores: {}".format(np.mean(score_folds, 1)))
+ print("Average pred density: {}".format(np.mean(pred_density, 1)))
if plot:
#Plotting
diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py
index 37a18f63..267c6d1e 100644
--- a/GPy/examples/regression.py
+++ b/GPy/examples/regression.py
@@ -15,7 +15,7 @@ def olympic_marathon_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Olympic marathon data."""
try:import pods
except ImportError:
- print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.olympic_marathon_men()
@@ -88,7 +88,7 @@ def epomeo_gpx(max_iters=200, optimize=True, plot=True):
"""
try:import pods
except ImportError:
- print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.epomeo_gpx()
num_data_list = []
@@ -135,7 +135,7 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000
try:import pods
except ImportError:
- print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
# data['Y'] = data['Y'][0::2, :]
@@ -219,7 +219,7 @@ def olympic_100m_men(optimize=True, plot=True):
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
try:import pods
except ImportError:
- print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.olympic_100m_men()
@@ -240,7 +240,7 @@ def toy_rbf_1d(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:import pods
except ImportError:
- print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.toy_rbf_1d()
@@ -258,7 +258,7 @@ def toy_rbf_1d_50(optimize=True, plot=True):
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
try:import pods
except ImportError:
- print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.toy_rbf_1d_50()
@@ -377,7 +377,7 @@ def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
"""Predict the location of a robot given wirelss signal strength readings."""
try:import pods
except ImportError:
- print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.robot_wireless()
@@ -398,14 +398,14 @@ def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
sse = ((data['Xtest'] - Xpredict)**2).sum()
- print('Sum of squares error on test data: ' + str(sse))
+ print(('Sum of squares error on test data: ' + str(sse)))
return m
def silhouette(max_iters=100, optimize=True, plot=True):
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
try:import pods
except ImportError:
- print 'pods unavailable, see https://github.com/sods/ods for example datasets'
+ print('pods unavailable, see https://github.com/sods/ods for example datasets')
return
data = pods.datasets.silhouette()
@@ -416,7 +416,7 @@ def silhouette(max_iters=100, optimize=True, plot=True):
if optimize:
m.optimize(messages=True, max_iters=max_iters)
- print m
+ print(m)
return m
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=False):
@@ -468,7 +468,7 @@ def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, opt
if plot:
m.plot()
- print m
+ print(m)
return m
def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
@@ -492,7 +492,7 @@ def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
if plot:
m.plot(ax=axes[0])
axes[0].set_title('no input uncertainty')
- print m
+ print(m)
# the same Model with uncertainty
m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z, X_variance=S)
@@ -503,5 +503,50 @@ def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
axes[1].set_title('with input uncertainty')
fig.canvas.draw()
- print m
+ print(m)
return m
+
+def simple_mean_function(max_iters=100, optimize=True, plot=True):
+ """
+ The simplest possible mean function. No parameters, just a simple Sinusoid.
+ """
+ #create simple mean function
+ mf = GPy.core.Mapping(1,1)
+ mf.f = np.sin
+ mf.update_gradients = lambda a,b: None
+
+ X = np.linspace(0,10,50).reshape(-1,1)
+ Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape)
+
+ k =GPy.kern.RBF(1)
+ lik = GPy.likelihoods.Gaussian()
+ m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
+ if optimize:
+ m.optimize(max_iters=max_iters)
+ if plot:
+ m.plot(plot_limits=(-10,15))
+ return m
+
+def parametric_mean_function(max_iters=100, optimize=True, plot=True):
+ """
+ A linear mean function with parameters that we'll learn alongside the kernel
+ """
+ #create simple mean function
+ mf = GPy.core.Mapping(1,1)
+ mf.f = np.sin
+
+ X = np.linspace(0,10,50).reshape(-1,1)
+ Y = np.sin(X) + 0.5*np.cos(3*X) + 0.1*np.random.randn(*X.shape) + 3*X
+
+ mf = GPy.mappings.Linear(1,1)
+
+ k =GPy.kern.RBF(1)
+ lik = GPy.likelihoods.Gaussian()
+ m = GPy.core.GP(X, Y, kernel=k, likelihood=lik, mean_function=mf)
+ if optimize:
+ m.optimize(max_iters=max_iters)
+ if plot:
+ m.plot()
+ return m
+
+
diff --git a/GPy/inference/__init__.py b/GPy/inference/__init__.py
index 7b1307e3..c5044582 100644
--- a/GPy/inference/__init__.py
+++ b/GPy/inference/__init__.py
@@ -1,3 +1,3 @@
-import latent_function_inference
-import optimization
-import mcmc
+from . import latent_function_inference
+from . import optimization
+from . import mcmc
diff --git a/GPy/inference/latent_function_inference/__init__.py b/GPy/inference/latent_function_inference/__init__.py
index 67f57638..6754000d 100644
--- a/GPy/inference/latent_function_inference/__init__.py
+++ b/GPy/inference/latent_function_inference/__init__.py
@@ -50,26 +50,26 @@ class InferenceMethodList(LatentFunctionInference, list):
def on_optimization_end(self):
for inf in self:
inf.on_optimization_end()
-
+
def __getstate__(self):
state = []
for inf in self:
state.append(inf)
return state
-
+
def __setstate__(self, state):
for inf in state:
self.append(inf)
-from exact_gaussian_inference import ExactGaussianInference
-from laplace import Laplace
+from .exact_gaussian_inference import ExactGaussianInference
+from .laplace import Laplace,LaplaceBlock
from GPy.inference.latent_function_inference.var_dtc import VarDTC
-from expectation_propagation import EP
-from expectation_propagation_dtc import EPDTC
-from dtc import DTC
-from fitc import FITC
-from var_dtc_parallel import VarDTC_minibatch
-from svgp import SVGP
+from .expectation_propagation import EP
+from .expectation_propagation_dtc import EPDTC
+from .dtc import DTC
+from .fitc import FITC
+from .var_dtc_parallel import VarDTC_minibatch
+from .svgp import SVGP
# class FullLatentFunctionData(object):
#
@@ -78,9 +78,9 @@ from svgp import SVGP
# class EMLikeLatentFunctionInference(LatentFunctionInference):
# def update_approximation(self):
# """
-# This function gets called when the
+# This function gets called when the
# """
-#
+#
# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
# """
# Do inference on the latent functions given a covariance function `kern`,
@@ -88,7 +88,7 @@ from svgp import SVGP
# Additional metadata for the outputs `Y` can be given in `Y_metadata`.
# """
# raise NotImplementedError, "Abstract base class for full inference"
-#
+#
# class VariationalLatentFunctionInference(LatentFunctionInference):
# def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
# """
diff --git a/GPy/inference/latent_function_inference/dtc.py b/GPy/inference/latent_function_inference/dtc.py
index 5590a079..0aa990c1 100644
--- a/GPy/inference/latent_function_inference/dtc.py
+++ b/GPy/inference/latent_function_inference/dtc.py
@@ -1,7 +1,7 @@
# Copyright (c) 2012-2014, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from posterior import Posterior
+from .posterior import Posterior
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
import numpy as np
from . import LatentFunctionInference
@@ -20,7 +20,8 @@ class DTC(LatentFunctionInference):
def __init__(self):
self.const_jitter = 1e-6
- def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
+ def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None):
+ assert mean_function is None, "inference with a mean function not implemented"
assert X_variance is None, "cannot use X_variance with DTC. Try varDTC."
num_inducing, _ = Z.shape
@@ -29,7 +30,7 @@ class DTC(LatentFunctionInference):
#make sure the noise is not hetero
beta = 1./likelihood.gaussian_variance(Y_metadata)
if beta.size > 1:
- raise NotImplementedError, "no hetero noise with this implementation of DTC"
+ raise NotImplementedError("no hetero noise with this implementation of DTC")
Kmm = kern.K(Z)
Knn = kern.Kdiag(X)
@@ -88,7 +89,8 @@ class vDTC(object):
def __init__(self):
self.const_jitter = 1e-6
- def inference(self, kern, X, X_variance, Z, likelihood, Y, Y_metadata):
+ def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None):
+ assert mean_function is None, "inference with a mean function not implemented"
assert X_variance is None, "cannot use X_variance with DTC. Try varDTC."
num_inducing, _ = Z.shape
@@ -97,7 +99,7 @@ class vDTC(object):
#make sure the noise is not hetero
beta = 1./likelihood.gaussian_variance(Y_metadata)
if beta.size > 1:
- raise NotImplementedError, "no hetero noise with this implementation of DTC"
+ raise NotImplementedError("no hetero noise with this implementation of DTC")
Kmm = kern.K(Z)
Knn = kern.Kdiag(X)
diff --git a/GPy/inference/latent_function_inference/exact_gaussian_inference.py b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
index 1312d36a..343387a7 100644
--- a/GPy/inference/latent_function_inference/exact_gaussian_inference.py
+++ b/GPy/inference/latent_function_inference/exact_gaussian_inference.py
@@ -1,7 +1,7 @@
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from posterior import Posterior
+from .posterior import Posterior
from ...util.linalg import pdinv, dpotrs, tdot
from ...util import diag
import numpy as np
@@ -36,16 +36,23 @@ class ExactGaussianInference(LatentFunctionInference):
#print "WARNING: N>D of Y, we need caching of L, such that L*L^T = Y, returning Y still!"
return Y
- def inference(self, kern, X, likelihood, Y, Y_metadata=None):
+ def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None):
"""
Returns a Posterior class containing essential quantities of the posterior
"""
- YYT_factor = self.get_YYTfactor(Y)
+
+ if mean_function is None:
+ m = 0
+ else:
+ m = mean_function.f(X)
+
+
+ YYT_factor = self.get_YYTfactor(Y-m)
K = kern.K(X)
Ky = K.copy()
- diag.add(Ky, likelihood.gaussian_variance(Y_metadata))
+ diag.add(Ky, likelihood.gaussian_variance(Y_metadata)+1e-8)
Wi, LW, LWi, W_logdet = pdinv(Ky)
alpha, _ = dpotrs(LW, YYT_factor, lower=1)
@@ -56,4 +63,18 @@ class ExactGaussianInference(LatentFunctionInference):
dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata)
- return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}
+ return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha}
+
+ def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None):
+ """
+ Leave one out error as found in
+ "Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models"
+ Vehtari et al. 2014.
+ """
+ g = posterior.woodbury_vector
+ c = posterior.woodbury_inv
+ c_diag = np.diag(c)[:, None]
+ neg_log_marginal_LOO = 0.5*np.log(2*np.pi) - 0.5*np.log(c_diag) + 0.5*(g**2)/c_diag
+        #believed to follow "Predictive Approaches for Choosing Hyperparameters in Gaussian Processes";
+ #this is the negative marginal LOO
+ return -neg_log_marginal_LOO
diff --git a/GPy/inference/latent_function_inference/expectation_propagation.py b/GPy/inference/latent_function_inference/expectation_propagation.py
index 26144974..85841a33 100644
--- a/GPy/inference/latent_function_inference/expectation_propagation.py
+++ b/GPy/inference/latent_function_inference/expectation_propagation.py
@@ -2,7 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs
-from posterior import Posterior
+from .posterior import Posterior
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
@@ -33,15 +33,19 @@ class EP(LatentFunctionInference):
# TODO: update approximation in the end as well? Maybe even with a switch?
pass
- def inference(self, kern, X, likelihood, Y, Y_metadata=None, Z=None):
+ def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None, Z=None):
+ assert mean_function is None, "inference with a mean function not implemented"
num_data, output_dim = Y.shape
assert output_dim ==1, "ep in 1D only (for now!)"
K = kern.K(X)
if self._ep_approximation is None:
+
+        #if we don't yet have the results of running EP, run EP and store the computed factors in self._ep_approximation
mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation = self.expectation_propagation(K, Y, likelihood, Y_metadata)
else:
+ #if we've already run EP, just use the existing approximation stored in self._ep_approximation
mu, Sigma, mu_tilde, tau_tilde, Z_hat = self._ep_approximation
Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde))
diff --git a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py
index 35b1b7dc..e182c9f7 100644
--- a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py
+++ b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py
@@ -6,7 +6,7 @@ from ...util import diag
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify, DSYR
from ...core.parameterization.variational import VariationalPosterior
from . import LatentFunctionInference
-from posterior import Posterior
+from .posterior import Posterior
log_2_pi = np.log(2*np.pi)
class EPDTC(LatentFunctionInference):
@@ -64,7 +64,8 @@ class EPDTC(LatentFunctionInference):
self.old_mutilde, self.old_vtilde = None, None
self._ep_approximation = None
- def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
+ def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None):
+ assert mean_function is None, "inference with a mean function not implemented"
num_data, output_dim = Y.shape
assert output_dim ==1, "ep in 1D only (for now!)"
@@ -179,7 +180,7 @@ class EPDTC(LatentFunctionInference):
if VVT_factor.shape[1] == Y.shape[1]:
woodbury_vector = Cpsi1Vf # == Cpsi1V
else:
- print 'foobar'
+ print('foobar')
psi1V = np.dot(mu_tilde[:,None].T*beta, psi1).T
tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
tmp, _ = dpotrs(LB, tmp, lower=1)
@@ -314,7 +315,7 @@ def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf,
dL_dR = None
elif het_noise:
if uncertain_inputs:
- raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented"
+ raise NotImplementedError("heteroscedatic derivates with uncertain inputs not implemented")
else:
#from ...util.linalg import chol_inv
#LBi = chol_inv(LB)
diff --git a/GPy/inference/latent_function_inference/fitc.py b/GPy/inference/latent_function_inference/fitc.py
index a184c6c4..f38eb52b 100644
--- a/GPy/inference/latent_function_inference/fitc.py
+++ b/GPy/inference/latent_function_inference/fitc.py
@@ -1,7 +1,7 @@
# Copyright (c) 2012, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from posterior import Posterior
+from .posterior import Posterior
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
from ...util import diag
import numpy as np
@@ -18,7 +18,8 @@ class FITC(LatentFunctionInference):
"""
const_jitter = 1e-6
- def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
+ def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None):
+ assert mean_function is None, "inference with a mean function not implemented"
num_inducing, _ = Z.shape
num_data, output_dim = Y.shape
@@ -26,7 +27,7 @@ class FITC(LatentFunctionInference):
#make sure the noise is not hetero
sigma_n = likelihood.gaussian_variance(Y_metadata)
if sigma_n.size >1:
- raise NotImplementedError, "no hetero noise with this implementation of FITC"
+ raise NotImplementedError("no hetero noise with this implementation of FITC")
Kmm = kern.K(Z)
Knn = kern.Kdiag(X)
diff --git a/GPy/inference/latent_function_inference/laplace.py b/GPy/inference/latent_function_inference/laplace.py
index 05711b0b..aefc82ac 100644
--- a/GPy/inference/latent_function_inference/laplace.py
+++ b/GPy/inference/latent_function_inference/laplace.py
@@ -12,13 +12,14 @@
import numpy as np
from ...util.linalg import mdot, jitchol, dpotrs, dtrtrs, dpotri, symmetrify, pdinv
-from posterior import Posterior
+from .posterior import Posterior
import warnings
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return ' %s:%s: %s:%s\n' % (filename, lineno, category.__name__, message)
warnings.formatwarning = warning_on_one_line
from scipy import optimize
from . import LatentFunctionInference
+from scipy.integrate import quad
class Laplace(LatentFunctionInference):
@@ -39,10 +40,90 @@ class Laplace(LatentFunctionInference):
self.first_run = True
self._previous_Ki_fhat = None
- def inference(self, kern, X, likelihood, Y, Y_metadata=None):
+ def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None, f_hat=None, W=None, Ki_W_i=None):
+ """
+ Leave one out log predictive density as found in
+ "Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models"
+ Vehtari et al. 2014.
+ """
+ Ki_f_init = np.zeros_like(Y)
+
+ if K is None:
+ K = kern.K(X)
+
+ if f_hat is None:
+ f_hat, _ = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata)
+
+ if W is None:
+ W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata)
+
+ if Ki_W_i is None:
+ _, _, _, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave)
+
+ logpdf_dfhat = likelihood.dlogpdf_df(f_hat, Y, Y_metadata=Y_metadata)
+
+ if W.shape[1] == 1:
+ W = np.diagflat(W)
+
+ #Eq 14, and 16
+ var_site = 1./np.diag(W)[:, None]
+ mu_site = f_hat + var_site*logpdf_dfhat
+ prec_site = 1./var_site
+ #Eq 19
+ marginal_cov = Ki_W_i
+ marginal_mu = marginal_cov.dot(np.diagflat(prec_site)).dot(mu_site)
+ marginal_var = np.diag(marginal_cov)[:, None]
+ #Eq 30 with using site parameters instead of Gaussian site parameters
+ #(var_site instead of sigma^{2} )
+ posterior_cav_var = 1./(1./marginal_var - 1./var_site)
+ posterior_cav_mean = posterior_cav_var*((1./marginal_var)*marginal_mu - (1./var_site)*Y)
+
+ flat_y = Y.flatten()
+ flat_mu = posterior_cav_mean.flatten()
+ flat_var = posterior_cav_var.flatten()
+
+ if Y_metadata is not None:
+            #Need to zip individual elements of Y_metadata as well
+ Y_metadata_flat = {}
+ if Y_metadata is not None:
+ for key, val in Y_metadata.items():
+ Y_metadata_flat[key] = np.atleast_1d(val).reshape(-1, 1)
+
+ zipped_values = []
+
+ for i in range(Y.shape[0]):
+ y_m = {}
+ for key, val in Y_metadata_flat.items():
+ if np.isscalar(val) or val.shape[0] == 1:
+ y_m[key] = val
+ else:
+ #Won't broadcast yet
+ y_m[key] = val[i]
+ zipped_values.append((flat_y[i], flat_mu[i], flat_var[i], y_m))
+ else:
+ #Otherwise just pass along None's
+ zipped_values = zip(flat_y, flat_mu, flat_var, [None]*Y.shape[0])
+
+ def integral_generator(yi, mi, vi, yi_m):
+ def f(fi_star):
+ #More stable in the log space
+ p_fi = np.exp(likelihood.logpdf(fi_star, yi, yi_m)
+ - 0.5*np.log(2*np.pi*vi)
+ - 0.5*np.square(mi-fi_star)/vi)
+ return p_fi
+ return f
+
+ #Eq 30
+ p_ystar, _ = zip(*[quad(integral_generator(y, m, v, yi_m), -np.inf, np.inf)
+ for y, m, v, yi_m in zipped_values])
+ p_ystar = np.array(p_ystar).reshape(-1, 1)
+ return np.log(p_ystar)
+
+ def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None):
"""
Returns a Posterior class containing essential quantities of the posterior
"""
+ assert mean_function is None, "inference with a mean function not implemented"
# Compute K
K = kern.K(X)
@@ -50,21 +131,25 @@ class Laplace(LatentFunctionInference):
#Find mode
if self.bad_fhat or self.first_run:
Ki_f_init = np.zeros_like(Y)
- first_run = False
+ self.first_run = False
else:
Ki_f_init = self._previous_Ki_fhat
+ Ki_f_init = np.zeros_like(Y)# FIXME: take this out
+
f_hat, Ki_fhat = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata)
+
self.f_hat = f_hat
- self.Ki_fhat = Ki_fhat
- self.K = K.copy()
+ #self.Ki_fhat = Ki_fhat
+ #self.K = K.copy()
+
#Compute hessian and other variables at mode
log_marginal, woodbury_inv, dL_dK, dL_dthetaL = self.mode_computations(f_hat, Ki_fhat, K, Y, likelihood, kern, Y_metadata)
self._previous_Ki_fhat = Ki_fhat.copy()
return Posterior(woodbury_vector=Ki_fhat, woodbury_inv=woodbury_inv, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}
- def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None):
+ def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None, *args, **kwargs):
"""
Rasmussen's numerically stable mode finding
For nomenclature see Rasmussen & Williams 2006
@@ -89,7 +174,12 @@ class Laplace(LatentFunctionInference):
#define the objective function (to be maximised)
def obj(Ki_f, f):
- return -0.5*np.dot(Ki_f.flatten(), f.flatten()) + np.sum(likelihood.logpdf(f, Y, Y_metadata=Y_metadata))
+ ll = -0.5*np.sum(np.dot(Ki_f.T, f)) + np.sum(likelihood.logpdf(f, Y, Y_metadata=Y_metadata))
+ if np.isnan(ll):
+ return -np.inf
+ else:
+ return ll
+
difference = np.inf
iteration = 0
@@ -104,7 +194,7 @@ class Laplace(LatentFunctionInference):
W_f = W*f
b = W_f + grad # R+W p46 line 6.
- W12BiW12, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave)
+ W12BiW12, _, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave, *args, **kwargs)
W12BiW12Kb = np.dot(W12BiW12, np.dot(K, b))
#Work out the DIRECTION that we want to move in, but don't choose the stepsize yet
@@ -121,7 +211,9 @@ class Laplace(LatentFunctionInference):
step = optimize.brent(inner_obj, tol=1e-4, maxiter=12)
Ki_f_new = Ki_f + step*dKi_f
f_new = np.dot(K, Ki_f_new)
-
+ #print "new {} vs old {}".format(obj(Ki_f_new, f_new), obj(Ki_f, f))
+ if obj(Ki_f_new, f_new) < obj(Ki_f, f):
+ raise ValueError("Shouldn't happen, brent optimization failing")
difference = np.abs(np.sum(f_new - f)) + np.abs(np.sum(Ki_f_new - Ki_f))
Ki_f = Ki_f_new
f = f_new
@@ -152,14 +244,10 @@ class Laplace(LatentFunctionInference):
if np.any(np.isnan(W)):
raise ValueError('One or more element(s) of W is NaN')
- K_Wi_i, L, LiW12 = self._compute_B_statistics(K, W, likelihood.log_concave)
-
- #compute vital matrices
- C = np.dot(LiW12, K)
- Ki_W_i = K - C.T.dot(C)
+ K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave)
#compute the log marginal
- log_marginal = -0.5*np.dot(Ki_f.flatten(), f_hat.flatten()) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - np.sum(np.log(np.diag(L)))
+ log_marginal = -0.5*np.sum(np.dot(Ki_f.T, f_hat)) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - 0.5*logdet_I_KW
# Compute matrices for derivatives
dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat
@@ -196,23 +284,23 @@ class Laplace(LatentFunctionInference):
dL_dthetaL = np.zeros(num_params)
for thetaL_i in range(num_params):
#Explicit
- dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i])
+ dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i,:, :])
# The + comes from the fact that dlik_hess_dthetaL == -dW_dthetaL
- + 0.5*np.sum(np.diag(Ki_W_i).flatten()*dlik_hess_dthetaL[:, thetaL_i].flatten())
+ + 0.5*np.sum(np.diag(Ki_W_i)*np.squeeze(dlik_hess_dthetaL[thetaL_i, :, :]))
)
#Implicit
- dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[:, thetaL_i])
- #dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[:, thetaL_i])
+ dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[thetaL_i, :, :])
+ #dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[thetaL_i, :, :])
dL_dthetaL_imp = np.dot(dL_dfhat.T, dfhat_dthetaL)
- dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp
+ dL_dthetaL[thetaL_i] = np.sum(dL_dthetaL_exp + dL_dthetaL_imp)
else:
dL_dthetaL = np.zeros(likelihood.size)
return log_marginal, K_Wi_i, dL_dK, dL_dthetaL
- def _compute_B_statistics(self, K, W, log_concave):
+ def _compute_B_statistics(self, K, W, log_concave, *args, **kwargs):
"""
Rasmussen suggests the use of a numerically stable positive definite matrix B
Which has a positive diagonal elements and can be easily inverted
@@ -225,7 +313,7 @@ class Laplace(LatentFunctionInference):
"""
if not log_concave:
#print "Under 1e-10: {}".format(np.sum(W < 1e-6))
- W[W<1e-6] = 1e-6
+ W = np.clip(W, 1e-6, 1e+30)
# NOTE: when setting a parameter inside parameters_changed it will allways come to closed update circles!!!
#W.__setitem__(W < 1e-6, 1e-6, update=False) # FIXME-HACK: This is a hack since GPy can't handle negative variances which can occur
# If the likelihood is non-log-concave. We wan't to say that there is a negative variance
@@ -247,5 +335,160 @@ class Laplace(LatentFunctionInference):
#K_Wi_i_2 , _= dpotri(L2)
#symmetrify(K_Wi_i_2)
- return K_Wi_i, L, LiW12
+ #compute vital matrices
+ C = np.dot(LiW12, K)
+ Ki_W_i = K - C.T.dot(C)
+ I_KW_i = np.eye(K.shape[0]) - np.dot(K, K_Wi_i)
+ logdet_I_KW = 2*np.sum(np.log(np.diag(L)))
+
+ return K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i
+
+class LaplaceBlock(Laplace):
+ def rasm_mode(self, K, Y, likelihood, Ki_f_init, Y_metadata=None, *args, **kwargs):
+ Ki_f = Ki_f_init.copy()
+ f = np.dot(K, Ki_f)
+
+ #define the objective function (to be maximised)
+ def obj(Ki_f, f):
+ ll = -0.5*np.dot(Ki_f.T, f) + np.sum(likelihood.logpdf_sum(f, Y, Y_metadata=Y_metadata))
+ if np.isnan(ll):
+ return -np.inf
+ else:
+ return ll
+
+ difference = np.inf
+ iteration = 0
+
+ I = np.eye(K.shape[0])
+ while difference > self._mode_finding_tolerance and iteration < self._mode_finding_max_iter:
+ W = -likelihood.d2logpdf_df2(f, Y, Y_metadata=Y_metadata)
+
+ W[np.diag_indices_from(W)] = np.clip(np.diag(W), 1e-6, 1e+30)
+
+ W_f = np.dot(W, f)
+ grad = likelihood.dlogpdf_df(f, Y, Y_metadata=Y_metadata)
+
+ b = W_f + grad # R+W p46 line 6.
+ K_Wi_i, _, _, _ = self._compute_B_statistics(K, W, likelihood.log_concave, *args, **kwargs)
+
+ #Work out the DIRECTION that we want to move in, but don't choose the stepsize yet
+ #a = (I - (K+Wi)i*K)*b
+ full_step_Ki_f = np.dot(I - np.dot(K_Wi_i, K), b)
+ dKi_f = full_step_Ki_f - Ki_f
+
+ #define an objective for the line search (minimize this one)
+ def inner_obj(step_size):
+ Ki_f_trial = Ki_f + step_size*dKi_f
+ f_trial = np.dot(K, Ki_f_trial)
+ return -obj(Ki_f_trial, f_trial)
+
+ #use scipy for the line search, the compute new values of f, Ki_f
+ step = optimize.brent(inner_obj, tol=1e-4, maxiter=12)
+
+ Ki_f_new = Ki_f + step*dKi_f
+ f_new = np.dot(K, Ki_f_new)
+
+ difference = np.abs(np.sum(f_new - f)) + np.abs(np.sum(Ki_f_new - Ki_f))
+ Ki_f = Ki_f_new
+ f = f_new
+ iteration += 1
+
+ #Warn of bad fits
+ if difference > self._mode_finding_tolerance:
+ if not self.bad_fhat:
+ warnings.warn("Not perfect f_hat fit difference: {}".format(difference))
+ self._previous_Ki_fhat = np.zeros_like(Y)
+ self.bad_fhat = True
+ elif self.bad_fhat:
+ self.bad_fhat = False
+ warnings.warn("f_hat now fine again")
+ if iteration > self._mode_finding_max_iter:
+ warnings.warn("didn't find the best")
+
+ return f, Ki_f
+
+ def mode_computations(self, f_hat, Ki_f, K, Y, likelihood, kern, Y_metadata):
+ #At this point get the hessian matrix (or vector as W is diagonal)
+ W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata)
+
+ W[np.diag_indices_from(W)] = np.clip(np.diag(W), 1e-6, 1e+30)
+
+ K_Wi_i, log_B_det, I_KW_i, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave)
+
+ #compute the log marginal
+        #FIXME: The determinant should be output_dim*0.5 I think, gradients may now no longer check
+ log_marginal = -0.5*np.dot(f_hat.T, Ki_f) + np.sum(likelihood.logpdf_sum(f_hat, Y, Y_metadata=Y_metadata)) - 0.5*log_B_det
+
+        #Compute vital matrices for derivatives
+ dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat
+
+ #dL_dfhat = np.zeros((f_hat.shape[0]))
+ #for i in range(f_hat.shape[0]):
+ #dL_dfhat[i] = -0.5*np.trace(np.dot(Ki_W_i, dW_df[:,:,i]))
+
+ dL_dfhat = -0.5*np.einsum('ij,ijk->k', Ki_W_i, dW_df)
+
+ woodbury_vector = likelihood.dlogpdf_df(f_hat, Y, Y_metadata=Y_metadata)
+
+ ####################
+ #compute dL_dK#
+ ####################
+ if kern.size > 0 and not kern.is_fixed:
+ #Explicit
+ explicit_part = 0.5*(np.dot(Ki_f, Ki_f.T) - K_Wi_i)
+
+ #Implicit
+ implicit_part = woodbury_vector.dot(dL_dfhat[None,:]).dot(I_KW_i)
+ #implicit_part = Ki_f.dot(dL_dfhat[None,:]).dot(I_KW_i)
+
+ dL_dK = explicit_part + implicit_part
+ else:
+ dL_dK = np.zeros_like(K)
+
+ ####################
+ #compute dL_dthetaL#
+ ####################
+ if likelihood.size > 0 and not likelihood.is_fixed:
+ raise NotImplementedError
+ else:
+ dL_dthetaL = np.zeros(likelihood.size)
+
+ #self.K_Wi_i = K_Wi_i
+ #self.Ki_W_i = Ki_W_i
+ #self.W = W
+ #self.K = K
+ #self.dL_dfhat = dL_dfhat
+ #self.explicit_part = explicit_part
+ #self.implicit_part = implicit_part
+ return log_marginal, K_Wi_i, dL_dK, dL_dthetaL
+
+ def _compute_B_statistics(self, K, W, log_concave, *args, **kwargs):
+ """
+ Rasmussen suggests the use of a numerically stable positive definite matrix B
+        Which has positive diagonal elements and can be easily inverted
+
+ :param K: Prior Covariance matrix evaluated at locations X
+ :type K: NxN matrix
+ :param W: Negative hessian at a point (diagonal matrix)
+ :type W: Vector of diagonal values of hessian (1xN)
+        :returns: (K_Wi_i, logdet_B, Bi, Ki_W_i)
+ """
+ #w = GPy.util.diag.view(W)
+ #W[:] = np.where(w<1e-6, 1e-6, w)
+
+ #B = I + KW
+ B = np.eye(K.shape[0]) + np.dot(K, W)
+ #Bi, L, Li, logdetB = pdinv(B)
+ Bi = np.linalg.inv(B)
+
+ #K_Wi_i = np.eye(K.shape[0]) - mdot(W, Bi, K)
+ K_Wi_i = np.dot(W, Bi)
+
+ #self.K_Wi_i_brute = np.linalg.inv(K + np.linalg.inv(W))
+ #self.B = B
+ #self.Bi = Bi
+ Ki_W_i = np.dot(Bi, K)
+
+ sign, logdetB = np.linalg.slogdet(B)
+ return K_Wi_i, sign*logdetB, Bi, Ki_W_i
diff --git a/GPy/inference/latent_function_inference/posterior.py b/GPy/inference/latent_function_inference/posterior.py
index 34f0b3bb..fbd72f57 100644
--- a/GPy/inference/latent_function_inference/posterior.py
+++ b/GPy/inference/latent_function_inference/posterior.py
@@ -15,7 +15,7 @@ class Posterior(object):
the function at any new point x_* by integrating over this posterior.
"""
- def __init__(self, woodbury_chol=None, woodbury_vector=None, K=None, mean=None, cov=None, K_chol=None, woodbury_inv=None):
+ def __init__(self, woodbury_chol=None, woodbury_vector=None, K=None, mean=None, cov=None, K_chol=None, woodbury_inv=None, prior_mean=0):
"""
woodbury_chol : a lower triangular matrix L that satisfies posterior_covariance = K - K L^{-T} L^{-1} K
woodbury_vector : a matrix (or vector, as Nx1 matrix) M which satisfies posterior_mean = K M
@@ -52,7 +52,7 @@ class Posterior(object):
or ((mean is not None) and (cov is not None)):
pass # we have sufficient to compute the posterior
else:
- raise ValueError, "insufficient information to compute the posterior"
+ raise ValueError("insufficient information to compute the posterior")
self._K_chol = K_chol
self._K = K
@@ -67,6 +67,7 @@ class Posterior(object):
#option 2:
self._mean = mean
self._covariance = cov
+ self._prior_mean = prior_mean
#compute this lazily
self._precision = None
@@ -107,7 +108,7 @@ class Posterior(object):
if self._precision is None:
cov = np.atleast_3d(self.covariance)
self._precision = np.zeros(cov.shape) # if one covariance per dimension
- for p in xrange(cov.shape[-1]):
+ for p in range(cov.shape[-1]):
self._precision[:,:,p] = pdinv(cov[:,:,p])[0]
return self._precision
@@ -125,7 +126,7 @@ class Posterior(object):
if self._woodbury_inv is not None:
winv = np.atleast_3d(self._woodbury_inv)
self._woodbury_chol = np.zeros(winv.shape)
- for p in xrange(winv.shape[-1]):
+ for p in range(winv.shape[-1]):
self._woodbury_chol[:,:,p] = pdinv(winv[:,:,p])[2]
#Li = jitchol(self._woodbury_inv)
#self._woodbury_chol, _ = dtrtri(Li)
@@ -134,13 +135,13 @@ class Posterior(object):
#self._woodbury_chol = jitchol(W)
#try computing woodbury chol from cov
elif self._covariance is not None:
- raise NotImplementedError, "TODO: check code here"
+ raise NotImplementedError("TODO: check code here")
B = self._K - self._covariance
tmp, _ = dpotrs(self.K_chol, B)
self._woodbury_inv, _ = dpotrs(self.K_chol, tmp.T)
_, _, self._woodbury_chol, _ = pdinv(self._woodbury_inv)
else:
- raise ValueError, "insufficient information to compute posterior"
+ raise ValueError("insufficient information to compute posterior")
return self._woodbury_chol
@property
@@ -160,7 +161,7 @@ class Posterior(object):
elif self._covariance is not None:
B = np.atleast_3d(self._K) - np.atleast_3d(self._covariance)
self._woodbury_inv = np.empty_like(B)
- for i in xrange(B.shape[-1]):
+ for i in range(B.shape[-1]):
tmp, _ = dpotrs(self.K_chol, B[:,:,i])
self._woodbury_inv[:,:,i], _ = dpotrs(self.K_chol, tmp.T)
return self._woodbury_inv
@@ -175,7 +176,7 @@ class Posterior(object):
$$
"""
if self._woodbury_vector is None:
- self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean)
+ self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean)
return self._woodbury_vector
@property
diff --git a/GPy/inference/latent_function_inference/svgp.py b/GPy/inference/latent_function_inference/svgp.py
index 1974991b..8d99e750 100644
--- a/GPy/inference/latent_function_inference/svgp.py
+++ b/GPy/inference/latent_function_inference/svgp.py
@@ -2,17 +2,22 @@ from . import LatentFunctionInference
from ...util import linalg
from ...util import choleskies
import numpy as np
-from posterior import Posterior
+from .posterior import Posterior
class SVGP(LatentFunctionInference):
- def inference(self, q_u_mean, q_u_chol, kern, X, Z, likelihood, Y, Y_metadata=None, KL_scale=1.0, batch_scale=1.0):
- num_inducing = Z.shape[0]
- num_data, num_outputs = Y.shape
+ def inference(self, q_u_mean, q_u_chol, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None, KL_scale=1.0, batch_scale=1.0):
+
+ num_data, _ = Y.shape
+ num_inducing, num_outputs = q_u_mean.shape
#expand cholesky representation
L = choleskies.flat_to_triang(q_u_chol)
- S = np.einsum('ijk,ljk->ilk', L, L) #L.dot(L.T)
+
+
+ S = np.empty((num_outputs, num_inducing, num_inducing))
+ [np.dot(L[:,:,i], L[:,:,i].T, S[i,:,:]) for i in range(num_outputs)]
+ S = S.swapaxes(0,2)
#Si,_ = linalg.dpotri(np.asfortranarray(L), lower=1)
Si = choleskies.multiple_dpotri(L)
logdetS = np.array([2.*np.sum(np.log(np.abs(np.diag(L[:,:,i])))) for i in range(L.shape[-1])])
@@ -22,6 +27,15 @@ class SVGP(LatentFunctionInference):
#S = S + np.eye(S.shape[0])*1e-5*np.max(np.max(S))
#Si, Lnew, _,_ = linalg.pdinv(S)
+ #compute mean function stuff
+ if mean_function is not None:
+ prior_mean_u = mean_function.f(Z)
+ prior_mean_f = mean_function.f(X)
+ else:
+ prior_mean_u = np.zeros((num_inducing, num_outputs))
+ prior_mean_f = np.zeros((num_data, num_outputs))
+
+
#compute kernel related stuff
Kmm = kern.K(Z)
Knm = kern.K(X, Z)
@@ -30,38 +44,64 @@ class SVGP(LatentFunctionInference):
#compute the marginal means and variances of q(f)
A = np.dot(Knm, Kmmi)
- mu = np.dot(A, q_u_mean)
- v = Knn_diag[:,None] - np.sum(A*Knm,1)[:,None] + np.sum(A[:,:,None] * np.einsum('ij,jkl->ikl', A, S),1)
+ mu = prior_mean_f + np.dot(A, q_u_mean - prior_mean_u)
+ #v = Knn_diag[:,None] - np.sum(A*Knm,1)[:,None] + np.sum(A[:,:,None] * np.einsum('ij,jlk->ilk', A, S),1)
+ v = Knn_diag[:,None] - np.sum(A*Knm,1)[:,None] + np.sum(A[:,:,None] * linalg.ij_jlk_to_ilk(A, S),1)
#compute the KL term
Kmmim = np.dot(Kmmi, q_u_mean)
- KLs = -0.5*logdetS -0.5*num_inducing + 0.5*logdetKmm + 0.5*np.einsum('ij,ijk->k', Kmmi, S) + 0.5*np.sum(q_u_mean*Kmmim,0)
+ KLs = -0.5*logdetS -0.5*num_inducing + 0.5*logdetKmm + 0.5*np.sum(Kmmi[:,:,None]*S,0).sum(0) + 0.5*np.sum(q_u_mean*Kmmim,0)
KL = KLs.sum()
- dKL_dm = Kmmim
+ #gradient of the KL term (assuming zero mean function)
+ dKL_dm = Kmmim.copy()
dKL_dS = 0.5*(Kmmi[:,:,None] - Si)
dKL_dKmm = 0.5*num_outputs*Kmmi - 0.5*Kmmi.dot(S.sum(-1)).dot(Kmmi) - 0.5*Kmmim.dot(Kmmim.T)
+ if mean_function is not None:
+ #adjust KL term for mean function
+ Kmmi_mfZ = np.dot(Kmmi, prior_mean_u)
+ KL += -np.sum(q_u_mean*Kmmi_mfZ)
+ KL += 0.5*np.sum(Kmmi_mfZ*prior_mean_u)
+
+            #adjust gradient for mean function
+ dKL_dm -= Kmmi_mfZ
+ dKL_dKmm += Kmmim.dot(Kmmi_mfZ.T)
+ dKL_dKmm -= 0.5*Kmmi_mfZ.dot(Kmmi_mfZ.T)
+
+ #compute gradients for mean_function
+ dKL_dmfZ = Kmmi_mfZ - Kmmim
#quadrature for the likelihood
F, dF_dmu, dF_dv, dF_dthetaL = likelihood.variational_expectations(Y, mu, v, Y_metadata=Y_metadata)
#rescale the F term if working on a batch
F, dF_dmu, dF_dv = F*batch_scale, dF_dmu*batch_scale, dF_dv*batch_scale
+ if dF_dthetaL is not None:
+ dF_dthetaL = dF_dthetaL.sum(1).sum(1)*batch_scale
- #derivatives of expected likelihood
+ #derivatives of expected likelihood, assuming zero mean function
Adv = A.T[:,:,None]*dF_dv[None,:,:] # As if dF_Dv is diagonal
Admu = A.T.dot(dF_dmu)
- #AdvA = np.einsum('ijk,jl->ilk', Adv, A)
- #AdvA = np.dot(A.T, Adv).swapaxes(0,1)
AdvA = np.dstack([np.dot(A.T, Adv[:,:,i].T) for i in range(num_outputs)])
- tmp = np.einsum('ijk,jlk->il', AdvA, S).dot(Kmmi)
+ #tmp = np.einsum('ijk,jlk->il', AdvA, S).dot(Kmmi)
+ tmp = linalg.ijk_jlk_to_il(AdvA, S).dot(Kmmi)
dF_dKmm = -Admu.dot(Kmmim.T) + AdvA.sum(-1) - tmp - tmp.T
dF_dKmm = 0.5*(dF_dKmm + dF_dKmm.T) # necessary? GPy bug?
- tmp = 2.*(np.einsum('ij,jlk->ilk', Kmmi,S) - np.eye(num_inducing)[:,:,None])
- dF_dKmn = np.einsum('ijk,jlk->il', tmp, Adv) + Kmmim.dot(dF_dmu.T)
+ #tmp = 2.*(np.einsum('ij,jlk->ilk', Kmmi,S) - np.eye(num_inducing)[:,:,None])
+ tmp = 2.*(linalg.ij_jlk_to_ilk(Kmmi, S) - np.eye(num_inducing)[:,:,None])
+ #dF_dKmn = np.einsum('ijk,jlk->il', tmp, Adv) + Kmmim.dot(dF_dmu.T)
+ dF_dKmn = linalg.ijk_jlk_to_il(tmp, Adv) + Kmmim.dot(dF_dmu.T)
dF_dm = Admu
dF_dS = AdvA
+ #adjust gradient to account for mean function
+ if mean_function is not None:
+ dF_dmfX = dF_dmu.copy()
+ dF_dmfZ = -Admu
+ dF_dKmn -= np.dot(Kmmi_mfZ, dF_dmu.T)
+ dF_dKmm += Admu.dot(Kmmi_mfZ.T)
+
+
#sum (gradients of) expected likelihood and KL part
log_marginal = F.sum() - KL
dL_dm, dL_dS, dL_dKmm, dL_dKmn = dF_dm - dKL_dm, dF_dS- dKL_dS, dF_dKmm- dKL_dKmm, dF_dKmn
@@ -69,4 +109,8 @@ class SVGP(LatentFunctionInference):
dL_dchol = np.dstack([2.*np.dot(dL_dS[:,:,i], L[:,:,i]) for i in range(num_outputs)])
dL_dchol = choleskies.triang_to_flat(dL_dchol)
- return Posterior(mean=q_u_mean, cov=S, K=Kmm), log_marginal, {'dL_dKmm':dL_dKmm, 'dL_dKmn':dL_dKmn, 'dL_dKdiag': dF_dv, 'dL_dm':dL_dm, 'dL_dchol':dL_dchol, 'dL_dthetaL':dF_dthetaL}
+ grad_dict = {'dL_dKmm':dL_dKmm, 'dL_dKmn':dL_dKmn, 'dL_dKdiag': dF_dv.sum(1), 'dL_dm':dL_dm, 'dL_dchol':dL_dchol, 'dL_dthetaL':dF_dthetaL}
+ if mean_function is not None:
+ grad_dict['dL_dmfZ'] = dF_dmfZ - dKL_dmfZ
+ grad_dict['dL_dmfX'] = dF_dmfX
+ return Posterior(mean=q_u_mean, cov=S, K=Kmm, prior_mean=prior_mean_u), log_marginal, grad_dict
diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py
index 9c4d51bb..97d8dfe3 100644
--- a/GPy/inference/latent_function_inference/var_dtc.py
+++ b/GPy/inference/latent_function_inference/var_dtc.py
@@ -1,7 +1,7 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from posterior import Posterior
+from .posterior import Posterior
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
@@ -170,7 +170,7 @@ class VarDTC(LatentFunctionInference):
if VVT_factor.shape[1] == Y.shape[1]:
woodbury_vector = Cpsi1Vf # == Cpsi1V
else:
- print 'foobar'
+ print('foobar')
import ipdb; ipdb.set_trace()
psi1V = np.dot(Y.T*beta, psi1).T
tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
@@ -213,7 +213,7 @@ def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf,
dL_dR = None
elif het_noise:
if uncertain_inputs:
- raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented"
+ raise NotImplementedError("heteroscedatic derivates with uncertain inputs not implemented")
else:
#from ...util.linalg import chol_inv
#LBi = chol_inv(LB)
diff --git a/GPy/inference/latent_function_inference/var_dtc_parallel.py b/GPy/inference/latent_function_inference/var_dtc_parallel.py
index cac69872..4b884d4c 100644
--- a/GPy/inference/latent_function_inference/var_dtc_parallel.py
+++ b/GPy/inference/latent_function_inference/var_dtc_parallel.py
@@ -1,7 +1,7 @@
# Copyright (c) 2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from posterior import Posterior
+from .posterior import Posterior
from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri,pdinv
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
@@ -92,7 +92,7 @@ class VarDTC_minibatch(LatentFunctionInference):
psi0_full = 0.
YRY_full = 0.
- for n_start in xrange(0,num_data,batchsize):
+ for n_start in range(0,num_data,batchsize):
n_end = min(batchsize+n_start, num_data)
if batchsize==num_data:
Y_slice = Y
@@ -169,11 +169,13 @@ class VarDTC_minibatch(LatentFunctionInference):
Kmm = kern.K(Z).copy()
diag.add(Kmm, self.const_jitter)
- Lm = jitchol(Kmm, maxtries=100)
+ if not np.isfinite(Kmm).all():
+ print(Kmm)
+ Lm = jitchol(Kmm)
LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right')
Lambda = np.eye(Kmm.shape[0])+LmInvPsi2LmInvT
- LL = jitchol(Lambda, maxtries=100)
+ LL = jitchol(Lambda)
logdet_L = 2.*np.sum(np.log(np.diag(LL)))
b = dtrtrs(LL,dtrtrs(Lm,psi1Y_full.T)[0])[0]
bbt = np.square(b).sum()
diff --git a/GPy/inference/mcmc/__init__.py b/GPy/inference/mcmc/__init__.py
index 956448d4..8f185457 100644
--- a/GPy/inference/mcmc/__init__.py
+++ b/GPy/inference/mcmc/__init__.py
@@ -1 +1 @@
-from hmc import HMC
+from .hmc import HMC
diff --git a/GPy/inference/mcmc/hmc.py b/GPy/inference/mcmc/hmc.py
index ec6399b6..fcc72591 100644
--- a/GPy/inference/mcmc/hmc.py
+++ b/GPy/inference/mcmc/hmc.py
@@ -39,7 +39,7 @@ class HMC:
:rtype: numpy.ndarray
"""
params = np.empty((num_samples,self.p.size))
- for i in xrange(num_samples):
+ for i in range(num_samples):
self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size),self.M)
H_old = self._computeH()
theta_old = self.model.optimizer_array.copy()
@@ -59,7 +59,7 @@ class HMC:
return params
def _update(self, hmc_iters):
- for i in xrange(hmc_iters):
+ for i in range(hmc_iters):
self.p[:] += -self.stepsize/2.*self.model._transform_gradients(self.model.objective_function_gradients())
self.model.optimizer_array = self.model.optimizer_array + self.stepsize*np.dot(self.Minv, self.p)
self.p[:] += -self.stepsize/2.*self.model._transform_gradients(self.model.objective_function_gradients())
@@ -82,7 +82,7 @@ class HMC_shortcut:
def sample(self, m_iters=1000, hmc_iters=20):
params = np.empty((m_iters,self.p.size))
- for i in xrange(m_iters):
+ for i in range(m_iters):
# sample a stepsize from the uniform distribution
stepsize = np.exp(np.random.rand()*(self.stepsize_range[1]-self.stepsize_range[0])+self.stepsize_range[0])
self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size),self.M)
diff --git a/GPy/inference/mcmc/samplers.py b/GPy/inference/mcmc/samplers.py
index 444d99d7..6459e8af 100644
--- a/GPy/inference/mcmc/samplers.py
+++ b/GPy/inference/mcmc/samplers.py
@@ -9,7 +9,13 @@ import sys
import re
import numdifftools as ndt
import pdb
-import cPickle
+
+try:
+ #In Python 2, cPickle is faster. It does not exist in Python 3 but the underlying code is always used
+ #if available
+ import cPickle as pickle
+except ImportError:
+ import pickle
class Metropolis_Hastings:
@@ -40,7 +46,7 @@ class Metropolis_Hastings:
fcurrent = self.model.log_likelihood() + self.model.log_prior()
accepted = np.zeros(Ntotal,dtype=np.bool)
for it in range(Ntotal):
- print "sample %d of %d\r"%(it,Ntotal),
+ print("sample %d of %d\r"%(it,Ntotal), end=' ')
sys.stdout.flush()
prop = np.random.multivariate_normal(current, self.cov*self.scale*self.scale)
self.model._set_params_transformed(prop)
diff --git a/GPy/inference/optimization/__init__.py b/GPy/inference/optimization/__init__.py
index 1a8f043b..909f897b 100644
--- a/GPy/inference/optimization/__init__.py
+++ b/GPy/inference/optimization/__init__.py
@@ -1,2 +1,2 @@
-from scg import SCG
-from optimization import *
+from .scg import SCG
+from .optimization import *
diff --git a/GPy/inference/optimization/conjugate_gradient_descent.py b/GPy/inference/optimization/conjugate_gradient_descent.py
index dfc4a48d..fc2d8b61 100644
--- a/GPy/inference/optimization/conjugate_gradient_descent.py
+++ b/GPy/inference/optimization/conjugate_gradient_descent.py
@@ -1,7 +1,7 @@
# Copyright (c) 2012-2014, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from gradient_descent_update_rules import FletcherReeves, \
+from .gradient_descent_update_rules import FletcherReeves, \
PolakRibiere
from Queue import Empty
from multiprocessing import Value
@@ -74,7 +74,7 @@ class _Async_Optimization(Thread):
if self.outq is not None:
self.outq.put(self.SENTINEL)
if self.messages:
- print ""
+ print("")
self.runsignal.clear()
def run(self, *args, **kwargs):
@@ -213,7 +213,7 @@ class Async_Optimize(object):
# # print "^C"
# self.runsignal.clear()
# c.join()
- print "WARNING: callback still running, optimisation done!"
+ print("WARNING: callback still running, optimisation done!")
return p.result
class CGD(Async_Optimize):
diff --git a/GPy/inference/optimization/optimization.py b/GPy/inference/optimization/optimization.py
index aa9be793..fd140688 100644
--- a/GPy/inference/optimization/optimization.py
+++ b/GPy/inference/optimization/optimization.py
@@ -10,7 +10,7 @@ try:
rasm_available = True
except ImportError:
rasm_available = False
-from scg import SCG
+from .scg import SCG
class Optimizer():
"""
@@ -54,7 +54,7 @@ class Optimizer():
self.time = str(end - start)
def opt(self, f_fp=None, f=None, fp=None):
- raise NotImplementedError, "this needs to be implemented to use the optimizer class"
+ raise NotImplementedError("this needs to be implemented to use the optimizer class")
def plot(self):
"""
@@ -125,9 +125,9 @@ class opt_lbfgsb(Optimizer):
opt_dict = {}
if self.xtol is not None:
- print "WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it"
+ print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it")
if self.ftol is not None:
- print "WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it"
+ print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it")
if self.gtol is not None:
opt_dict['pgtol'] = self.gtol
if self.bfgs_factor is not None:
@@ -140,6 +140,10 @@ class opt_lbfgsb(Optimizer):
self.funct_eval = opt_result[2]['funcalls']
self.status = rcstrings[opt_result[2]['warnflag']]
+ #a more helpful error message is available in opt_result in the Error case
+ if opt_result[2]['warnflag']==2:
+ self.status = 'Error' + opt_result[2]['task']
+
class opt_simplex(Optimizer):
def __init__(self, *args, **kwargs):
Optimizer.__init__(self, *args, **kwargs)
@@ -158,7 +162,7 @@ class opt_simplex(Optimizer):
if self.ftol is not None:
opt_dict['ftol'] = self.ftol
if self.gtol is not None:
- print "WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it"
+ print("WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it")
opt_result = optimize.fmin(f, self.x_init, (), disp=self.messages,
maxfun=self.max_f_eval, full_output=True, **opt_dict)
@@ -186,11 +190,11 @@ class opt_rasm(Optimizer):
opt_dict = {}
if self.xtol is not None:
- print "WARNING: minimize doesn't have an xtol arg, so I'm going to ignore it"
+ print("WARNING: minimize doesn't have an xtol arg, so I'm going to ignore it")
if self.ftol is not None:
- print "WARNING: minimize doesn't have an ftol arg, so I'm going to ignore it"
+ print("WARNING: minimize doesn't have an ftol arg, so I'm going to ignore it")
if self.gtol is not None:
- print "WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it"
+ print("WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it")
opt_result = rasm.minimize(self.x_init, f_fp, (), messages=self.messages,
maxnumfuneval=self.max_f_eval)
diff --git a/GPy/inference/optimization/scg.py b/GPy/inference/optimization/scg.py
index 34dd181f..8960de1d 100644
--- a/GPy/inference/optimization/scg.py
+++ b/GPy/inference/optimization/scg.py
@@ -21,14 +21,13 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-
+from __future__ import print_function
import numpy as np
import sys
-
def print_out(len_maxiters, fnow, current_grad, beta, iteration):
- print '\r',
- print '{0:>0{mi}g} {1:> 12e} {2:< 12.6e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
+ print('\r', end=' ')
+ print('{0:>0{mi}g} {1:> 12e} {2:< 12.6e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
sys.stdout.flush()
def exponents(fnow, current_grad):
@@ -80,7 +79,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True,
len_maxiters = len(str(maxiters))
if display:
- print ' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters)
+ print(' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters))
exps = exponents(fnow, current_grad)
p_iter = iteration
@@ -140,7 +139,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True,
b = np.any(n_exps < exps)
if a or b:
p_iter = iteration
- print ''
+ print('')
if b:
exps = n_exps
@@ -189,6 +188,6 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True,
if display:
print_out(len_maxiters, fnow, current_grad, beta, iteration)
- print ""
- print status
+ print("")
+ print(status)
return x, flog, function_eval, status
diff --git a/GPy/inference/optimization/stochastics.py b/GPy/inference/optimization/stochastics.py
index dc71d539..f1532bc5 100644
--- a/GPy/inference/optimization/stochastics.py
+++ b/GPy/inference/optimization/stochastics.py
@@ -30,7 +30,7 @@ class SparseGPMissing(StochasticStorage):
Thus, we can just make sure the loop goes over self.d every
time.
"""
- self.d = xrange(model.Y_normalized.shape[1])
+ self.d = range(model.Y_normalized.shape[1])
class SparseGPStochastics(StochasticStorage):
"""
diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py
index 718be74f..2bd55617 100644
--- a/GPy/kern/__init__.py
+++ b/GPy/kern/__init__.py
@@ -1,20 +1,23 @@
-from _src.kern import Kern
-from _src.rbf import RBF
-from _src.linear import Linear, LinearFull
-from _src.static import Bias, White, Fixed
-from _src.brownian import Brownian
-from _src.stationary import Exponential, OU, Matern32, Matern52, ExpQuad, RatQuad, Cosine
-from _src.mlp import MLP
-from _src.periodic import PeriodicExponential, PeriodicMatern32, PeriodicMatern52
-from _src.independent_outputs import IndependentOutputs, Hierarchical
-from _src.coregionalize import Coregionalize
-from _src.ODE_UY import ODE_UY
-from _src.ODE_UYC import ODE_UYC
-from _src.ODE_st import ODE_st
-from _src.ODE_t import ODE_t
-from _src.poly import Poly
-from _src.eq_ode2 import EQ_ODE2
+from ._src.kern import Kern
+from ._src.rbf import RBF
+from ._src.linear import Linear, LinearFull
+from ._src.static import Bias, White, Fixed
+from ._src.brownian import Brownian
+from ._src.stationary import Exponential, OU, Matern32, Matern52, ExpQuad, RatQuad, Cosine
+from ._src.mlp import MLP
+from ._src.periodic import PeriodicExponential, PeriodicMatern32, PeriodicMatern52
+from ._src.independent_outputs import IndependentOutputs, Hierarchical
+from ._src.coregionalize import Coregionalize
+from ._src.ODE_UY import ODE_UY
+from ._src.ODE_UYC import ODE_UYC
+from ._src.ODE_st import ODE_st
+from ._src.ODE_t import ODE_t
+from ._src.poly import Poly
+from ._src.eq_ode2 import EQ_ODE2
+from ._src.trunclinear import TruncLinear,TruncLinear_inf
+from ._src.splitKern import SplitKern,DEtime
+from ._src.splitKern import DEtime as DiffGenomeKern
-from _src.trunclinear import TruncLinear,TruncLinear_inf
-from _src.splitKern import SplitKern,DiffGenomeKern
+
+from ._src.basis_funcs import LinearSlopeBasisFuncKernel, BasisFuncKernel, ChangePointBasisFuncKernel, DomainKernel
diff --git a/GPy/kern/_src/ODE_UY.py b/GPy/kern/_src/ODE_UY.py
index b4a2b42d..9c9b47be 100644
--- a/GPy/kern/_src/ODE_UY.py
+++ b/GPy/kern/_src/ODE_UY.py
@@ -1,11 +1,11 @@
# Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from kern import Kern
+from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
class ODE_UY(Kern):
def __init__(self, input_dim, variance_U=3., variance_Y=1., lengthscale_U=1., lengthscale_Y=1., active_dims=None, name='ode_uy'):
@@ -114,7 +114,7 @@ class ODE_UY(Kern):
elif i==1:
Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
else:
- raise ValueError, "invalid input/output index"
+ raise ValueError("invalid input/output index")
#Kdiag[slices[0][0]]+= self.variance_U #matern32 diag
#Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag
return Kdiag
diff --git a/GPy/kern/_src/ODE_UYC.py b/GPy/kern/_src/ODE_UYC.py
index 1722d2e1..ff75a328 100644
--- a/GPy/kern/_src/ODE_UYC.py
+++ b/GPy/kern/_src/ODE_UYC.py
@@ -1,11 +1,11 @@
# Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from kern import Kern
+from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
class ODE_UYC(Kern):
def __init__(self, input_dim, variance_U=3., variance_Y=1., lengthscale_U=1., lengthscale_Y=1., ubias =1. ,active_dims=None, name='ode_uyc'):
@@ -115,7 +115,7 @@ class ODE_UYC(Kern):
elif i==1:
Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
else:
- raise ValueError, "invalid input/output index"
+ raise ValueError("invalid input/output index")
#Kdiag[slices[0][0]]+= self.variance_U #matern32 diag
#Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag
return Kdiag
diff --git a/GPy/kern/_src/ODE_st.py b/GPy/kern/_src/ODE_st.py
index 665be230..afa46d09 100644
--- a/GPy/kern/_src/ODE_st.py
+++ b/GPy/kern/_src/ODE_st.py
@@ -1,10 +1,10 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from kern import Kern
+from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
class ODE_st(Kern):
@@ -135,7 +135,7 @@ class ODE_st(Kern):
Kdiag[s1]+= b**2*k1 - 2*a*c*k2 + a**2*k3 + c**2*vyt*vyx
#Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
else:
- raise ValueError, "invalid input/output index"
+ raise ValueError("invalid input/output index")
return Kdiag
diff --git a/GPy/kern/_src/ODE_t.py b/GPy/kern/_src/ODE_t.py
index a470cbec..80625f51 100644
--- a/GPy/kern/_src/ODE_t.py
+++ b/GPy/kern/_src/ODE_t.py
@@ -1,8 +1,8 @@
-from kern import Kern
+from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
class ODE_t(Kern):
@@ -85,7 +85,7 @@ class ODE_t(Kern):
Kdiag[s1]+= k1 + vyt+self.ubias
#Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
else:
- raise ValueError, "invalid input/output index"
+ raise ValueError("invalid input/output index")
return Kdiag
diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py
index 4c72a254..696a8b04 100644
--- a/GPy/kern/_src/add.py
+++ b/GPy/kern/_src/add.py
@@ -4,7 +4,8 @@
import numpy as np
import itertools
from ...util.caching import Cache_this
-from kern import CombinationKernel
+from .kern import CombinationKernel
+from functools import reduce
class Add(CombinationKernel):
"""
@@ -84,10 +85,10 @@ class Add(CombinationKernel):
psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
#return psi2
# compute the "cross" terms
- from static import White, Bias
- from rbf import RBF
+ from .static import White, Bias
+ from .rbf import RBF
#from rbf_inv import RBFInv
- from linear import Linear
+ from .linear import Linear
#ffrom fixed import Fixed
for p1, p2 in itertools.combinations(self.parts, 2):
@@ -111,11 +112,11 @@ class Add(CombinationKernel):
psi2 += np.einsum('nm,no->mo',tmp1,tmp2)+np.einsum('nm,no->mo',tmp2,tmp1)
#(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
else:
- raise NotImplementedError, "psi2 cannot be computed for this kernel"
+ raise NotImplementedError("psi2 cannot be computed for this kernel")
return psi2
def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
- from static import White, Bias
+ from .static import White, Bias
for p1 in self.parts:
#compute the effective dL_dpsi1. Extra terms appear becaue of the cross terms in psi2!
eff_dL_dpsi1 = dL_dpsi1.copy()
@@ -131,7 +132,7 @@ class Add(CombinationKernel):
p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
def gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
- from static import White, Bias
+ from .static import White, Bias
target = np.zeros(Z.shape)
for p1 in self.parts:
#compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2!
@@ -149,7 +150,7 @@ class Add(CombinationKernel):
return target
def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
- from static import White, Bias
+ from .static import White, Bias
target_grads = [np.zeros(v.shape) for v in variational_posterior.parameters]
for p1 in self.parameters:
#compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2!
@@ -164,7 +165,7 @@ class Add(CombinationKernel):
else:
eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.psi1(Z, variational_posterior) * 2.
grads = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
- [np.add(target_grads[i],grads[i],target_grads[i]) for i in xrange(len(grads))]
+ [np.add(target_grads[i],grads[i],target_grads[i]) for i in range(len(grads))]
return target_grads
def add(self, other):
@@ -180,9 +181,12 @@ class Add(CombinationKernel):
def input_sensitivity(self, summarize=True):
if summarize:
- return reduce(np.add, [k.input_sensitivity(summarize) for k in self.parts])
+ i_s = np.zeros((self.input_dim))
+ for k in self.parts:
+ i_s[k.active_dims] += k.input_sensitivity(summarize)
+ return i_s
else:
i_s = np.zeros((len(self.parts), self.input_dim))
from operator import setitem
- [setitem(i_s, (i, Ellipsis), k.input_sensitivity(summarize)) for i, k in enumerate(self.parts)]
+ [setitem(i_s, (i, k.active_dims), k.input_sensitivity(summarize)) for i, k in enumerate(self.parts)]
return i_s
diff --git a/GPy/kern/_src/basis_funcs.py b/GPy/kern/_src/basis_funcs.py
new file mode 100644
index 00000000..a6c1f36c
--- /dev/null
+++ b/GPy/kern/_src/basis_funcs.py
@@ -0,0 +1,183 @@
+# #Copyright (c) 2012, Max Zwiessele (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+from .kern import Kern
+from ...core.parameterization.param import Param
+from ...core.parameterization.transformations import Logexp
+import numpy as np
+from ...util.caching import Cache_this
+from ...util.linalg import tdot, mdot
+
+class BasisFuncKernel(Kern):
+ def __init__(self, input_dim, variance=1., active_dims=None, ARD=False, name='basis func kernel'):
+ """
+ Abstract superclass for kernels with explicit basis functions for use in GPy.
+
+ This class does NOT automatically add an offset to the design matrix phi!
+ """
+ super(BasisFuncKernel, self).__init__(input_dim, active_dims, name)
+ self.ARD = ARD
+ if self.ARD:
+ phi_test = self._phi(np.random.normal(0, 1, (1, self.input_dim)))
+ variance = variance * np.ones(phi_test.shape[1])
+ else:
+ variance = np.array(variance)
+ self.variance = Param('variance', variance, Logexp())
+ self.link_parameter(self.variance)
+
+ def parameters_changed(self):
+ self.alpha = np.sqrt(self.variance)
+ self.beta = 1./self.variance
+
+ @Cache_this(limit=3, ignore_args=())
+ def phi(self, X):
+ return self._phi(X)
+
+ def _phi(self, X):
+ raise NotImplementedError('Overwrite this _phi function, which maps the input X into the higher dimensional space and returns the design matrix Phi')
+
+ def K(self, X, X2=None):
+ return self._K(X, X2)
+
+ def Kdiag(self, X, X2=None):
+ return np.diag(self._K(X, X2))
+
+ def update_gradients_full(self, dL_dK, X, X2=None):
+ if self.ARD:
+ phi1 = self.phi(X)
+ if X2 is None or X is X2:
+ self.variance.gradient = np.einsum('ij,iq,jq->q', dL_dK, phi1, phi1)
+ else:
+ phi2 = self.phi(X2)
+ self.variance.gradient = np.einsum('ij,iq,jq->q', dL_dK, phi1, phi2)
+ else:
+ self.variance.gradient = np.einsum('ij,ij', dL_dK, self._K(X, X2)) * self.beta
+
+ def update_gradients_diag(self, dL_dKdiag, X):
+ if self.ARD:
+ phi1 = self.phi(X)
+ self.variance.gradient = np.einsum('i,iq,iq->q', dL_dKdiag, phi1, phi1)
+ else:
+ self.variance.gradient = np.einsum('i,i', dL_dKdiag, self.Kdiag(X)) * self.beta
+
+ def concatenate_offset(self, X):
+ return np.c_[np.ones((X.shape[0], 1)), X]
+
+ def posterior_inf(self, X=None, posterior=None):
+ """
+ Do the posterior inference on the parameters given this kernels functions
+ and the model posterior, which has to be a GPy posterior, usually found at m.posterior, if m is a GPy model.
+ If not given we search for the the highest parent to be a model, containing the posterior, and for X accordingly.
+ """
+ if X is None:
+ try:
+ X = self._highest_parent_.X
+ except NameError:
+ raise RuntimeError("This kernel is not part of a model and cannot be used for posterior inference")
+ if posterior is None:
+ try:
+ posterior = self._highest_parent_.posterior
+ except NameError:
+ raise RuntimeError("This kernel is not part of a model and cannot be used for posterior inference")
+ phi_alpha = self.phi(X) * self.variance
+ return (phi_alpha).T.dot(posterior.woodbury_vector), (np.eye(phi_alpha.shape[1])*self.variance - mdot(phi_alpha.T, posterior.woodbury_inv, phi_alpha))
+
+ @Cache_this(limit=3, ignore_args=())
+ def _K(self, X, X2):
+ if X2 is None or X is X2:
+ phi = self.phi(X) * self.alpha
+ if phi.ndim != 2:
+ phi = phi[:, None]
+ return tdot(phi)
+ else:
+ phi1 = self.phi(X) * self.alpha
+ phi2 = self.phi(X2) * self.alpha
+ if phi1.ndim != 2:
+ phi1 = phi1[:, None]
+ phi2 = phi2[:, None]
+ return phi1.dot(phi2.T)
+
+
+class LinearSlopeBasisFuncKernel(BasisFuncKernel):
+ def __init__(self, input_dim, start, stop, variance=1., active_dims=None, ARD=False, name='linear_segment'):
+ """
+ A linear segment transformation. The segments start at start, \
+ are then linear to stop and constant again. The segments are
+ normalized, so that they have exactly as much mass above
+ as below the origin.
+
+ Start and stop can be tuples or lists of starts and stops.
+ Behaviour of start stop is as np.where(X self.stop, self.stop, phi)
+ return ((phi-(self.stop+self.start)/2.))#/(.5*(self.stop-self.start)))-1.
+
+class ChangePointBasisFuncKernel(BasisFuncKernel):
+ def __init__(self, input_dim, changepoint, variance=1., active_dims=None, ARD=False, name='changepoint'):
+ self.changepoint = np.array(changepoint)
+ super(ChangePointBasisFuncKernel, self).__init__(input_dim, variance, active_dims, ARD, name)
+
+ @Cache_this(limit=3, ignore_args=())
+ def _phi(self, X):
+ return np.where((X < self.changepoint), -1, 1)
+
+class DomainKernel(LinearSlopeBasisFuncKernel):
+ def __init__(self, input_dim, start, stop, variance=1., active_dims=None, ARD=False, name='constant_domain'):
+ super(DomainKernel, self).__init__(input_dim, start, stop, variance, active_dims, ARD, name)
+
+ @Cache_this(limit=3, ignore_args=())
+ def _phi(self, X):
+ phi = np.where((X>self.start)*(Xq', dL_dK, phi1, dphi1_dl)
+ else:
+ self.slope.gradient = self.variance * 2 * (dL_dK * phi1.dot(dphi1_dl.T)).sum()
+ else:
+ phi1 = self.phi(X)
+ phi2 = self.phi(X2)
+ if phi1.ndim != 2:
+ phi1 = phi1[:, None]
+ phi2 = phi2[:, None]
+ dphi1_dl = (phi1**2) * (np.exp(-((X-self.centers)*self.slope)) * (X-self.centers))
+ dphi2_dl = (phi2**2) * (np.exp(-((X2-self.centers)*self.slope)) * (X2-self.centers))
+ if self.ARD_slope:
+ self.slope.gradient = (self.variance * np.einsum('ij,iq,jq->q', dL_dK, phi1, dphi2_dl) + np.einsum('ij,iq,jq->q', dL_dK, phi2, dphi1_dl))
+ else:
+ self.slope.gradient = self.variance * (dL_dK * phi1.dot(dphi2_dl.T)).sum() + (dL_dK * phi2.dot(dphi1_dl.T)).sum()
+ self.slope.gradient = np.where(np.isnan(self.slope.gradient), 0, self.slope.gradient)
diff --git a/GPy/kern/_src/brownian.py b/GPy/kern/_src/brownian.py
index fd79973c..d403fce7 100644
--- a/GPy/kern/_src/brownian.py
+++ b/GPy/kern/_src/brownian.py
@@ -1,7 +1,7 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from kern import Kern
+from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
import numpy as np
diff --git a/GPy/kern/_src/coregionalize.py b/GPy/kern/_src/coregionalize.py
index 291402ec..7d5f5a2b 100644
--- a/GPy/kern/_src/coregionalize.py
+++ b/GPy/kern/_src/coregionalize.py
@@ -1,12 +1,12 @@
# Copyright (c) 2012, James Hensman and Ricardo Andrade
# Licensed under the BSD 3-clause license (see LICENSE.txt)
-from kern import Kern
+from .kern import Kern
import numpy as np
-from scipy import weave
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
-from ...util.config import config # for assesing whether to use weave
+from ...util.config import config # for assesing whether to use cython
+from . import coregionalize_cython
class Coregionalize(Kern):
"""
@@ -57,13 +57,8 @@ class Coregionalize(Kern):
self.B = np.dot(self.W, self.W.T) + np.diag(self.kappa)
def K(self, X, X2=None):
- if config.getboolean('weave', 'working'):
- try:
- return self._K_weave(X, X2)
- except:
- print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
- config.set('weave', 'working', 'False')
- return self._K_numpy(X, X2)
+ if config.getboolean('cython', 'working'):
+ return self._K_cython(X, X2)
else:
return self._K_numpy(X, X2)
@@ -76,36 +71,10 @@ class Coregionalize(Kern):
index2 = np.asarray(X2, dtype=np.int)
return self.B[index,index2.T]
- def _K_weave(self, X, X2=None):
- """compute the kernel function using scipy.weave"""
- index = np.asarray(X, dtype=np.int)
-
+ def _K_cython(self, X, X2=None):
if X2 is None:
- target = np.empty((X.shape[0], X.shape[0]), dtype=np.float64)
- code="""
- for(int i=0;i
+#ifndef offsetof
+#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+#define CYTHON_COMPILING_IN_PYPY 1
+#define CYTHON_COMPILING_IN_CPYTHON 0
+#else
+#define CYTHON_COMPILING_IN_PYPY 0
+#define CYTHON_COMPILING_IN_CPYTHON 1
+#endif
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600
+#define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyClass_Type
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
+ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyType_Type
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define Py_TPFLAGS_CHECKTYPES 0
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+ #define CYTHON_PEP393_ENABLED 1
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+#else
+ #define CYTHON_PEP393_ENABLED 0
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+ #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
+#else
+ #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+ #define PyNumber_Int PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
+#endif
+#ifndef CYTHON_INLINE
+ #if defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+ /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
+ a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
+ a quiet NaN. */
+ float value;
+ memset(&value, 0xFF, sizeof(value));
+ return value;
+}
+#endif
+#ifdef __cplusplus
+template<class T>
+void __Pyx_call_destructor(T* x) {
+ x->~T();
+}
+#endif
+
+
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+#define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#define __PYX_HAVE__GPy__kern___src__coregionalize_cython
+#define __PYX_HAVE_API__GPy__kern___src__coregionalize_cython
+#include "string.h"
+#include "stdio.h"
+#include "stdlib.h"
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#ifdef PYREX_WITHOUT_ASSERTIONS
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \
+ (sizeof(type) < sizeof(Py_ssize_t)) || \
+ (sizeof(type) > sizeof(Py_ssize_t) && \
+ likely(v < (type)PY_SSIZE_T_MAX || \
+ v == (type)PY_SSIZE_T_MAX) && \
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \
+ v == (type)PY_SSIZE_T_MIN))) || \
+ (sizeof(type) == sizeof(Py_ssize_t) && \
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \
+ v == (type)PY_SSIZE_T_MAX))) )
+static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((const char*)s)
+#if PY_MAJOR_VERSION < 3
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
+{
+ const Py_UNICODE *u_end = u;
+ while (*u_end++) ;
+ return (size_t)(u_end - u - 1);
+}
+#else
+#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
+#endif
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
+#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ const char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ if (strcmp(default_encoding_c, "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (!ascii_chars_u) goto bad;
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ Py_DECREF(ascii_chars_u);
+ Py_DECREF(ascii_chars_b);
+ }
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;
+/* Cache a private copy of sys.getdefaultencoding() in
+   __PYX_DEFAULT_STRING_ENCODING so the __Pyx_PyUnicode_FromStringAndSize
+   macro above can decode C strings with it.  Returns 0 on success, -1 with
+   a Python exception set on failure. */
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ /* BUGFIX: allocate strlen()+1 bytes.  The original generated code
+    allocated only strlen() bytes, so the strcpy below wrote its
+    terminating NUL one byte past the end of the buffer (one-byte heap
+    overflow; fixed in later Cython releases). */
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+
+static PyObject *__pyx_m;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+/* Decide whether native C/C++ complex support is available: C++ always has
+   std::complex; C99 signals native _Complex support via _Complex_I. */
+#if !defined(CYTHON_CCOMPLEX)
+  #if defined(__cplusplus)
+    #define CYTHON_CCOMPLEX 1
+  #elif defined(_Complex_I)
+    #define CYTHON_CCOMPLEX 1
+  #else
+    #define CYTHON_CCOMPLEX 0
+  #endif
+#endif
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    /* BUGFIX: the header names had been stripped from these directives,
+       leaving bare (invalid) "#include" lines; restored per standard
+       Cython-generated output. */
+    #include <complex>
+  #else
+    #include <complex.h>
+  #endif
+#endif
+/* Old Sun/GCC toolchains ship a broken _Complex_I; redefine it. */
+#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
+  #undef _Complex_I
+  #define _Complex_I 1.0fj
+#endif
+
+
+static const char *__pyx_f[] = {
+ "GPy/kern/_src/coregionalize_cython.pyx",
+ "__init__.pxd",
+ "type.pxd",
+};
+#define IS_UNSIGNED(type) (((type) -1) > 0)
+struct __Pyx_StructField_;
+#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
+typedef struct {
+ const char* name;
+ struct __Pyx_StructField_* fields;
+ size_t size;
+ size_t arraysize[8];
+ int ndim;
+ char typegroup;
+ char is_unsigned;
+ int flags;
+} __Pyx_TypeInfo;
+typedef struct __Pyx_StructField_ {
+ __Pyx_TypeInfo* type;
+ const char* name;
+ size_t offset;
+} __Pyx_StructField;
+typedef struct {
+ __Pyx_StructField* field;
+ size_t parent_offset;
+} __Pyx_BufFmt_StackElem;
+typedef struct {
+ __Pyx_StructField root;
+ __Pyx_BufFmt_StackElem* head;
+ size_t fmt_offset;
+ size_t new_count, enc_count;
+ size_t struct_alignment;
+ int is_complex;
+ char enc_type;
+ char new_packmode;
+ char enc_packmode;
+ char is_valid_array;
+} __Pyx_BufFmt_Context;
+
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":723
+ * # in Cython to enable them only on the right systems.
+ *
+ * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
+ * ctypedef npy_int16 int16_t
+ * ctypedef npy_int32 int32_t
+ */
+typedef npy_int8 __pyx_t_5numpy_int8_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":724
+ *
+ * ctypedef npy_int8 int8_t
+ * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
+ * ctypedef npy_int32 int32_t
+ * ctypedef npy_int64 int64_t
+ */
+typedef npy_int16 __pyx_t_5numpy_int16_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725
+ * ctypedef npy_int8 int8_t
+ * ctypedef npy_int16 int16_t
+ * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
+ * ctypedef npy_int64 int64_t
+ * #ctypedef npy_int96 int96_t
+ */
+typedef npy_int32 __pyx_t_5numpy_int32_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726
+ * ctypedef npy_int16 int16_t
+ * ctypedef npy_int32 int32_t
+ * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
+ * #ctypedef npy_int96 int96_t
+ * #ctypedef npy_int128 int128_t
+ */
+typedef npy_int64 __pyx_t_5numpy_int64_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":730
+ * #ctypedef npy_int128 int128_t
+ *
+ * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
+ * ctypedef npy_uint16 uint16_t
+ * ctypedef npy_uint32 uint32_t
+ */
+typedef npy_uint8 __pyx_t_5numpy_uint8_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":731
+ *
+ * ctypedef npy_uint8 uint8_t
+ * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
+ * ctypedef npy_uint32 uint32_t
+ * ctypedef npy_uint64 uint64_t
+ */
+typedef npy_uint16 __pyx_t_5numpy_uint16_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732
+ * ctypedef npy_uint8 uint8_t
+ * ctypedef npy_uint16 uint16_t
+ * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
+ * ctypedef npy_uint64 uint64_t
+ * #ctypedef npy_uint96 uint96_t
+ */
+typedef npy_uint32 __pyx_t_5numpy_uint32_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733
+ * ctypedef npy_uint16 uint16_t
+ * ctypedef npy_uint32 uint32_t
+ * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
+ * #ctypedef npy_uint96 uint96_t
+ * #ctypedef npy_uint128 uint128_t
+ */
+typedef npy_uint64 __pyx_t_5numpy_uint64_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":737
+ * #ctypedef npy_uint128 uint128_t
+ *
+ * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
+ * ctypedef npy_float64 float64_t
+ * #ctypedef npy_float80 float80_t
+ */
+typedef npy_float32 __pyx_t_5numpy_float32_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":738
+ *
+ * ctypedef npy_float32 float32_t
+ * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
+ * #ctypedef npy_float80 float80_t
+ * #ctypedef npy_float128 float128_t
+ */
+typedef npy_float64 __pyx_t_5numpy_float64_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":747
+ * # The int types are mapped a bit surprising --
+ * # numpy.int corresponds to 'l' and numpy.long to 'q'
+ * ctypedef npy_long int_t # <<<<<<<<<<<<<<
+ * ctypedef npy_longlong long_t
+ * ctypedef npy_longlong longlong_t
+ */
+typedef npy_long __pyx_t_5numpy_int_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":748
+ * # numpy.int corresponds to 'l' and numpy.long to 'q'
+ * ctypedef npy_long int_t
+ * ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
+ * ctypedef npy_longlong longlong_t
+ *
+ */
+typedef npy_longlong __pyx_t_5numpy_long_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749
+ * ctypedef npy_long int_t
+ * ctypedef npy_longlong long_t
+ * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_ulong uint_t
+ */
+typedef npy_longlong __pyx_t_5numpy_longlong_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751
+ * ctypedef npy_longlong longlong_t
+ *
+ * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
+ * ctypedef npy_ulonglong ulong_t
+ * ctypedef npy_ulonglong ulonglong_t
+ */
+typedef npy_ulong __pyx_t_5numpy_uint_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":752
+ *
+ * ctypedef npy_ulong uint_t
+ * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
+ * ctypedef npy_ulonglong ulonglong_t
+ *
+ */
+typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753
+ * ctypedef npy_ulong uint_t
+ * ctypedef npy_ulonglong ulong_t
+ * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_intp intp_t
+ */
+typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755
+ * ctypedef npy_ulonglong ulonglong_t
+ *
+ * ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
+ * ctypedef npy_uintp uintp_t
+ *
+ */
+typedef npy_intp __pyx_t_5numpy_intp_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":756
+ *
+ * ctypedef npy_intp intp_t
+ * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_double float_t
+ */
+typedef npy_uintp __pyx_t_5numpy_uintp_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758
+ * ctypedef npy_uintp uintp_t
+ *
+ * ctypedef npy_double float_t # <<<<<<<<<<<<<<
+ * ctypedef npy_double double_t
+ * ctypedef npy_longdouble longdouble_t
+ */
+typedef npy_double __pyx_t_5numpy_float_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":759
+ *
+ * ctypedef npy_double float_t
+ * ctypedef npy_double double_t # <<<<<<<<<<<<<<
+ * ctypedef npy_longdouble longdouble_t
+ *
+ */
+typedef npy_double __pyx_t_5numpy_double_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760
+ * ctypedef npy_double float_t
+ * ctypedef npy_double double_t
+ * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_cfloat cfloat_t
+ */
+typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
+#if CYTHON_CCOMPLEX
+ #ifdef __cplusplus
+ typedef ::std::complex< float > __pyx_t_float_complex;
+ #else
+ typedef float _Complex __pyx_t_float_complex;
+ #endif
+#else
+ typedef struct { float real, imag; } __pyx_t_float_complex;
+#endif
+
+#if CYTHON_CCOMPLEX
+ #ifdef __cplusplus
+ typedef ::std::complex< double > __pyx_t_double_complex;
+ #else
+ typedef double _Complex __pyx_t_double_complex;
+ #endif
+#else
+ typedef struct { double real, imag; } __pyx_t_double_complex;
+#endif
+
+
+/*--- Type declarations ---*/
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762
+ * ctypedef npy_longdouble longdouble_t
+ *
+ * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
+ * ctypedef npy_cdouble cdouble_t
+ * ctypedef npy_clongdouble clongdouble_t
+ */
+typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":763
+ *
+ * ctypedef npy_cfloat cfloat_t
+ * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
+ * ctypedef npy_clongdouble clongdouble_t
+ *
+ */
+typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764
+ * ctypedef npy_cfloat cfloat_t
+ * ctypedef npy_cdouble cdouble_t
+ * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
+ *
+ * ctypedef npy_cdouble complex_t
+ */
+typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766
+ * ctypedef npy_clongdouble clongdouble_t
+ *
+ * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew1(a):
+ */
+typedef npy_cdouble __pyx_t_5numpy_complex_t;
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+ #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+ if (acquire_gil) { \
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+ PyGILState_Release(__pyx_gilstate_save); \
+ } else { \
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
+ }
+#else
+ #define __Pyx_RefNannySetupContext(name, acquire_gil) \
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+ #define __Pyx_RefNannyFinishContext() \
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_XDECREF_SET(r, v) do { \
+ PyObject *tmp = (PyObject *) r; \
+ r = v; __Pyx_XDECREF(tmp); \
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do { \
+ PyObject *tmp = (PyObject *) r; \
+ r = v; __Pyx_DECREF(tmp); \
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
+
+#if CYTHON_COMPILING_IN_CPYTHON
+/* Attribute lookup fast path: call the type's tp_getattro slot directly
+   when present, falling back to the legacy char*-based tp_getattr slot on
+   Python 2, then to generic PyObject_GetAttr.  Returns a new reference or
+   NULL with an exception set. */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro))
+ return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_getattr))
+ return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+ return PyObject_GetAttr(obj, attr_name);
+}
+#else
+/* Outside CPython no slot access is possible; use the public API. */
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
+
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
+
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
+ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
+ const char* function_name);
+
+static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
+ const char *name, int exact);
+
+static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
+ __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
+static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
+
+static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name);
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
+
+static void __Pyx_RaiseBufferIndexError(int axis);
+
+#define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0)
+#define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1)
+static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb);
+
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
+
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
+
+typedef struct {
+ int code_line;
+ PyCodeObject* code_object;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+typedef struct {
+ Py_ssize_t shape, strides, suboffsets;
+} __Pyx_Buf_DimInfo;
+typedef struct {
+ size_t refcount;
+ Py_buffer pybuffer;
+} __Pyx_Buffer;
+typedef struct {
+ __Pyx_Buffer *rcbuffer;
+ char *data;
+ __Pyx_Buf_DimInfo diminfo[8];
+} __Pyx_LocalBuf_ND;
+
+#if PY_MAJOR_VERSION < 3
+ static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
+ static void __Pyx_ReleaseBuffer(Py_buffer *view);
+#else
+ #define __Pyx_GetBuffer PyObject_GetBuffer
+ #define __Pyx_ReleaseBuffer PyBuffer_Release
+#endif
+
+
+static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
+static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
+
+#if CYTHON_CCOMPLEX
+ #ifdef __cplusplus
+ #define __Pyx_CREAL(z) ((z).real())
+ #define __Pyx_CIMAG(z) ((z).imag())
+ #else
+ #define __Pyx_CREAL(z) (__real__(z))
+ #define __Pyx_CIMAG(z) (__imag__(z))
+ #endif
+#else
+ #define __Pyx_CREAL(z) ((z).real)
+ #define __Pyx_CIMAG(z) ((z).imag)
+#endif
+#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
+ #define __Pyx_SET_CREAL(z,x) ((z).real(x))
+ #define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
+#else
+ #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
+ #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
+#endif
+
+static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
+
+#if CYTHON_CCOMPLEX
+ #define __Pyx_c_eqf(a, b) ((a)==(b))
+ #define __Pyx_c_sumf(a, b) ((a)+(b))
+ #define __Pyx_c_difff(a, b) ((a)-(b))
+ #define __Pyx_c_prodf(a, b) ((a)*(b))
+ #define __Pyx_c_quotf(a, b) ((a)/(b))
+ #define __Pyx_c_negf(a) (-(a))
+ #ifdef __cplusplus
+ #define __Pyx_c_is_zerof(z) ((z)==(float)0)
+ #define __Pyx_c_conjf(z) (::std::conj(z))
+ #if 1
+ #define __Pyx_c_absf(z) (::std::abs(z))
+ #define __Pyx_c_powf(a, b) (::std::pow(a, b))
+ #endif
+ #else
+ #define __Pyx_c_is_zerof(z) ((z)==0)
+ #define __Pyx_c_conjf(z) (conjf(z))
+ #if 1
+ #define __Pyx_c_absf(z) (cabsf(z))
+ #define __Pyx_c_powf(a, b) (cpowf(a, b))
+ #endif
+ #endif
+#else
+ static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
+ static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
+ #if 1
+ static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
+ static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
+ #endif
+#endif
+
+static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
+
+#if CYTHON_CCOMPLEX
+ #define __Pyx_c_eq(a, b) ((a)==(b))
+ #define __Pyx_c_sum(a, b) ((a)+(b))
+ #define __Pyx_c_diff(a, b) ((a)-(b))
+ #define __Pyx_c_prod(a, b) ((a)*(b))
+ #define __Pyx_c_quot(a, b) ((a)/(b))
+ #define __Pyx_c_neg(a) (-(a))
+ #ifdef __cplusplus
+ #define __Pyx_c_is_zero(z) ((z)==(double)0)
+ #define __Pyx_c_conj(z) (::std::conj(z))
+ #if 1
+ #define __Pyx_c_abs(z) (::std::abs(z))
+ #define __Pyx_c_pow(a, b) (::std::pow(a, b))
+ #endif
+ #else
+ #define __Pyx_c_is_zero(z) ((z)==0)
+ #define __Pyx_c_conj(z) (conj(z))
+ #if 1
+ #define __Pyx_c_abs(z) (cabs(z))
+ #define __Pyx_c_pow(a, b) (cpow(a, b))
+ #endif
+ #endif
+#else
+ static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
+ static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
+ #if 1
+ static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
+ static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
+ #endif
+#endif
+
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+static int __Pyx_check_binary_version(void);
+
+#if !defined(__Pyx_PyIdentifier_FromString)
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
+#else
+ #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
+#endif
+#endif
+
+static PyObject *__Pyx_ImportModule(const char *name);
+
+static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
+
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
+
+
+/* Module declarations from 'cython' */
+
+/* Module declarations from 'cpython.buffer' */
+
+/* Module declarations from 'cpython.ref' */
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'libc.stdlib' */
+
+/* Module declarations from 'numpy' */
+
+/* Module declarations from 'numpy' */
+static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
+static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
+static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
+static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
+static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
+static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
+
+/* Module declarations from 'GPy.kern._src.coregionalize_cython' */
+static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
+static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t = { "int64_t", NULL, sizeof(__pyx_t_5numpy_int64_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int64_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int64_t), 0 };
+static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_double_t = { "double_t", NULL, sizeof(__pyx_t_5numpy_double_t), { 0 }, 0, 'R', 0, 0 };
+#define __Pyx_MODULE_NAME "GPy.kern._src.coregionalize_cython"
+int __pyx_module_is_main_GPy__kern___src__coregionalize_cython = 0;
+
+/* Implementation of 'GPy.kern._src.coregionalize_cython' */
+static PyObject *__pyx_builtin_range;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_RuntimeError;
+static PyObject *__pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_K_symmetric(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_B, PyArrayObject *__pyx_v_X); /* proto */
+static PyObject *__pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_2K_asymmetric(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_B, PyArrayObject *__pyx_v_X, PyArrayObject *__pyx_v_X2); /* proto */
+static PyObject *__pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_4gradient_reduce(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_D, PyArrayObject *__pyx_v_dL_dK, PyArrayObject *__pyx_v_index, PyArrayObject *__pyx_v_index2); /* proto */
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
+static char __pyx_k_B[] = "B";
+static char __pyx_k_D[] = "D";
+static char __pyx_k_H[] = "H";
+static char __pyx_k_I[] = "I";
+static char __pyx_k_K[] = "K";
+static char __pyx_k_L[] = "L";
+static char __pyx_k_M[] = "M";
+static char __pyx_k_N[] = "N";
+static char __pyx_k_O[] = "O";
+static char __pyx_k_Q[] = "Q";
+static char __pyx_k_X[] = "X";
+static char __pyx_k_b[] = "b";
+static char __pyx_k_d[] = "d";
+static char __pyx_k_f[] = "f";
+static char __pyx_k_g[] = "g";
+static char __pyx_k_h[] = "h";
+static char __pyx_k_i[] = "i";
+static char __pyx_k_j[] = "j";
+static char __pyx_k_l[] = "l";
+static char __pyx_k_m[] = "m";
+static char __pyx_k_n[] = "n";
+static char __pyx_k_q[] = "q";
+static char __pyx_k_X2[] = "X2";
+static char __pyx_k_Zd[] = "Zd";
+static char __pyx_k_Zf[] = "Zf";
+static char __pyx_k_Zg[] = "Zg";
+static char __pyx_k_np[] = "np";
+static char __pyx_k_main[] = "__main__";
+static char __pyx_k_size[] = "size";
+static char __pyx_k_test[] = "__test__";
+static char __pyx_k_dL_dK[] = "dL_dK";
+static char __pyx_k_empty[] = "empty";
+static char __pyx_k_index[] = "index";
+static char __pyx_k_numpy[] = "numpy";
+static char __pyx_k_range[] = "range";
+static char __pyx_k_zeros[] = "zeros";
+static char __pyx_k_import[] = "__import__";
+static char __pyx_k_index2[] = "index2";
+static char __pyx_k_ValueError[] = "ValueError";
+static char __pyx_k_K_symmetric[] = "K_symmetric";
+static char __pyx_k_dL_dK_small[] = "dL_dK_small";
+static char __pyx_k_K_asymmetric[] = "K_asymmetric";
+static char __pyx_k_RuntimeError[] = "RuntimeError";
+static char __pyx_k_gradient_reduce[] = "gradient_reduce";
+static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
+static char __pyx_k_home_james_work_GPy_GPy_kern__s[] = "/home/james/work/GPy/GPy/kern/_src/coregionalize_cython.pyx";
+static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
+static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
+static char __pyx_k_GPy_kern__src_coregionalize_cyth[] = "GPy.kern._src.coregionalize_cython";
+static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
+static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
+static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
+static PyObject *__pyx_n_s_B;
+static PyObject *__pyx_n_s_D;
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
+static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
+static PyObject *__pyx_n_s_GPy_kern__src_coregionalize_cyth;
+static PyObject *__pyx_n_s_K;
+static PyObject *__pyx_n_s_K_asymmetric;
+static PyObject *__pyx_n_s_K_symmetric;
+static PyObject *__pyx_n_s_M;
+static PyObject *__pyx_n_s_N;
+static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
+static PyObject *__pyx_n_s_RuntimeError;
+static PyObject *__pyx_n_s_ValueError;
+static PyObject *__pyx_n_s_X;
+static PyObject *__pyx_n_s_X2;
+static PyObject *__pyx_n_s_dL_dK;
+static PyObject *__pyx_n_s_dL_dK_small;
+static PyObject *__pyx_n_s_empty;
+static PyObject *__pyx_n_s_gradient_reduce;
+static PyObject *__pyx_kp_s_home_james_work_GPy_GPy_kern__s;
+static PyObject *__pyx_n_s_i;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_index;
+static PyObject *__pyx_n_s_index2;
+static PyObject *__pyx_n_s_j;
+static PyObject *__pyx_n_s_m;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_n;
+static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
+static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
+static PyObject *__pyx_n_s_np;
+static PyObject *__pyx_n_s_numpy;
+static PyObject *__pyx_n_s_range;
+static PyObject *__pyx_n_s_size;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
+static PyObject *__pyx_n_s_zeros;
+static PyObject *__pyx_tuple_;
+static PyObject *__pyx_tuple__2;
+static PyObject *__pyx_tuple__3;
+static PyObject *__pyx_tuple__4;
+static PyObject *__pyx_tuple__5;
+static PyObject *__pyx_tuple__6;
+static PyObject *__pyx_tuple__7;
+static PyObject *__pyx_tuple__9;
+static PyObject *__pyx_tuple__11;
+static PyObject *__pyx_codeobj__8;
+static PyObject *__pyx_codeobj__10;
+static PyObject *__pyx_codeobj__12;
+
+/* "GPy/kern/_src/coregionalize_cython.pyx":7
+ * cimport numpy as np
+ *
+ * def K_symmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X): # <<<<<<<<<<<<<<
+ * cdef int N = X.size
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, N))
+ */
+
+/* Python wrapper */
+/* CPython entry point for K_symmetric(B, X): unpacks the two required
+   arguments from positional args and/or keywords, type-checks both as
+   numpy ndarrays, then dispatches to the __pyx_pf_... implementation. */
+static PyObject *__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_1K_symmetric(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_3GPy_4kern_4_src_20coregionalize_cython_1K_symmetric = {"K_symmetric", (PyCFunction)__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_1K_symmetric, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_1K_symmetric(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyArrayObject *__pyx_v_B = 0;
+ PyArrayObject *__pyx_v_X = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("K_symmetric (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_B,&__pyx_n_s_X,0};
+ PyObject* values[2] = {0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ /* collect whatever was passed positionally (fall-through is intended) */
+ switch (pos_args) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ /* fill remaining slots from keywords, starting after the positionals */
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_B)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("K_symmetric", 1, 2, 2, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ /* any keywords left over are unexpected/duplicate -> TypeError */
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "K_symmetric") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ __pyx_v_B = ((PyArrayObject *)values[0]);
+ __pyx_v_X = ((PyArrayObject *)values[1]);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("K_symmetric", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("GPy.kern._src.coregionalize_cython.K_symmetric", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ /* both arguments must be numpy ndarrays (None not allowed) */
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_B), __pyx_ptype_5numpy_ndarray, 1, "B", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_r = __pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_K_symmetric(__pyx_self, __pyx_v_B, __pyx_v_X);
+
+ /* function exit code */
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_K_symmetric(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_B, PyArrayObject *__pyx_v_X) {
+ int __pyx_v_N;
+ PyArrayObject *__pyx_v_K = 0;
+ int __pyx_v_n;
+ int __pyx_v_m;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_B;
+ __Pyx_Buffer __pyx_pybuffer_B;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_K;
+ __Pyx_Buffer __pyx_pybuffer_K;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_X;
+ __Pyx_Buffer __pyx_pybuffer_X;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyArrayObject *__pyx_t_7 = NULL;
+ int __pyx_t_8;
+ int __pyx_t_9;
+ int __pyx_t_10;
+ int __pyx_t_11;
+ int __pyx_t_12;
+ int __pyx_t_13;
+ __pyx_t_5numpy_int64_t __pyx_t_14;
+ __pyx_t_5numpy_int64_t __pyx_t_15;
+ int __pyx_t_16;
+ int __pyx_t_17;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("K_symmetric", 0);
+ __pyx_pybuffer_K.pybuffer.buf = NULL;
+ __pyx_pybuffer_K.refcount = 0;
+ __pyx_pybuffernd_K.data = NULL;
+ __pyx_pybuffernd_K.rcbuffer = &__pyx_pybuffer_K;
+ __pyx_pybuffer_B.pybuffer.buf = NULL;
+ __pyx_pybuffer_B.refcount = 0;
+ __pyx_pybuffernd_B.data = NULL;
+ __pyx_pybuffernd_B.rcbuffer = &__pyx_pybuffer_B;
+ __pyx_pybuffer_X.pybuffer.buf = NULL;
+ __pyx_pybuffer_X.refcount = 0;
+ __pyx_pybuffernd_X.data = NULL;
+ __pyx_pybuffernd_X.rcbuffer = &__pyx_pybuffer_X;
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_B.rcbuffer->pybuffer, (PyObject*)__pyx_v_B, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_pybuffernd_B.diminfo[0].strides = __pyx_pybuffernd_B.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_B.diminfo[0].shape = __pyx_pybuffernd_B.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_B.diminfo[1].strides = __pyx_pybuffernd_B.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_B.diminfo[1].shape = __pyx_pybuffernd_B.rcbuffer->pybuffer.shape[1];
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_X.rcbuffer->pybuffer, (PyObject*)__pyx_v_X, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 7; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_pybuffernd_X.diminfo[0].strides = __pyx_pybuffernd_X.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_X.diminfo[0].shape = __pyx_pybuffernd_X.rcbuffer->pybuffer.shape[0];
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":8
+ *
+ * def K_symmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X):
+ * cdef int N = X.size # <<<<<<<<<<<<<<
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, N))
+ * for n in range(N):
+ */
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_X), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 8; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_N = __pyx_t_2;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":9
+ * def K_symmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X):
+ * cdef int N = X.size
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, N)) # <<<<<<<<<<<<<<
+ * for n in range(N):
+ * for m in range(N):
+ */
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_3 = 0;
+ __pyx_t_5 = 0;
+ __pyx_t_5 = NULL;
+ if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ if (!__pyx_t_5) {
+ __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ } else {
+ __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL;
+ PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = ((PyArrayObject *)__pyx_t_1);
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_K.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
+ __pyx_v_K = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_K.rcbuffer->pybuffer.buf = NULL;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 9; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ } else {__pyx_pybuffernd_K.diminfo[0].strides = __pyx_pybuffernd_K.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_K.diminfo[0].shape = __pyx_pybuffernd_K.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_K.diminfo[1].strides = __pyx_pybuffernd_K.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_K.diminfo[1].shape = __pyx_pybuffernd_K.rcbuffer->pybuffer.shape[1];
+ }
+ }
+ __pyx_t_7 = 0;
+ __pyx_v_K = ((PyArrayObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":10
+ * cdef int N = X.size
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, N))
+ * for n in range(N): # <<<<<<<<<<<<<<
+ * for m in range(N):
+ * K[n,m] = B[X[n],X[m]]
+ */
+ __pyx_t_2 = __pyx_v_N;
+ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_2; __pyx_t_8+=1) {
+ __pyx_v_n = __pyx_t_8;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":11
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, N))
+ * for n in range(N):
+ * for m in range(N): # <<<<<<<<<<<<<<
+ * K[n,m] = B[X[n],X[m]]
+ * return K
+ */
+ __pyx_t_9 = __pyx_v_N;
+ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
+ __pyx_v_m = __pyx_t_10;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":12
+ * for n in range(N):
+ * for m in range(N):
+ * K[n,m] = B[X[n],X[m]] # <<<<<<<<<<<<<<
+ * return K
+ *
+ */
+ __pyx_t_11 = __pyx_v_n;
+ __pyx_t_12 = -1;
+ if (__pyx_t_11 < 0) {
+ __pyx_t_11 += __pyx_pybuffernd_X.diminfo[0].shape;
+ if (unlikely(__pyx_t_11 < 0)) __pyx_t_12 = 0;
+ } else if (unlikely(__pyx_t_11 >= __pyx_pybuffernd_X.diminfo[0].shape)) __pyx_t_12 = 0;
+ if (unlikely(__pyx_t_12 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_12);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_12 = __pyx_v_m;
+ __pyx_t_13 = -1;
+ if (__pyx_t_12 < 0) {
+ __pyx_t_12 += __pyx_pybuffernd_X.diminfo[0].shape;
+ if (unlikely(__pyx_t_12 < 0)) __pyx_t_13 = 0;
+ } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_X.diminfo[0].shape)) __pyx_t_13 = 0;
+ if (unlikely(__pyx_t_13 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_13);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_14 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_X.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_X.diminfo[0].strides));
+ __pyx_t_15 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_X.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_X.diminfo[0].strides));
+ __pyx_t_13 = -1;
+ if (__pyx_t_14 < 0) {
+ __pyx_t_14 += __pyx_pybuffernd_B.diminfo[0].shape;
+ if (unlikely(__pyx_t_14 < 0)) __pyx_t_13 = 0;
+ } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_B.diminfo[0].shape)) __pyx_t_13 = 0;
+ if (__pyx_t_15 < 0) {
+ __pyx_t_15 += __pyx_pybuffernd_B.diminfo[1].shape;
+ if (unlikely(__pyx_t_15 < 0)) __pyx_t_13 = 1;
+ } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_B.diminfo[1].shape)) __pyx_t_13 = 1;
+ if (unlikely(__pyx_t_13 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_13);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_13 = __pyx_v_n;
+ __pyx_t_16 = __pyx_v_m;
+ __pyx_t_17 = -1;
+ if (__pyx_t_13 < 0) {
+ __pyx_t_13 += __pyx_pybuffernd_K.diminfo[0].shape;
+ if (unlikely(__pyx_t_13 < 0)) __pyx_t_17 = 0;
+ } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_K.diminfo[0].shape)) __pyx_t_17 = 0;
+ if (__pyx_t_16 < 0) {
+ __pyx_t_16 += __pyx_pybuffernd_K.diminfo[1].shape;
+ if (unlikely(__pyx_t_16 < 0)) __pyx_t_17 = 1;
+ } else if (unlikely(__pyx_t_16 >= __pyx_pybuffernd_K.diminfo[1].shape)) __pyx_t_17 = 1;
+ if (unlikely(__pyx_t_17 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_17);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_K.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_K.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_K.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(double *, __pyx_pybuffernd_B.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_B.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_B.diminfo[1].strides));
+ }
+ }
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":13
+ * for m in range(N):
+ * K[n,m] = B[X[n],X[m]]
+ * return K # <<<<<<<<<<<<<<
+ *
+ * def K_asymmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X, np.ndarray[np.int64_t, ndim=1] X2):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_K));
+ __pyx_r = ((PyObject *)__pyx_v_K);
+ goto __pyx_L0;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":7
+ * cimport numpy as np
+ *
+ * def K_symmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X): # <<<<<<<<<<<<<<
+ * cdef int N = X.size
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, N))
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+ __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_B.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_K.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_X.rcbuffer->pybuffer);
+ __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+ __Pyx_AddTraceback("GPy.kern._src.coregionalize_cython.K_symmetric", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ goto __pyx_L2;
+ __pyx_L0:;
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_B.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_K.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_X.rcbuffer->pybuffer);
+ __pyx_L2:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_K);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "GPy/kern/_src/coregionalize_cython.pyx":15
+ * return K
+ *
+ * def K_asymmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X, np.ndarray[np.int64_t, ndim=1] X2): # <<<<<<<<<<<<<<
+ * cdef int N = X.size
+ * cdef int M = X2.size
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_3K_asymmetric(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_3GPy_4kern_4_src_20coregionalize_cython_3K_asymmetric = {"K_asymmetric", (PyCFunction)__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_3K_asymmetric, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_3K_asymmetric(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyArrayObject *__pyx_v_B = 0;
+ PyArrayObject *__pyx_v_X = 0;
+ PyArrayObject *__pyx_v_X2 = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("K_asymmetric (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_B,&__pyx_n_s_X,&__pyx_n_s_X2,0};
+ PyObject* values[3] = {0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_B)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_X)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("K_asymmetric", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_X2)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("K_asymmetric", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "K_asymmetric") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ __pyx_v_B = ((PyArrayObject *)values[0]);
+ __pyx_v_X = ((PyArrayObject *)values[1]);
+ __pyx_v_X2 = ((PyArrayObject *)values[2]);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("K_asymmetric", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("GPy.kern._src.coregionalize_cython.K_asymmetric", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_B), __pyx_ptype_5numpy_ndarray, 1, "B", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X), __pyx_ptype_5numpy_ndarray, 1, "X", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_X2), __pyx_ptype_5numpy_ndarray, 1, "X2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_r = __pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_2K_asymmetric(__pyx_self, __pyx_v_B, __pyx_v_X, __pyx_v_X2);
+
+ /* function exit code */
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_2K_asymmetric(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_B, PyArrayObject *__pyx_v_X, PyArrayObject *__pyx_v_X2) {
+ int __pyx_v_N;
+ int __pyx_v_M;
+ PyArrayObject *__pyx_v_K = 0;
+ int __pyx_v_n;
+ int __pyx_v_m;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_B;
+ __Pyx_Buffer __pyx_pybuffer_B;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_K;
+ __Pyx_Buffer __pyx_pybuffer_K;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_X;
+ __Pyx_Buffer __pyx_pybuffer_X;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_X2;
+ __Pyx_Buffer __pyx_pybuffer_X2;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyArrayObject *__pyx_t_7 = NULL;
+ int __pyx_t_8;
+ int __pyx_t_9;
+ int __pyx_t_10;
+ int __pyx_t_11;
+ int __pyx_t_12;
+ int __pyx_t_13;
+ __pyx_t_5numpy_int64_t __pyx_t_14;
+ __pyx_t_5numpy_int64_t __pyx_t_15;
+ int __pyx_t_16;
+ int __pyx_t_17;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("K_asymmetric", 0);
+ __pyx_pybuffer_K.pybuffer.buf = NULL;
+ __pyx_pybuffer_K.refcount = 0;
+ __pyx_pybuffernd_K.data = NULL;
+ __pyx_pybuffernd_K.rcbuffer = &__pyx_pybuffer_K;
+ __pyx_pybuffer_B.pybuffer.buf = NULL;
+ __pyx_pybuffer_B.refcount = 0;
+ __pyx_pybuffernd_B.data = NULL;
+ __pyx_pybuffernd_B.rcbuffer = &__pyx_pybuffer_B;
+ __pyx_pybuffer_X.pybuffer.buf = NULL;
+ __pyx_pybuffer_X.refcount = 0;
+ __pyx_pybuffernd_X.data = NULL;
+ __pyx_pybuffernd_X.rcbuffer = &__pyx_pybuffer_X;
+ __pyx_pybuffer_X2.pybuffer.buf = NULL;
+ __pyx_pybuffer_X2.refcount = 0;
+ __pyx_pybuffernd_X2.data = NULL;
+ __pyx_pybuffernd_X2.rcbuffer = &__pyx_pybuffer_X2;
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_B.rcbuffer->pybuffer, (PyObject*)__pyx_v_B, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_pybuffernd_B.diminfo[0].strides = __pyx_pybuffernd_B.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_B.diminfo[0].shape = __pyx_pybuffernd_B.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_B.diminfo[1].strides = __pyx_pybuffernd_B.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_B.diminfo[1].shape = __pyx_pybuffernd_B.rcbuffer->pybuffer.shape[1];
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_X.rcbuffer->pybuffer, (PyObject*)__pyx_v_X, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_pybuffernd_X.diminfo[0].strides = __pyx_pybuffernd_X.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_X.diminfo[0].shape = __pyx_pybuffernd_X.rcbuffer->pybuffer.shape[0];
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_X2.rcbuffer->pybuffer, (PyObject*)__pyx_v_X2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_pybuffernd_X2.diminfo[0].strides = __pyx_pybuffernd_X2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_X2.diminfo[0].shape = __pyx_pybuffernd_X2.rcbuffer->pybuffer.shape[0];
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":16
+ *
+ * def K_asymmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X, np.ndarray[np.int64_t, ndim=1] X2):
+ * cdef int N = X.size # <<<<<<<<<<<<<<
+ * cdef int M = X2.size
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, M))
+ */
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_X), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 16; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_N = __pyx_t_2;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":17
+ * def K_asymmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X, np.ndarray[np.int64_t, ndim=1] X2):
+ * cdef int N = X.size
+ * cdef int M = X2.size # <<<<<<<<<<<<<<
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, M))
+ * for n in range(N):
+ */
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_X2), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 17; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_M = __pyx_t_2;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":18
+ * cdef int N = X.size
+ * cdef int M = X2.size
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, M)) # <<<<<<<<<<<<<<
+ * for n in range(N):
+ * for m in range(M):
+ */
+ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_empty); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_3 = 0;
+ __pyx_t_5 = 0;
+ __pyx_t_5 = NULL;
+ if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ if (!__pyx_t_5) {
+ __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_6); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ } else {
+ __pyx_t_3 = PyTuple_New(1+1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __pyx_t_5 = NULL;
+ PyTuple_SET_ITEM(__pyx_t_3, 0+1, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_3, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_7 = ((PyArrayObject *)__pyx_t_1);
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_K.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
+ __pyx_v_K = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_K.rcbuffer->pybuffer.buf = NULL;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 18; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ } else {__pyx_pybuffernd_K.diminfo[0].strides = __pyx_pybuffernd_K.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_K.diminfo[0].shape = __pyx_pybuffernd_K.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_K.diminfo[1].strides = __pyx_pybuffernd_K.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_K.diminfo[1].shape = __pyx_pybuffernd_K.rcbuffer->pybuffer.shape[1];
+ }
+ }
+ __pyx_t_7 = 0;
+ __pyx_v_K = ((PyArrayObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":19
+ * cdef int M = X2.size
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, M))
+ * for n in range(N): # <<<<<<<<<<<<<<
+ * for m in range(M):
+ * K[n,m] = B[X[n],X2[m]]
+ */
+ __pyx_t_2 = __pyx_v_N;
+ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_2; __pyx_t_8+=1) {
+ __pyx_v_n = __pyx_t_8;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":20
+ * cdef np.ndarray[np.double_t, ndim=2] K = np.empty((N, M))
+ * for n in range(N):
+ * for m in range(M): # <<<<<<<<<<<<<<
+ * K[n,m] = B[X[n],X2[m]]
+ * return K
+ */
+ __pyx_t_9 = __pyx_v_M;
+ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
+ __pyx_v_m = __pyx_t_10;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":21
+ * for n in range(N):
+ * for m in range(M):
+ * K[n,m] = B[X[n],X2[m]] # <<<<<<<<<<<<<<
+ * return K
+ *
+ */
+ __pyx_t_11 = __pyx_v_n;
+ __pyx_t_12 = -1;
+ if (__pyx_t_11 < 0) {
+ __pyx_t_11 += __pyx_pybuffernd_X.diminfo[0].shape;
+ if (unlikely(__pyx_t_11 < 0)) __pyx_t_12 = 0;
+ } else if (unlikely(__pyx_t_11 >= __pyx_pybuffernd_X.diminfo[0].shape)) __pyx_t_12 = 0;
+ if (unlikely(__pyx_t_12 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_12);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_12 = __pyx_v_m;
+ __pyx_t_13 = -1;
+ if (__pyx_t_12 < 0) {
+ __pyx_t_12 += __pyx_pybuffernd_X2.diminfo[0].shape;
+ if (unlikely(__pyx_t_12 < 0)) __pyx_t_13 = 0;
+ } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_X2.diminfo[0].shape)) __pyx_t_13 = 0;
+ if (unlikely(__pyx_t_13 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_13);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_14 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_X.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_X.diminfo[0].strides));
+ __pyx_t_15 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_X2.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_X2.diminfo[0].strides));
+ __pyx_t_13 = -1;
+ if (__pyx_t_14 < 0) {
+ __pyx_t_14 += __pyx_pybuffernd_B.diminfo[0].shape;
+ if (unlikely(__pyx_t_14 < 0)) __pyx_t_13 = 0;
+ } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_B.diminfo[0].shape)) __pyx_t_13 = 0;
+ if (__pyx_t_15 < 0) {
+ __pyx_t_15 += __pyx_pybuffernd_B.diminfo[1].shape;
+ if (unlikely(__pyx_t_15 < 0)) __pyx_t_13 = 1;
+ } else if (unlikely(__pyx_t_15 >= __pyx_pybuffernd_B.diminfo[1].shape)) __pyx_t_13 = 1;
+ if (unlikely(__pyx_t_13 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_13);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_13 = __pyx_v_n;
+ __pyx_t_16 = __pyx_v_m;
+ __pyx_t_17 = -1;
+ if (__pyx_t_13 < 0) {
+ __pyx_t_13 += __pyx_pybuffernd_K.diminfo[0].shape;
+ if (unlikely(__pyx_t_13 < 0)) __pyx_t_17 = 0;
+ } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_K.diminfo[0].shape)) __pyx_t_17 = 0;
+ if (__pyx_t_16 < 0) {
+ __pyx_t_16 += __pyx_pybuffernd_K.diminfo[1].shape;
+ if (unlikely(__pyx_t_16 < 0)) __pyx_t_17 = 1;
+ } else if (unlikely(__pyx_t_16 >= __pyx_pybuffernd_K.diminfo[1].shape)) __pyx_t_17 = 1;
+ if (unlikely(__pyx_t_17 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_17);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_K.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_K.diminfo[0].strides, __pyx_t_16, __pyx_pybuffernd_K.diminfo[1].strides) = (*__Pyx_BufPtrStrided2d(double *, __pyx_pybuffernd_B.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_B.diminfo[0].strides, __pyx_t_15, __pyx_pybuffernd_B.diminfo[1].strides));
+ }
+ }
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":22
+ * for m in range(M):
+ * K[n,m] = B[X[n],X2[m]]
+ * return K # <<<<<<<<<<<<<<
+ *
+ * def gradient_reduce(int D, np.ndarray[double, ndim=2] dL_dK, np.ndarray[np.int64_t, ndim=1] index, np.ndarray[np.int64_t, ndim=1] index2):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_K));
+ __pyx_r = ((PyObject *)__pyx_v_K);
+ goto __pyx_L0;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":15
+ * return K
+ *
+ * def K_asymmetric(np.ndarray[double, ndim=2] B, np.ndarray[np.int64_t, ndim=1] X, np.ndarray[np.int64_t, ndim=1] X2): # <<<<<<<<<<<<<<
+ * cdef int N = X.size
+ * cdef int M = X2.size
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+ __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_B.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_K.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_X.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_X2.rcbuffer->pybuffer);
+ __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+ __Pyx_AddTraceback("GPy.kern._src.coregionalize_cython.K_asymmetric", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ goto __pyx_L2;
+ __pyx_L0:;
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_B.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_K.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_X.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_X2.rcbuffer->pybuffer);
+ __pyx_L2:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_K);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "GPy/kern/_src/coregionalize_cython.pyx":24
+ * return K
+ *
+ * def gradient_reduce(int D, np.ndarray[double, ndim=2] dL_dK, np.ndarray[np.int64_t, ndim=1] index, np.ndarray[np.int64_t, ndim=1] index2): # <<<<<<<<<<<<<<
+ * cdef np.ndarray[np.double_t, ndim=2] dL_dK_small = np.zeros((D, D))
+ * cdef int N = index.size
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_5gradient_reduce(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_3GPy_4kern_4_src_20coregionalize_cython_5gradient_reduce = {"gradient_reduce", (PyCFunction)__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_5gradient_reduce, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_3GPy_4kern_4_src_20coregionalize_cython_5gradient_reduce(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ int __pyx_v_D;
+ PyArrayObject *__pyx_v_dL_dK = 0;
+ PyArrayObject *__pyx_v_index = 0;
+ PyArrayObject *__pyx_v_index2 = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("gradient_reduce (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_D,&__pyx_n_s_dL_dK,&__pyx_n_s_index,&__pyx_n_s_index2,0};
+ PyObject* values[4] = {0,0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_D)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ case 1:
+ if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dL_dK)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("gradient_reduce", 1, 4, 4, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 2:
+ if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_index)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("gradient_reduce", 1, 4, 4, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ case 3:
+ if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_index2)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("gradient_reduce", 1, 4, 4, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gradient_reduce") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ }
+ __pyx_v_D = __Pyx_PyInt_As_int(values[0]); if (unlikely((__pyx_v_D == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_v_dL_dK = ((PyArrayObject *)values[1]);
+ __pyx_v_index = ((PyArrayObject *)values[2]);
+ __pyx_v_index2 = ((PyArrayObject *)values[3]);
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("gradient_reduce", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("GPy.kern._src.coregionalize_cython.gradient_reduce", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dL_dK), __pyx_ptype_5numpy_ndarray, 1, "dL_dK", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_index), __pyx_ptype_5numpy_ndarray, 1, "index", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_index2), __pyx_ptype_5numpy_ndarray, 1, "index2", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_r = __pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_4gradient_reduce(__pyx_self, __pyx_v_D, __pyx_v_dL_dK, __pyx_v_index, __pyx_v_index2);
+
+ /* function exit code */
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_3GPy_4kern_4_src_20coregionalize_cython_4gradient_reduce(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_D, PyArrayObject *__pyx_v_dL_dK, PyArrayObject *__pyx_v_index, PyArrayObject *__pyx_v_index2) {
+ PyArrayObject *__pyx_v_dL_dK_small = 0;
+ int __pyx_v_N;
+ int __pyx_v_M;
+ int __pyx_v_i;
+ int __pyx_v_j;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_dL_dK;
+ __Pyx_Buffer __pyx_pybuffer_dL_dK;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_dL_dK_small;
+ __Pyx_Buffer __pyx_pybuffer_dL_dK_small;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_index;
+ __Pyx_Buffer __pyx_pybuffer_index;
+ __Pyx_LocalBuf_ND __pyx_pybuffernd_index2;
+ __Pyx_Buffer __pyx_pybuffer_index2;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyArrayObject *__pyx_t_6 = NULL;
+ int __pyx_t_7;
+ int __pyx_t_8;
+ int __pyx_t_9;
+ int __pyx_t_10;
+ int __pyx_t_11;
+ int __pyx_t_12;
+ int __pyx_t_13;
+ int __pyx_t_14;
+ int __pyx_t_15;
+ __pyx_t_5numpy_int64_t __pyx_t_16;
+ __pyx_t_5numpy_int64_t __pyx_t_17;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("gradient_reduce", 0);
+ __pyx_pybuffer_dL_dK_small.pybuffer.buf = NULL;
+ __pyx_pybuffer_dL_dK_small.refcount = 0;
+ __pyx_pybuffernd_dL_dK_small.data = NULL;
+ __pyx_pybuffernd_dL_dK_small.rcbuffer = &__pyx_pybuffer_dL_dK_small;
+ __pyx_pybuffer_dL_dK.pybuffer.buf = NULL;
+ __pyx_pybuffer_dL_dK.refcount = 0;
+ __pyx_pybuffernd_dL_dK.data = NULL;
+ __pyx_pybuffernd_dL_dK.rcbuffer = &__pyx_pybuffer_dL_dK;
+ __pyx_pybuffer_index.pybuffer.buf = NULL;
+ __pyx_pybuffer_index.refcount = 0;
+ __pyx_pybuffernd_index.data = NULL;
+ __pyx_pybuffernd_index.rcbuffer = &__pyx_pybuffer_index;
+ __pyx_pybuffer_index2.pybuffer.buf = NULL;
+ __pyx_pybuffer_index2.refcount = 0;
+ __pyx_pybuffernd_index2.data = NULL;
+ __pyx_pybuffernd_index2.rcbuffer = &__pyx_pybuffer_index2;
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dL_dK.rcbuffer->pybuffer, (PyObject*)__pyx_v_dL_dK, &__Pyx_TypeInfo_double, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_pybuffernd_dL_dK.diminfo[0].strides = __pyx_pybuffernd_dL_dK.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dL_dK.diminfo[0].shape = __pyx_pybuffernd_dL_dK.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_dL_dK.diminfo[1].strides = __pyx_pybuffernd_dL_dK.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dL_dK.diminfo[1].shape = __pyx_pybuffernd_dL_dK.rcbuffer->pybuffer.shape[1];
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_index.rcbuffer->pybuffer, (PyObject*)__pyx_v_index, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_pybuffernd_index.diminfo[0].strides = __pyx_pybuffernd_index.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_index.diminfo[0].shape = __pyx_pybuffernd_index.rcbuffer->pybuffer.shape[0];
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_index2.rcbuffer->pybuffer, (PyObject*)__pyx_v_index2, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int64_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_pybuffernd_index2.diminfo[0].strides = __pyx_pybuffernd_index2.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_index2.diminfo[0].shape = __pyx_pybuffernd_index2.rcbuffer->pybuffer.shape[0];
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":25
+ *
+ * def gradient_reduce(int D, np.ndarray[double, ndim=2] dL_dK, np.ndarray[np.int64_t, ndim=1] index, np.ndarray[np.int64_t, ndim=1] index2):
+ * cdef np.ndarray[np.double_t, ndim=2] dL_dK_small = np.zeros((D, D)) # <<<<<<<<<<<<<<
+ * cdef int N = index.size
+ * cdef int M = index2.size
+ */
+ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_D); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_D); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_2 = 0;
+ __pyx_t_4 = 0;
+ __pyx_t_4 = NULL;
+ if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_3, function);
+ }
+ }
+ if (!__pyx_t_4) {
+ __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ } else {
+ __pyx_t_2 = PyTuple_New(1+1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = NULL;
+ PyTuple_SET_ITEM(__pyx_t_2, 0+1, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_t_6 = ((PyArrayObject *)__pyx_t_1);
+ {
+ __Pyx_BufFmt_StackElem __pyx_stack[1];
+ if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_double_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 2, 0, __pyx_stack) == -1)) {
+ __pyx_v_dL_dK_small = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer.buf = NULL;
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ } else {__pyx_pybuffernd_dL_dK_small.diminfo[0].strides = __pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dL_dK_small.diminfo[0].shape = __pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_dL_dK_small.diminfo[1].strides = __pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dL_dK_small.diminfo[1].shape = __pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer.shape[1];
+ }
+ }
+ __pyx_t_6 = 0;
+ __pyx_v_dL_dK_small = ((PyArrayObject *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":26
+ * def gradient_reduce(int D, np.ndarray[double, ndim=2] dL_dK, np.ndarray[np.int64_t, ndim=1] index, np.ndarray[np.int64_t, ndim=1] index2):
+ * cdef np.ndarray[np.double_t, ndim=2] dL_dK_small = np.zeros((D, D))
+ * cdef int N = index.size # <<<<<<<<<<<<<<
+ * cdef int M = index2.size
+ * for i in range(N):
+ */
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_index), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_N = __pyx_t_7;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":27
+ * cdef np.ndarray[np.double_t, ndim=2] dL_dK_small = np.zeros((D, D))
+ * cdef int N = index.size
+ * cdef int M = index2.size # <<<<<<<<<<<<<<
+ * for i in range(N):
+ * for j in range(M):
+ */
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_index2), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_7 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_M = __pyx_t_7;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":28
+ * cdef int N = index.size
+ * cdef int M = index2.size
+ * for i in range(N): # <<<<<<<<<<<<<<
+ * for j in range(M):
+ * dL_dK_small[index2[j],index[i]] += dL_dK[i,j];
+ */
+ __pyx_t_7 = __pyx_v_N;
+ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
+ __pyx_v_i = __pyx_t_8;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":29
+ * cdef int M = index2.size
+ * for i in range(N):
+ * for j in range(M): # <<<<<<<<<<<<<<
+ * dL_dK_small[index2[j],index[i]] += dL_dK[i,j];
+ * return dL_dK_small
+ */
+ __pyx_t_9 = __pyx_v_M;
+ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
+ __pyx_v_j = __pyx_t_10;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":30
+ * for i in range(N):
+ * for j in range(M):
+ * dL_dK_small[index2[j],index[i]] += dL_dK[i,j]; # <<<<<<<<<<<<<<
+ * return dL_dK_small
+ *
+ */
+ __pyx_t_11 = __pyx_v_i;
+ __pyx_t_12 = __pyx_v_j;
+ __pyx_t_13 = -1;
+ if (__pyx_t_11 < 0) {
+ __pyx_t_11 += __pyx_pybuffernd_dL_dK.diminfo[0].shape;
+ if (unlikely(__pyx_t_11 < 0)) __pyx_t_13 = 0;
+ } else if (unlikely(__pyx_t_11 >= __pyx_pybuffernd_dL_dK.diminfo[0].shape)) __pyx_t_13 = 0;
+ if (__pyx_t_12 < 0) {
+ __pyx_t_12 += __pyx_pybuffernd_dL_dK.diminfo[1].shape;
+ if (unlikely(__pyx_t_12 < 0)) __pyx_t_13 = 1;
+ } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_dL_dK.diminfo[1].shape)) __pyx_t_13 = 1;
+ if (unlikely(__pyx_t_13 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_13);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_13 = __pyx_v_j;
+ __pyx_t_14 = -1;
+ if (__pyx_t_13 < 0) {
+ __pyx_t_13 += __pyx_pybuffernd_index2.diminfo[0].shape;
+ if (unlikely(__pyx_t_13 < 0)) __pyx_t_14 = 0;
+ } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_index2.diminfo[0].shape)) __pyx_t_14 = 0;
+ if (unlikely(__pyx_t_14 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_14);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_14 = __pyx_v_i;
+ __pyx_t_15 = -1;
+ if (__pyx_t_14 < 0) {
+ __pyx_t_14 += __pyx_pybuffernd_index.diminfo[0].shape;
+ if (unlikely(__pyx_t_14 < 0)) __pyx_t_15 = 0;
+ } else if (unlikely(__pyx_t_14 >= __pyx_pybuffernd_index.diminfo[0].shape)) __pyx_t_15 = 0;
+ if (unlikely(__pyx_t_15 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_15);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_16 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_index2.rcbuffer->pybuffer.buf, __pyx_t_13, __pyx_pybuffernd_index2.diminfo[0].strides));
+ __pyx_t_17 = (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int64_t *, __pyx_pybuffernd_index.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_index.diminfo[0].strides));
+ __pyx_t_15 = -1;
+ if (__pyx_t_16 < 0) {
+ __pyx_t_16 += __pyx_pybuffernd_dL_dK_small.diminfo[0].shape;
+ if (unlikely(__pyx_t_16 < 0)) __pyx_t_15 = 0;
+ } else if (unlikely(__pyx_t_16 >= __pyx_pybuffernd_dL_dK_small.diminfo[0].shape)) __pyx_t_15 = 0;
+ if (__pyx_t_17 < 0) {
+ __pyx_t_17 += __pyx_pybuffernd_dL_dK_small.diminfo[1].shape;
+ if (unlikely(__pyx_t_17 < 0)) __pyx_t_15 = 1;
+ } else if (unlikely(__pyx_t_17 >= __pyx_pybuffernd_dL_dK_small.diminfo[1].shape)) __pyx_t_15 = 1;
+ if (unlikely(__pyx_t_15 != -1)) {
+ __Pyx_RaiseBufferIndexError(__pyx_t_15);
+ {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ *__Pyx_BufPtrStrided2d(__pyx_t_5numpy_double_t *, __pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer.buf, __pyx_t_16, __pyx_pybuffernd_dL_dK_small.diminfo[0].strides, __pyx_t_17, __pyx_pybuffernd_dL_dK_small.diminfo[1].strides) += (*__Pyx_BufPtrStrided2d(double *, __pyx_pybuffernd_dL_dK.rcbuffer->pybuffer.buf, __pyx_t_11, __pyx_pybuffernd_dL_dK.diminfo[0].strides, __pyx_t_12, __pyx_pybuffernd_dL_dK.diminfo[1].strides));
+ }
+ }
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":31
+ * for j in range(M):
+ * dL_dK_small[index2[j],index[i]] += dL_dK[i,j];
+ * return dL_dK_small # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_dL_dK_small));
+ __pyx_r = ((PyObject *)__pyx_v_dL_dK_small);
+ goto __pyx_L0;
+
+ /* "GPy/kern/_src/coregionalize_cython.pyx":24
+ * return K
+ *
+ * def gradient_reduce(int D, np.ndarray[double, ndim=2] dL_dK, np.ndarray[np.int64_t, ndim=1] index, np.ndarray[np.int64_t, ndim=1] index2): # <<<<<<<<<<<<<<
+ * cdef np.ndarray[np.double_t, ndim=2] dL_dK_small = np.zeros((D, D))
+ * cdef int N = index.size
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ { PyObject *__pyx_type, *__pyx_value, *__pyx_tb;
+ __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dL_dK.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_index.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_index2.rcbuffer->pybuffer);
+ __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}
+ __Pyx_AddTraceback("GPy.kern._src.coregionalize_cython.gradient_reduce", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ goto __pyx_L2;
+ __pyx_L0:;
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dL_dK.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dL_dK_small.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_index.rcbuffer->pybuffer);
+ __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_index2.rcbuffer->pybuffer);
+ __pyx_L2:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_dL_dK_small);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+ * # experimental exception made for __getbuffer__ and __releasebuffer__
+ * # -- the details of this may change.
+ * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
+ * # This implementation of getbuffer is geared towards Cython
+ * # requirements, and does not yet fullfill the PEP.
+ */
+
+/* Python wrapper */
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
+static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
+ int __pyx_v_copy_shape;
+ int __pyx_v_i;
+ int __pyx_v_ndim;
+ int __pyx_v_endian_detector;
+ int __pyx_v_little_endian;
+ int __pyx_v_t;
+ char *__pyx_v_f;
+ PyArray_Descr *__pyx_v_descr = 0;
+ int __pyx_v_offset;
+ int __pyx_v_hasfields;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ char *__pyx_t_7;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__getbuffer__", 0);
+ if (__pyx_v_info != NULL) {
+ __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(__pyx_v_info->obj);
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":200
+ * # of flags
+ *
+ * if info == NULL: return # <<<<<<<<<<<<<<
+ *
+ * cdef int copy_shape, i, ndim
+ */
+ __pyx_t_1 = ((__pyx_v_info == NULL) != 0);
+ if (__pyx_t_1) {
+ __pyx_r = 0;
+ goto __pyx_L0;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203
+ *
+ * cdef int copy_shape, i, ndim
+ * cdef int endian_detector = 1 # <<<<<<<<<<<<<<
+ * cdef bint little_endian = ((&endian_detector)[0] != 0)
+ *
+ */
+ __pyx_v_endian_detector = 1;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":204
+ * cdef int copy_shape, i, ndim
+ * cdef int endian_detector = 1
+ * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
+ *
+ * ndim = PyArray_NDIM(self)
+ */
+ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206
+ * cdef bint little_endian = ((&endian_detector)[0] != 0)
+ *
+ * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
+ *
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":208
+ * ndim = PyArray_NDIM(self)
+ *
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
+ * copy_shape = 1
+ * else:
+ */
+ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209
+ *
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ * copy_shape = 1 # <<<<<<<<<<<<<<
+ * else:
+ * copy_shape = 0
+ */
+ __pyx_v_copy_shape = 1;
+ goto __pyx_L4;
+ }
+ /*else*/ {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211
+ * copy_shape = 1
+ * else:
+ * copy_shape = 0 # <<<<<<<<<<<<<<
+ *
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ */
+ __pyx_v_copy_shape = 0;
+ }
+ __pyx_L4:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":213
+ * copy_shape = 0
+ *
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
+ * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not C contiguous")
+ */
+ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
+ if (__pyx_t_2) {
+ goto __pyx_L7_next_and;
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L6_bool_binop_done;
+ }
+ __pyx_L7_next_and:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214
+ *
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
+ * raise ValueError(u"ndarray is not C contiguous")
+ *
+ */
+ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
+ __pyx_t_1 = __pyx_t_2;
+ __pyx_L6_bool_binop_done:;
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":215
+ * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
+ * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
+ *
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217
+ * raise ValueError(u"ndarray is not C contiguous")
+ *
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
+ * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not Fortran contiguous")
+ */
+ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
+ if (__pyx_t_2) {
+ goto __pyx_L10_next_and;
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L9_bool_binop_done;
+ }
+ __pyx_L10_next_and:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218
+ *
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
+ * raise ValueError(u"ndarray is not Fortran contiguous")
+ *
+ */
+ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
+ __pyx_t_1 = __pyx_t_2;
+ __pyx_L9_bool_binop_done:;
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":219
+ * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
+ * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
+ * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
+ *
+ * info.buf = PyArray_DATA(self)
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221
+ * raise ValueError(u"ndarray is not Fortran contiguous")
+ *
+ * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
+ * info.ndim = ndim
+ * if copy_shape:
+ */
+ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222
+ *
+ * info.buf = PyArray_DATA(self)
+ * info.ndim = ndim # <<<<<<<<<<<<<<
+ * if copy_shape:
+ * # Allocate new buffer for strides and shape info.
+ */
+ __pyx_v_info->ndim = __pyx_v_ndim;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":223
+ * info.buf = PyArray_DATA(self)
+ * info.ndim = ndim
+ * if copy_shape: # <<<<<<<<<<<<<<
+ * # Allocate new buffer for strides and shape info.
+ * # This is allocated as one block, strides first.
+ */
+ __pyx_t_1 = (__pyx_v_copy_shape != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226
+ * # Allocate new buffer for strides and shape info.
+ * # This is allocated as one block, strides first.
+ * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<<
+ * info.shape = info.strides + ndim
+ * for i in range(ndim):
+ */
+ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":227
+ * # This is allocated as one block, strides first.
+ * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2)
+ * info.shape = info.strides + ndim # <<<<<<<<<<<<<<
+ * for i in range(ndim):
+ * info.strides[i] = PyArray_STRIDES(self)[i]
+ */
+ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":228
+ * info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2)
+ * info.shape = info.strides + ndim
+ * for i in range(ndim): # <<<<<<<<<<<<<<
+ * info.strides[i] = PyArray_STRIDES(self)[i]
+ * info.shape[i] = PyArray_DIMS(self)[i]
+ */
+ __pyx_t_4 = __pyx_v_ndim;
+ for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
+ __pyx_v_i = __pyx_t_5;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229
+ * info.shape = info.strides + ndim
+ * for i in range(ndim):
+ * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
+ * info.shape[i] = PyArray_DIMS(self)[i]
+ * else:
+ */
+ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230
+ * for i in range(ndim):
+ * info.strides[i] = PyArray_STRIDES(self)[i]
+ * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
+ * else:
+ * info.strides = PyArray_STRIDES(self)
+ */
+ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
+ }
+ goto __pyx_L11;
+ }
+ /*else*/ {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232
+ * info.shape[i] = PyArray_DIMS(self)[i]
+ * else:
+ * info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<<
+ * info.shape = PyArray_DIMS(self)
+ * info.suboffsets = NULL
+ */
+ __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233
+ * else:
+ * info.strides = PyArray_STRIDES(self)
+ * info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<<
+ * info.suboffsets = NULL
+ * info.itemsize = PyArray_ITEMSIZE(self)
+ */
+ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
+ }
+ __pyx_L11:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":234
+ * info.strides = PyArray_STRIDES(self)
+ * info.shape = PyArray_DIMS(self)
+ * info.suboffsets = NULL # <<<<<<<<<<<<<<
+ * info.itemsize = PyArray_ITEMSIZE(self)
+ * info.readonly = not PyArray_ISWRITEABLE(self)
+ */
+ __pyx_v_info->suboffsets = NULL;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235
+ * info.shape = PyArray_DIMS(self)
+ * info.suboffsets = NULL
+ * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
+ * info.readonly = not PyArray_ISWRITEABLE(self)
+ *
+ */
+ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236
+ * info.suboffsets = NULL
+ * info.itemsize = PyArray_ITEMSIZE(self)
+ * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
+ *
+ * cdef int t
+ */
+ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239
+ *
+ * cdef int t
+ * cdef char* f = NULL # <<<<<<<<<<<<<<
+ * cdef dtype descr = self.descr
+ * cdef list stack
+ */
+ __pyx_v_f = NULL;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":240
+ * cdef int t
+ * cdef char* f = NULL
+ * cdef dtype descr = self.descr # <<<<<<<<<<<<<<
+ * cdef list stack
+ * cdef int offset
+ */
+ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
+ __Pyx_INCREF(__pyx_t_3);
+ __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":244
+ * cdef int offset
+ *
+ * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
+ *
+ * if not hasfields and not copy_shape:
+ */
+ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246
+ * cdef bint hasfields = PyDataType_HASFIELDS(descr)
+ *
+ * if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
+ * # do not call releasebuffer
+ * info.obj = None
+ */
+ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
+ if (__pyx_t_2) {
+ goto __pyx_L16_next_and;
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L15_bool_binop_done;
+ }
+ __pyx_L16_next_and:;
+ __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
+ __pyx_t_1 = __pyx_t_2;
+ __pyx_L15_bool_binop_done:;
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248
+ * if not hasfields and not copy_shape:
+ * # do not call releasebuffer
+ * info.obj = None # <<<<<<<<<<<<<<
+ * else:
+ * # need to call releasebuffer
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_info->obj);
+ __Pyx_DECREF(__pyx_v_info->obj);
+ __pyx_v_info->obj = Py_None;
+ goto __pyx_L14;
+ }
+ /*else*/ {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":251
+ * else:
+ * # need to call releasebuffer
+ * info.obj = self # <<<<<<<<<<<<<<
+ *
+ * if not hasfields:
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_v_self));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
+ __Pyx_GOTREF(__pyx_v_info->obj);
+ __Pyx_DECREF(__pyx_v_info->obj);
+ __pyx_v_info->obj = ((PyObject *)__pyx_v_self);
+ }
+ __pyx_L14:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253
+ * info.obj = self
+ *
+ * if not hasfields: # <<<<<<<<<<<<<<
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or
+ */
+ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":254
+ *
+ * if not hasfields:
+ * t = descr.type_num # <<<<<<<<<<<<<<
+ * if ((descr.byteorder == c'>' and little_endian) or
+ * (descr.byteorder == c'<' and not little_endian)):
+ */
+ __pyx_t_4 = __pyx_v_descr->type_num;
+ __pyx_v_t = __pyx_t_4;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255
+ * if not hasfields:
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
+ * (descr.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ */
+ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
+ if (!__pyx_t_2) {
+ goto __pyx_L20_next_or;
+ } else {
+ goto __pyx_L21_next_and;
+ }
+ __pyx_L21_next_and:;
+ __pyx_t_2 = (__pyx_v_little_endian != 0);
+ if (!__pyx_t_2) {
+ goto __pyx_L20_next_or;
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L19_bool_binop_done;
+ }
+ __pyx_L20_next_or:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256
+ * t = descr.type_num
+ * if ((descr.byteorder == c'>' and little_endian) or
+ * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
+ * raise ValueError(u"Non-native byte order not supported")
+ * if t == NPY_BYTE: f = "b"
+ */
+ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
+ if (__pyx_t_2) {
+ goto __pyx_L22_next_and;
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L19_bool_binop_done;
+ }
+ __pyx_L22_next_and:;
+ __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
+ __pyx_t_1 = __pyx_t_2;
+ __pyx_L19_bool_binop_done:;
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257
+ * if ((descr.byteorder == c'>' and little_endian) or
+ * (descr.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
+ * if t == NPY_BYTE: f = "b"
+ * elif t == NPY_UBYTE: f = "B"
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ * elif t == NPY_CLONGDOUBLE: f = "Zg"
+ * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+ switch (__pyx_v_t) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258
+ * (descr.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
+ * elif t == NPY_UBYTE: f = "B"
+ * elif t == NPY_SHORT: f = "h"
+ */
+ case NPY_BYTE:
+ __pyx_v_f = __pyx_k_b;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259
+ * raise ValueError(u"Non-native byte order not supported")
+ * if t == NPY_BYTE: f = "b"
+ * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
+ * elif t == NPY_SHORT: f = "h"
+ * elif t == NPY_USHORT: f = "H"
+ */
+ case NPY_UBYTE:
+ __pyx_v_f = __pyx_k_B;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260
+ * if t == NPY_BYTE: f = "b"
+ * elif t == NPY_UBYTE: f = "B"
+ * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
+ * elif t == NPY_USHORT: f = "H"
+ * elif t == NPY_INT: f = "i"
+ */
+ case NPY_SHORT:
+ __pyx_v_f = __pyx_k_h;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261
+ * elif t == NPY_UBYTE: f = "B"
+ * elif t == NPY_SHORT: f = "h"
+ * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
+ * elif t == NPY_INT: f = "i"
+ * elif t == NPY_UINT: f = "I"
+ */
+ case NPY_USHORT:
+ __pyx_v_f = __pyx_k_H;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262
+ * elif t == NPY_SHORT: f = "h"
+ * elif t == NPY_USHORT: f = "H"
+ * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
+ * elif t == NPY_UINT: f = "I"
+ * elif t == NPY_LONG: f = "l"
+ */
+ case NPY_INT:
+ __pyx_v_f = __pyx_k_i;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263
+ * elif t == NPY_USHORT: f = "H"
+ * elif t == NPY_INT: f = "i"
+ * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONG: f = "l"
+ * elif t == NPY_ULONG: f = "L"
+ */
+ case NPY_UINT:
+ __pyx_v_f = __pyx_k_I;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264
+ * elif t == NPY_INT: f = "i"
+ * elif t == NPY_UINT: f = "I"
+ * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
+ * elif t == NPY_ULONG: f = "L"
+ * elif t == NPY_LONGLONG: f = "q"
+ */
+ case NPY_LONG:
+ __pyx_v_f = __pyx_k_l;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265
+ * elif t == NPY_UINT: f = "I"
+ * elif t == NPY_LONG: f = "l"
+ * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONGLONG: f = "q"
+ * elif t == NPY_ULONGLONG: f = "Q"
+ */
+ case NPY_ULONG:
+ __pyx_v_f = __pyx_k_L;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266
+ * elif t == NPY_LONG: f = "l"
+ * elif t == NPY_ULONG: f = "L"
+ * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
+ * elif t == NPY_ULONGLONG: f = "Q"
+ * elif t == NPY_FLOAT: f = "f"
+ */
+ case NPY_LONGLONG:
+ __pyx_v_f = __pyx_k_q;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267
+ * elif t == NPY_ULONG: f = "L"
+ * elif t == NPY_LONGLONG: f = "q"
+ * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
+ * elif t == NPY_FLOAT: f = "f"
+ * elif t == NPY_DOUBLE: f = "d"
+ */
+ case NPY_ULONGLONG:
+ __pyx_v_f = __pyx_k_Q;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268
+ * elif t == NPY_LONGLONG: f = "q"
+ * elif t == NPY_ULONGLONG: f = "Q"
+ * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
+ * elif t == NPY_DOUBLE: f = "d"
+ * elif t == NPY_LONGDOUBLE: f = "g"
+ */
+ case NPY_FLOAT:
+ __pyx_v_f = __pyx_k_f;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269
+ * elif t == NPY_ULONGLONG: f = "Q"
+ * elif t == NPY_FLOAT: f = "f"
+ * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONGDOUBLE: f = "g"
+ * elif t == NPY_CFLOAT: f = "Zf"
+ */
+ case NPY_DOUBLE:
+ __pyx_v_f = __pyx_k_d;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270
+ * elif t == NPY_FLOAT: f = "f"
+ * elif t == NPY_DOUBLE: f = "d"
+ * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
+ * elif t == NPY_CFLOAT: f = "Zf"
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ */
+ case NPY_LONGDOUBLE:
+ __pyx_v_f = __pyx_k_g;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271
+ * elif t == NPY_DOUBLE: f = "d"
+ * elif t == NPY_LONGDOUBLE: f = "g"
+ * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ * elif t == NPY_CLONGDOUBLE: f = "Zg"
+ */
+ case NPY_CFLOAT:
+ __pyx_v_f = __pyx_k_Zf;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272
+ * elif t == NPY_LONGDOUBLE: f = "g"
+ * elif t == NPY_CFLOAT: f = "Zf"
+ * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
+ * elif t == NPY_CLONGDOUBLE: f = "Zg"
+ * elif t == NPY_OBJECT: f = "O"
+ */
+ case NPY_CDOUBLE:
+ __pyx_v_f = __pyx_k_Zd;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273
+ * elif t == NPY_CFLOAT: f = "Zf"
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
+ * elif t == NPY_OBJECT: f = "O"
+ * else:
+ */
+ case NPY_CLONGDOUBLE:
+ __pyx_v_f = __pyx_k_Zg;
+ break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274
+ * elif t == NPY_CDOUBLE: f = "Zd"
+ * elif t == NPY_CLONGDOUBLE: f = "Zg"
+ * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+ case NPY_OBJECT:
+ __pyx_v_f = __pyx_k_O;
+ break;
+ default:
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276
+ * elif t == NPY_OBJECT: f = "O"
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
+ * info.format = f
+ * return
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_6, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ break;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":277
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ * info.format = f # <<<<<<<<<<<<<<
+ * return
+ * else:
+ */
+ __pyx_v_info->format = __pyx_v_f;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ * info.format = f
+ * return # <<<<<<<<<<<<<<
+ * else:
+ * info.format = stdlib.malloc(_buffer_format_string_len)
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ }
+ /*else*/ {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280
+ * return
+ * else:
+ * info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
+ * info.format[0] = c'^' # Native data types, manual alignment
+ * offset = 0
+ */
+ __pyx_v_info->format = ((char *)malloc(255));
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":281
+ * else:
+ * info.format = stdlib.malloc(_buffer_format_string_len)
+ * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
+ * offset = 0
+ * f = _util_dtypestring(descr, info.format + 1,
+ */
+ (__pyx_v_info->format[0]) = '^';
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282
+ * info.format = stdlib.malloc(_buffer_format_string_len)
+ * info.format[0] = c'^' # Native data types, manual alignment
+ * offset = 0 # <<<<<<<<<<<<<<
+ * f = _util_dtypestring(descr, info.format + 1,
+ * info.format + _buffer_format_string_len,
+ */
+ __pyx_v_offset = 0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283
+ * info.format[0] = c'^' # Native data types, manual alignment
+ * offset = 0
+ * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
+ * info.format + _buffer_format_string_len,
+ * &offset)
+ */
+ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_f = __pyx_t_7;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286
+ * info.format + _buffer_format_string_len,
+ * &offset)
+ * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info):
+ */
+ (__pyx_v_f[0]) = '\x00';
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":194
+ * # experimental exception made for __getbuffer__ and __releasebuffer__
+ * # -- the details of this may change.
+ * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
+ * # This implementation of getbuffer is geared towards Cython
+ * # requirements, and does not yet fullfill the PEP.
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
+ __Pyx_GOTREF(__pyx_v_info->obj);
+ __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
+ }
+ goto __pyx_L2;
+ __pyx_L0:;
+ if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
+ __Pyx_GOTREF(Py_None);
+ __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
+ }
+ __pyx_L2:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_descr);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+ * f[0] = c'\0' # Terminate format string
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
+ * if PyArray_HASFIELDS(self):
+ * stdlib.free(info.format)
+ */
+
+/* Python wrapper */
+static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
+static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
+ __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("__releasebuffer__", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info):
+ * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
+ * stdlib.free(info.format)
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ */
+ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290
+ * def __releasebuffer__(ndarray self, Py_buffer* info):
+ * if PyArray_HASFIELDS(self):
+ * stdlib.free(info.format) # <<<<<<<<<<<<<<
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ * stdlib.free(info.strides)
+ */
+ free(__pyx_v_info->format);
+ goto __pyx_L3;
+ }
+ __pyx_L3:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291
+ * if PyArray_HASFIELDS(self):
+ * stdlib.free(info.format)
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
+ * stdlib.free(info.strides)
+ * # info.shape was stored after info.strides in the same block
+ */
+ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292
+ * stdlib.free(info.format)
+ * if sizeof(npy_intp) != sizeof(Py_ssize_t):
+ * stdlib.free(info.strides) # <<<<<<<<<<<<<<
+ * # info.shape was stored after info.strides in the same block
+ *
+ */
+ free(__pyx_v_info->strides);
+ goto __pyx_L4;
+ }
+ __pyx_L4:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288
+ * f[0] = c'\0' # Terminate format string
+ *
+ * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
+ * if PyArray_HASFIELDS(self):
+ * stdlib.free(info.format)
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+ * ctypedef npy_cdouble complex_t
+ *
+ * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(1, a)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":769
+ *
+ * cdef inline object PyArray_MultiIterNew1(a):
+ * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew2(a, b):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768
+ * ctypedef npy_cdouble complex_t
+ *
+ * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(1, a)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+ * return PyArray_MultiIterNew(1, a)
+ *
+ * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(2, a, b)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":772
+ *
+ * cdef inline object PyArray_MultiIterNew2(a, b):
+ * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771
+ * return PyArray_MultiIterNew(1, a)
+ *
+ * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(2, a, b)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+ * return PyArray_MultiIterNew(2, a, b)
+ *
+ * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(3, a, b, c)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":775
+ *
+ * cdef inline object PyArray_MultiIterNew3(a, b, c):
+ * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774
+ * return PyArray_MultiIterNew(2, a, b)
+ *
+ * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(3, a, b, c)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+ * return PyArray_MultiIterNew(3, a, b, c)
+ *
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(4, a, b, c, d)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778
+ *
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<<
+ *
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777
+ * return PyArray_MultiIterNew(3, a, b, c)
+ *
+ * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(4, a, b, c, d)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+ * return PyArray_MultiIterNew(4, a, b, c, d)
+ *
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(5, a, b, c, d, e)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":781
+ *
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<<
+ *
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780
+ * return PyArray_MultiIterNew(4, a, b, c, d)
+ *
+ * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
+ * return PyArray_MultiIterNew(5, a, b, c, d, e)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+ * return PyArray_MultiIterNew(5, a, b, c, d, e)
+ *
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
+ * # Recursive utility function used in __getbuffer__ to get format
+ * # string. The new location in the format string is returned.
+ */
+
+static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
+ PyArray_Descr *__pyx_v_child = 0;
+ int __pyx_v_endian_detector;
+ int __pyx_v_little_endian;
+ PyObject *__pyx_v_fields = 0;
+ PyObject *__pyx_v_childname = NULL;
+ PyObject *__pyx_v_new_offset = NULL;
+ PyObject *__pyx_v_t = NULL;
+ char *__pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ long __pyx_t_8;
+ char *__pyx_t_9;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_util_dtypestring", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790
+ * cdef int delta_offset
+ * cdef tuple i
+ * cdef int endian_detector = 1 # <<<<<<<<<<<<<<
+ * cdef bint little_endian = ((&endian_detector)[0] != 0)
+ * cdef tuple fields
+ */
+ __pyx_v_endian_detector = 1;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791
+ * cdef tuple i
+ * cdef int endian_detector = 1
+ * cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
+ * cdef tuple fields
+ *
+ */
+ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
+ * cdef tuple fields
+ *
+ * for childname in descr.names: # <<<<<<<<<<<<<<
+ * fields = descr.fields[childname]
+ * child, new_offset = fields
+ */
+ if (unlikely(__pyx_v_descr->names == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
+ for (;;) {
+ if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
+ #if CYTHON_COMPILING_IN_CPYTHON
+ __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #else
+ __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ #endif
+ __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795
+ *
+ * for childname in descr.names:
+ * fields = descr.fields[childname] # <<<<<<<<<<<<<<
+ * child, new_offset = fields
+ *
+ */
+ __pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
+ __Pyx_GOTREF(__pyx_t_3);
+ if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796
+ * for childname in descr.names:
+ * fields = descr.fields[childname]
+ * child, new_offset = fields # <<<<<<<<<<<<<<
+ *
+ * if (end - f) - (new_offset - offset[0]) < 15:
+ */
+ if (likely(__pyx_v_fields != Py_None)) {
+ PyObject* sequence = __pyx_v_fields;
+ #if CYTHON_COMPILING_IN_CPYTHON
+ Py_ssize_t size = Py_SIZE(sequence);
+ #else
+ Py_ssize_t size = PySequence_Size(sequence);
+ #endif
+ if (unlikely(size != 2)) {
+ if (size > 2) __Pyx_RaiseTooManyValuesError(2);
+ else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ #if CYTHON_COMPILING_IN_CPYTHON
+ __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
+ __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ #else
+ __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ #endif
+ } else {
+ __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
+ __pyx_t_3 = 0;
+ __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
+ __pyx_t_4 = 0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798
+ * child, new_offset = fields
+ *
+ * if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ *
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
+ if (__pyx_t_6) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799
+ *
+ * if (end - f) - (new_offset - offset[0]) < 15:
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
+ *
+ * if ((child.byteorder == c'>' and little_endian) or
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801
+ * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+ *
+ * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
+ * (child.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported")
+ */
+ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
+ if (!__pyx_t_7) {
+ goto __pyx_L8_next_or;
+ } else {
+ goto __pyx_L9_next_and;
+ }
+ __pyx_L9_next_and:;
+ __pyx_t_7 = (__pyx_v_little_endian != 0);
+ if (!__pyx_t_7) {
+ goto __pyx_L8_next_or;
+ } else {
+ __pyx_t_6 = __pyx_t_7;
+ goto __pyx_L7_bool_binop_done;
+ }
+ __pyx_L8_next_or:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802
+ *
+ * if ((child.byteorder == c'>' and little_endian) or
+ * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
+ * raise ValueError(u"Non-native byte order not supported")
+ * # One could encode it in the format string and have Cython
+ */
+ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
+ if (__pyx_t_7) {
+ goto __pyx_L10_next_and;
+ } else {
+ __pyx_t_6 = __pyx_t_7;
+ goto __pyx_L7_bool_binop_done;
+ }
+ __pyx_L10_next_and:;
+ __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
+ __pyx_t_6 = __pyx_t_7;
+ __pyx_L7_bool_binop_done:;
+ if (__pyx_t_6) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803
+ * if ((child.byteorder == c'>' and little_endian) or
+ * (child.byteorder == c'<' and not little_endian)):
+ * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
+ * # One could encode it in the format string and have Cython
+ * # complain instead, BUT: < and > in format strings also imply
+ */
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813
+ *
+ * # Output padding bytes
+ * while offset[0] < new_offset: # <<<<<<<<<<<<<<
+ * f[0] = 120 # "x"; pad byte
+ * f += 1
+ */
+ while (1) {
+ __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (!__pyx_t_6) break;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814
+ * # Output padding bytes
+ * while offset[0] < new_offset:
+ * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
+ * f += 1
+ * offset[0] += 1
+ */
+ (__pyx_v_f[0]) = 120;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815
+ * while offset[0] < new_offset:
+ * f[0] = 120 # "x"; pad byte
+ * f += 1 # <<<<<<<<<<<<<<
+ * offset[0] += 1
+ *
+ */
+ __pyx_v_f = (__pyx_v_f + 1);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816
+ * f[0] = 120 # "x"; pad byte
+ * f += 1
+ * offset[0] += 1 # <<<<<<<<<<<<<<
+ *
+ * offset[0] += child.itemsize
+ */
+ __pyx_t_8 = 0;
+ (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818
+ * offset[0] += 1
+ *
+ * offset[0] += child.itemsize # <<<<<<<<<<<<<<
+ *
+ * if not PyDataType_HASFIELDS(child):
+ */
+ __pyx_t_8 = 0;
+ (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820
+ * offset[0] += child.itemsize
+ *
+ * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
+ * t = child.type_num
+ * if end - f < 5:
+ */
+ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
+ if (__pyx_t_6) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821
+ *
+ * if not PyDataType_HASFIELDS(child):
+ * t = child.type_num # <<<<<<<<<<<<<<
+ * if end - f < 5:
+ * raise RuntimeError(u"Format string allocated too short.")
+ */
+ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
+ __pyx_t_4 = 0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822
+ * if not PyDataType_HASFIELDS(child):
+ * t = child.type_num
+ * if end - f < 5: # <<<<<<<<<<<<<<
+ * raise RuntimeError(u"Format string allocated too short.")
+ *
+ */
+ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
+ if (__pyx_t_6) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823
+ * t = child.type_num
+ * if end - f < 5:
+ * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
+ *
+ * # Until ticket #99 is fixed, use integers to avoid warnings
+ */
+ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826
+ *
+ * # Until ticket #99 is fixed, use integers to avoid warnings
+ * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
+ * elif t == NPY_UBYTE: f[0] = 66 #"B"
+ * elif t == NPY_SHORT: f[0] = 104 #"h"
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 98;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827
+ * # Until ticket #99 is fixed, use integers to avoid warnings
+ * if t == NPY_BYTE: f[0] = 98 #"b"
+ * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
+ * elif t == NPY_SHORT: f[0] = 104 #"h"
+ * elif t == NPY_USHORT: f[0] = 72 #"H"
+ */
+ __pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 66;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828
+ * if t == NPY_BYTE: f[0] = 98 #"b"
+ * elif t == NPY_UBYTE: f[0] = 66 #"B"
+ * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
+ * elif t == NPY_USHORT: f[0] = 72 #"H"
+ * elif t == NPY_INT: f[0] = 105 #"i"
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 104;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829
+ * elif t == NPY_UBYTE: f[0] = 66 #"B"
+ * elif t == NPY_SHORT: f[0] = 104 #"h"
+ * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
+ * elif t == NPY_INT: f[0] = 105 #"i"
+ * elif t == NPY_UINT: f[0] = 73 #"I"
+ */
+ __pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 72;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830
+ * elif t == NPY_SHORT: f[0] = 104 #"h"
+ * elif t == NPY_USHORT: f[0] = 72 #"H"
+ * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
+ * elif t == NPY_UINT: f[0] = 73 #"I"
+ * elif t == NPY_LONG: f[0] = 108 #"l"
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 105;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831
+ * elif t == NPY_USHORT: f[0] = 72 #"H"
+ * elif t == NPY_INT: f[0] = 105 #"i"
+ * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONG: f[0] = 108 #"l"
+ * elif t == NPY_ULONG: f[0] = 76 #"L"
+ */
+ __pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 73;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832
+ * elif t == NPY_INT: f[0] = 105 #"i"
+ * elif t == NPY_UINT: f[0] = 73 #"I"
+ * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
+ * elif t == NPY_ULONG: f[0] = 76 #"L"
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 108;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833
+ * elif t == NPY_UINT: f[0] = 73 #"I"
+ * elif t == NPY_LONG: f[0] = 108 #"l"
+ * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ */
+ __pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 76;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834
+ * elif t == NPY_LONG: f[0] = 108 #"l"
+ * elif t == NPY_ULONG: f[0] = 76 #"L"
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ * elif t == NPY_FLOAT: f[0] = 102 #"f"
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 113;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835
+ * elif t == NPY_ULONG: f[0] = 76 #"L"
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
+ * elif t == NPY_FLOAT: f[0] = 102 #"f"
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ */
+ __pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 81;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836
+ * elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 102;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837
+ * elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ * elif t == NPY_FLOAT: f[0] = 102 #"f"
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ */
+ __pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 100;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838
+ * elif t == NPY_FLOAT: f[0] = 102 #"f"
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 103;
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839
+ * elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ */
+ __pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 90;
+ (__pyx_v_f[1]) = 102;
+ __pyx_v_f = (__pyx_v_f + 1);
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840
+ * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
+ * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ * elif t == NPY_OBJECT: f[0] = 79 #"O"
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 90;
+ (__pyx_v_f[1]) = 100;
+ __pyx_v_f = (__pyx_v_f + 1);
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841
+ * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
+ * elif t == NPY_OBJECT: f[0] = 79 #"O"
+ * else:
+ */
+ __pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 90;
+ (__pyx_v_f[1]) = 103;
+ __pyx_v_f = (__pyx_v_f + 1);
+ goto __pyx_L15;
+ }
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842
+ * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ */
+ __pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_6) {
+ (__pyx_v_f[0]) = 79;
+ goto __pyx_L15;
+ }
+ /*else*/ {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844
+ * elif t == NPY_OBJECT: f[0] = 79 #"O"
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
+ * f += 1
+ * else:
+ */
+ __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ }
+ __pyx_L15:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845
+ * else:
+ * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ * f += 1 # <<<<<<<<<<<<<<
+ * else:
+ * # Cython ignores struct boundary information ("T{...}"),
+ */
+ __pyx_v_f = (__pyx_v_f + 1);
+ goto __pyx_L13;
+ }
+ /*else*/ {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849
+ * # Cython ignores struct boundary information ("T{...}"),
+ * # so don't output it
+ * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
+ * return f
+ *
+ */
+ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
+ __pyx_v_f = __pyx_t_9;
+ }
+ __pyx_L13:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794
+ * cdef tuple fields
+ *
+ * for childname in descr.names: # <<<<<<<<<<<<<<
+ * fields = descr.fields[childname]
+ * child, new_offset = fields
+ */
+ }
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850
+ * # so don't output it
+ * f = _util_dtypestring(child, f, end, offset)
+ * return f # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_v_f;
+ goto __pyx_L0;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783
+ * return PyArray_MultiIterNew(5, a, b, c, d, e)
+ *
+ * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
+ * # Recursive utility function used in __getbuffer__ to get format
+ * # string. The new location in the format string is returned.
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_child);
+ __Pyx_XDECREF(__pyx_v_fields);
+ __Pyx_XDECREF(__pyx_v_childname);
+ __Pyx_XDECREF(__pyx_v_new_offset);
+ __Pyx_XDECREF(__pyx_v_t);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+ *
+ *
+ * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
+ * cdef PyObject* baseptr
+ * if base is None:
+ */
+
+static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
+ PyObject *__pyx_v_baseptr;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("set_array_base", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968
+ * cdef inline void set_array_base(ndarray arr, object base):
+ * cdef PyObject* baseptr
+ * if base is None: # <<<<<<<<<<<<<<
+ * baseptr = NULL
+ * else:
+ */
+ __pyx_t_1 = (__pyx_v_base == Py_None);
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969
+ * cdef PyObject* baseptr
+ * if base is None:
+ * baseptr = NULL # <<<<<<<<<<<<<<
+ * else:
+ * Py_INCREF(base) # important to do this before decref below!
+ */
+ __pyx_v_baseptr = NULL;
+ goto __pyx_L3;
+ }
+ /*else*/ {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971
+ * baseptr = NULL
+ * else:
+ * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
+ * baseptr = base
+ * Py_XDECREF(arr.base)
+ */
+ Py_INCREF(__pyx_v_base);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972
+ * else:
+ * Py_INCREF(base) # important to do this before decref below!
+ * baseptr = base # <<<<<<<<<<<<<<
+ * Py_XDECREF(arr.base)
+ * arr.base = baseptr
+ */
+ __pyx_v_baseptr = ((PyObject *)__pyx_v_base);
+ }
+ __pyx_L3:;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973
+ * Py_INCREF(base) # important to do this before decref below!
+ * baseptr = base
+ * Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
+ * arr.base = baseptr
+ *
+ */
+ Py_XDECREF(__pyx_v_arr->base);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974
+ * baseptr = base
+ * Py_XDECREF(arr.base)
+ * arr.base = baseptr # <<<<<<<<<<<<<<
+ *
+ * cdef inline object get_array_base(ndarray arr):
+ */
+ __pyx_v_arr->base = __pyx_v_baseptr;
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966
+ *
+ *
+ * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
+ * cdef PyObject* baseptr
+ * if base is None:
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976
+ * arr.base = baseptr
+ *
+ * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
+ * if arr.base is NULL:
+ * return None
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("get_array_base", 0);
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977
+ *
+ * cdef inline object get_array_base(ndarray arr):
+ * if arr.base is NULL: # <<<<<<<<<<<<<<
+ * return None
+ * else:
+ */
+ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
+ if (__pyx_t_1) {
+
+ /* "../../../../../anaconda/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978
+ * cdef inline object get_array_base(ndarray arr):
+ * if arr.base is NULL:
+ * return None # <<<<<<<<<<<<<<
+ * else:
+ * return