Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-08 19:42:39 +02:00)

Commit aaa5020bf1: Merge branch 'devel' of https://github.com/SheffieldML/GPy into devel
133 changed files with 1761 additions and 957 deletions
@@ -3,23 +3,23 @@
 import warnings
 warnings.filterwarnings("ignore", category=DeprecationWarning)
 
-import core
-from core.parameterization import transformations, priors
+from . import core
+from .core.parameterization import transformations, priors
 constraints = transformations
-import models
-import mappings
-import inference
-import util
-import examples
-import likelihoods
-import testing
+from . import models
+from . import mappings
+from . import inference
+from . import util
+from . import examples
+from . import likelihoods
+from . import testing
 from numpy.testing import Tester
-import kern
-import plotting
+from . import kern
+from . import plotting
 
 # Direct imports for convenience:
-from core import Model
-from core.parameterization import Param, Parameterized, ObsAr
+from .core import Model
+from .core.parameterization import Param, Parameterized, ObsAr
 
 #@nottest
 try:
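Note: the hunk above is the canonical Python 3 import fix. Python 2 resolved `import core` relative to the containing package implicitly; Python 3 only accepts the explicit relative spelling. A minimal sketch of the dual-version pattern (the package and module names here are illustrative, not GPy's exact layout):

    # pkg/__init__.py
    from __future__ import absolute_import  # opts Python 2 into Python 3 import semantics

    # Python 2 only (implicit relative import; ImportError on Python 3):
    #   import core
    # Works on both, explicitly relative to this package:
    from . import core
    from .core import Model

The same mechanical substitution repeats across every intra-package import in the rest of the commit.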

@@ -1,12 +1,12 @@
 # Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from model import *
-from parameterization.parameterized import adjust_name_for_printing, Parameterizable
-from parameterization.param import Param, ParamConcatenation
-from parameterization.observable_array import ObsAr
+from .model import *
+from .parameterization.parameterized import adjust_name_for_printing, Parameterizable
+from .parameterization.param import Param, ParamConcatenation
+from .parameterization.observable_array import ObsAr
 
-from gp import GP
-from svgp import SVGP
-from sparse_gp import SparseGP
-from mapping import *
+from .gp import GP
+from .svgp import SVGP
+from .sparse_gp import SparseGP
+from .mapping import *

@@ -4,14 +4,15 @@
 import numpy as np
 import sys
 from .. import kern
-from model import Model
-from mapping import Mapping
-from parameterization import ObsAr
+from .model import Model
+from .parameterization import ObsAr
+from .mapping import Mapping
 from .. import likelihoods
 from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation
-from parameterization.variational import VariationalPosterior
+from .parameterization.variational import VariationalPosterior
 
 import logging
+import warnings
 from GPy.util.normalizer import MeanNorm
 logger = logging.getLogger("GP")
 

@@ -63,10 +64,14 @@ class GP(Model):
         self.Y = ObsAr(Y)
         self.Y_normalized = self.Y
-        assert Y.shape[0] == self.num_data
+        if Y.shape[0] != self.num_data:
+            #There can be cases where we want inputs than outputs, for example if we have multiple latent
+            #function values
+            warnings.warn("There are more rows in your input data X, \
+                           than in your output data Y, be VERY sure this is what you want")
         _, self.output_dim = self.Y.shape
 
         #TODO: check the type of this is okay?
         assert ((Y_metadata is None) or isinstance(Y_metadata, dict))
         self.Y_metadata = Y_metadata
 
         assert isinstance(kernel, kern.Kern)

@@ -92,7 +97,7 @@ class GP(Model):
             inference_method = exact_gaussian_inference.ExactGaussianInference()
         else:
             inference_method = expectation_propagation.EP()
-            print "defaulting to ", inference_method, "for latent function inference"
+            print("defaulting to ", inference_method, "for latent function inference")
         self.inference_method = inference_method
 
         logger.info("adding kernel and likelihood as parameters")
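Note: this is the print-statement half of the port. The bare call form above happens to parse under Python 2 too, but there it prints a tuple; the safe dual-version form also adds the future import. A sketch:

    from __future__ import print_function  # gives Python 2 the Python 3 print() builtin

    inference_method = "EP()"  # stand-in value for illustration
    print("defaulting to ", inference_method, "for latent function inference")

Without the future import, Python 2 would print ('defaulting to ', 'EP()', 'for latent function inference') instead of the intended words.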

@@ -296,7 +301,7 @@ class GP(Model):
         :type size: int.
         :param full_cov: whether to return the full covariance matrix, or just the diagonal.
         :type full_cov: bool.
-        :returns: Ysim: set of simulations
+        :returns: fsim: set of simulations
         :rtype: np.ndarray (N x samples)
         """
         m, v = self._raw_predict(X, full_cov=full_cov)

@@ -304,11 +309,11 @@ class GP(Model):
             m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)
         v = v.reshape(m.size,-1) if len(v.shape)==3 else v
         if not full_cov:
-            Ysim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
+            fsim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
         else:
-            Ysim = np.random.multivariate_normal(m.flatten(), v, size).T
+            fsim = np.random.multivariate_normal(m.flatten(), v, size).T
 
-        return Ysim
+        return fsim
 
     def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None):
         """

@@ -324,16 +329,16 @@ class GP(Model):
         :type noise_model: integer.
         :returns: Ysim: set of simulations, a Numpy array (N x samples).
         """
-        Ysim = self.posterior_samples_f(X, size, full_cov=full_cov)
-        Ysim = self.likelihood.samples(Ysim, Y_metadata)
-
+        fsim = self.posterior_samples_f(X, size, full_cov=full_cov)
+        Ysim = self.likelihood.samples(fsim, Y_metadata)
         return Ysim
 
     def plot_f(self, plot_limits=None, which_data_rows='all',
                which_data_ycols='all', fixed_inputs=[],
                levels=20, samples=0, fignum=None, ax=None, resolution=None,
                plot_raw=True,
-               linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx'):
+               linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx',
+               apply_link=False):
         """
         Plot the GP's view of the world, where the data is normalized and before applying a likelihood.
         This is a call to plot with plot_raw=True.

@@ -370,6 +375,8 @@ class GP(Model):
         :type Y_metadata: dict
         :param data_symbol: symbol as used matplotlib, by default this is a black cross ('kx')
         :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib.
+        :param apply_link: if there is a link function of the likelihood, plot the link(f*) rather than f*
+        :type apply_link: boolean
         """
         assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
         from ..plotting.matplot_dep import models_plots

@@ -382,7 +389,7 @@ class GP(Model):
                                      which_data_ycols, fixed_inputs,
                                      levels, samples, fignum, ax, resolution,
                                      plot_raw=plot_raw, Y_metadata=Y_metadata,
-                                     data_symbol=data_symbol, **kw)
+                                     data_symbol=data_symbol, apply_link=apply_link, **kw)
 
     def plot(self, plot_limits=None, which_data_rows='all',
              which_data_ycols='all', fixed_inputs=[],

@@ -461,7 +468,7 @@ class GP(Model):
         try:
             super(GP, self).optimize(optimizer, start, **kwargs)
         except KeyboardInterrupt:
-            print "KeyboardInterrupt caught, calling on_optimization_end() to round things up"
+            print("KeyboardInterrupt caught, calling on_optimization_end() to round things up")
             self.inference_method.on_optimization_end()
             raise
 

@@ -3,7 +3,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import sys
-from parameterization import Parameterized
+from .parameterization import Parameterized
 import numpy as np
 
 class Mapping(Parameterized):

@@ -5,7 +5,7 @@
 from .. import likelihoods
 from ..inference import optimization
 from ..util.misc import opt_wrapper
-from parameterization import Parameterized
+from .parameterization import Parameterized
 import multiprocessing as mp
 import numpy as np
 from numpy.linalg.linalg import LinAlgError

@@ -13,6 +13,7 @@ import itertools
 import sys
 from .verbose_optimization import VerboseOptimization
 # import numdifftools as ndt
+from functools import reduce
 
 class Model(Parameterized):
     _fail_count = 0 # Count of failed optimization steps (see objective)

@@ -30,7 +31,7 @@ class Model(Parameterized):
         self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500)
 
     def log_likelihood(self):
-        raise NotImplementedError, "this needs to be implemented to use the model class"
+        raise NotImplementedError("this needs to be implemented to use the model class")
     def _log_likelihood_gradients(self):
         return self.gradient.copy()
 
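Note: `raise SomeError, "msg"` is Python 2-only syntax and a SyntaxError on Python 3; calling the exception class is valid on both, and the commit applies this fix to every raise statement below. Sketch:

    def log_likelihood():
        # Python 2 only: raise NotImplementedError, "message"
        raise NotImplementedError("this needs to be implemented to use the model class")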

@@ -82,7 +83,7 @@ class Model(Parameterized):
             pool.close() # signal that no more data coming in
             pool.join() # wait for all the tasks to complete
         except KeyboardInterrupt:
-            print "Ctrl+c received, terminating and joining pool."
+            print("Ctrl+c received, terminating and joining pool.")
             pool.terminate()
             pool.join()
 

@@ -95,10 +96,10 @@ class Model(Parameterized):
                     self.optimization_runs.append(jobs[i].get())
 
                 if verbose:
-                    print("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt))
+                    print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)))
             except Exception as e:
                 if robust:
-                    print("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts))
+                    print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)))
                 else:
                     raise e
 
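Note: the doubled parentheses above look like a mechanical 2to3 artifact: the fixer wraps the whole argument of the old print statement in one call, so an expression that was already parenthesized gains a redundant pair. The output is identical either way:

    i, num_restarts, f_opt = 0, 10, -123.4  # stand-in values for illustration
    print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, f_opt)))
    print("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, f_opt))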

@@ -119,7 +120,7 @@ class Model(Parameterized):
 
         DEPRECATED.
         """
-        raise DeprecationWarning, 'parameters now have default constraints'
+        raise DeprecationWarning('parameters now have default constraints')
 
     def objective_function(self):
         """

@@ -237,10 +238,10 @@ class Model(Parameterized):
 
         """
         if self.is_fixed or self.size == 0:
-            print 'nothing to optimize'
+            print('nothing to optimize')
 
         if not self.update_model():
-            print "updates were off, setting updates on again"
+            print("updates were off, setting updates on again")
             self.update_model(True)
 
         if start == None:

@@ -255,7 +256,7 @@ class Model(Parameterized):
         else:
             optimizer = optimization.get_optimizer(optimizer)
         opt = optimizer(start, model=self, max_iters=max_iters, **kwargs)
-
+
         with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook) as vo:
             opt.run(f_fp=self._objective_grads, f=self._objective, fp=self._grads)
             vo.finish(opt)

@@ -305,7 +306,7 @@ class Model(Parameterized):
         transformed_index = (indices - (~self._fixes_).cumsum())[transformed_index[which[0]]]
 
         if transformed_index.size == 0:
-            print "No free parameters to check"
+            print("No free parameters to check")
             return
 
         # just check the global ratio

@@ -340,9 +341,9 @@ class Model(Parameterized):
         cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
         cols = np.array(cols) + 5
         header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
-        header_string = map(lambda x: '|'.join(x), [header_string])
+        header_string = list(map(lambda x: '|'.join(x), [header_string]))
         separator = '-' * len(header_string[0])
-        print '\n'.join([header_string[0], separator])
+        print('\n'.join([header_string[0], separator]))
         if target_param is None:
             param_index = range(len(x))
             transformed_index = param_index
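Note: map() returns a list on Python 2 but a lazy iterator on Python 3, so a result that gets indexed, as header_string[0] is just above, must be materialized with list(). Sketch:

    header_string = ["a", "b"]
    joined = list(map(lambda x: '|'.join(x), [header_string]))
    assert joined[0] == "a|b"  # indexing works on a list, not on a bare Python 3 map object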

@@ -358,19 +359,24 @@ class Model(Parameterized):
             transformed_index = param_index
 
         if param_index.size == 0:
-            print "No free parameters to check"
+            print("No free parameters to check")
             return
 
         gradient = self._grads(x).copy()
         np.where(gradient == 0, 1e-312, gradient)
         ret = True
-        for nind, xind in itertools.izip(param_index, transformed_index):
+        for nind, xind in zip(param_index, transformed_index):
             xx = x.copy()
             xx[xind] += step
             f1 = self._objective(xx)
             xx[xind] -= 2.*step
             f2 = self._objective(xx)
-            df_ratio = np.abs((f1 - f2) / min(f1, f2))
+            #Avoid divide by zero, if any of the values are above 1e-15, otherwise both values are essentially
+            #the same
+            if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
+                df_ratio = np.abs((f1 - f2) / min(f1, f2))
+            else:
+                df_ratio = 1.0
             df_unstable = df_ratio < df_tolerance
             numerical_gradient = (f1 - f2) / (2 * step)
             if np.all(gradient[xind] == 0): ratio = (f1 - f2) == gradient[xind]
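Note: itertools.izip was removed in Python 3 because the builtin zip is already lazy there. Renaming izip to zip is correct on both versions whenever the result is only iterated, as in this loop; on Python 2 zip builds a list, which costs memory but not correctness. Sketch:

    param_index = [0, 1, 2]
    transformed_index = [5, 6, 7]
    # itertools.izip(param_index, transformed_index)  # Python 2 only
    for nind, xind in zip(param_index, transformed_index):  # works on both
        pass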

@@ -392,7 +398,7 @@ class Model(Parameterized):
             ng = '%.6f' % float(numerical_gradient)
             df = '%1.e' % float(df_ratio)
             grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
-            print grad_string
+            print(grad_string)
 
         self.optimizer_array = x
         return ret

@@ -1,5 +1,5 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from param import Param, ObsAr
-from parameterized import Parameterized
+from .param import Param, ObsAr
+from .parameterized import Parameterized

@@ -3,7 +3,8 @@
 
 import numpy
 from numpy.lib.function_base import vectorize
-from lists_and_dicts import IntArrayDict
+from .lists_and_dicts import IntArrayDict
+from functools import reduce
 
 def extract_properties_to_index(index, props):
     prop_index = dict()

@@ -62,12 +63,15 @@ class ParameterIndexOperations(object):
     def __init__(self, constraints=None):
         self._properties = IntArrayDict()
         if constraints is not None:
-            for t, i in constraints.iteritems():
+            #python 3 fix
+            #for t, i in constraints.iteritems():
+            for t, i in constraints.items():
                 self.add(t, i)
 
-    def iteritems(self):
-        return self._properties.iteritems()
+    #iteritems has gone in python 3
+    #def iteritems(self):
+    #    return self._properties.iteritems()
 
     def items(self):
         return self._properties.items()
 
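Note: dict.iteritems/iterkeys/itervalues are gone in Python 3, where items/keys/values return lazy views. The commit's convention, visible throughout this file, is to keep the old line as a commented '#py3 fix' marker and call .items(). Sketch of the trade-off:

    d = {"noise": 1.0, "lengthscale": 2.0}
    for k, v in d.items():  # lazy view on Python 3, list copy on Python 2; iterates correctly on both
        print(k, v)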

@@ -75,7 +79,7 @@ class ParameterIndexOperations(object):
         return self._properties.keys()
 
     def iterproperties(self):
-        return self._properties.iterkeys()
+        return iter(self._properties)
 
     def shift_right(self, start, size):
         for ind in self.iterindices():

@@ -83,7 +87,7 @@ class ParameterIndexOperations(object):
             ind[toshift] += size
 
     def shift_left(self, start, size):
-        for v, ind in self.items():
+        for v, ind in list(self.items()):
             todelete = (ind>=start) * (ind<start+size)
             if todelete.size != 0:
                 ind = ind[~todelete]

@@ -101,7 +105,11 @@ class ParameterIndexOperations(object):
         return reduce(lambda a,b: a+b.size, self.iterindices(), 0)
 
     def iterindices(self):
-        return self._properties.itervalues()
+        try:
+            return self._properties.itervalues()
+        except AttributeError:
+            #Changed this from itervalues to values for Py3 compatibility. It didn't break the test suite.
+            return self._properties.values()
 
     def indices(self):
         return self._properties.values()
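Note: iterindices() takes a different route from the rest of the file: rather than renaming the call outright, it feature-detects at runtime and falls back when the Python 2 method is missing. Sketch of that pattern:

    def iterindices(properties):
        try:
            return properties.itervalues()   # Python 2 dict
        except AttributeError:
            return properties.values()       # Python 3: values() is already a lazy view

Mixing this style with the commented-out '#py3 fix' style works, but settling on one of the two would be easier to maintain.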

@@ -150,14 +158,18 @@ class ParameterIndexOperations(object):
         return numpy.array([]).astype(int)
 
     def update(self, parameter_index_view, offset=0):
-        for i, v in parameter_index_view.iteritems():
+        #py3 fix
+        #for i, v in parameter_index_view.iteritems():
+        for i, v in parameter_index_view.items():
             self.add(i, v+offset)
 
     def copy(self):
         return self.__deepcopy__(None)
 
     def __deepcopy__(self, memo):
-        return ParameterIndexOperations(dict(self.iteritems()))
+        #py3 fix
+        #return ParameterIndexOperations(dict(self.iteritems()))
+        return ParameterIndexOperations(dict(self.items()))
 
     def __getitem__(self, prop):
         return self._properties[prop]

@@ -195,22 +207,26 @@ class ParameterIndexOperationsView(object):
     def _filter_index(self, ind):
         return ind[(ind >= self._offset) * (ind < (self._offset + self._size))] - self._offset
 
 
-    def iteritems(self):
-        for i, ind in self._param_index_ops.iteritems():
+    #iteritems has gone in python 3. It has been renamed items()
+    def items(self):
+        _items_list = list(self._param_index_ops.items())
+        for i, ind in _items_list:
             ind2 = self._filter_index(ind)
             if ind2.size > 0:
                 yield i, ind2
 
-    def items(self):
-        return [[i,v] for i,v in self.iteritems()]
+    #Python 3 items() is now implemented as per py2 iteritems
+    #def items(self):
+    #    return [[i,v] for i,v in self.iteritems()]
 
     def properties(self):
         return [i for i in self.iterproperties()]
 
 
     def iterproperties(self):
-        for i, _ in self.iteritems():
+        #py3 fix
+        #for i, _ in self.iteritems():
+        for i, _ in self.items():
             yield i
 

@@ -230,7 +246,9 @@ class ParameterIndexOperationsView(object):
 
 
     def iterindices(self):
-        for _, ind in self.iteritems():
+        #py3 fix
+        #for _, ind in self.iteritems():
+        for _, ind in self.items():
             yield ind
 

@@ -286,10 +304,14 @@ class ParameterIndexOperationsView(object):
 
     def __str__(self, *args, **kwargs):
         import pprint
-        return pprint.pformat(dict(self.iteritems()))
+        #py3 fixes
+        #return pprint.pformat(dict(self.iteritems()))
+        return pprint.pformat(dict(self.items()))
 
     def update(self, parameter_index_view, offset=0):
-        for i, v in parameter_index_view.iteritems():
+        #py3 fixes
+        #for i, v in parameter_index_view.iteritems():
+        for i, v in parameter_index_view.items():
             self.add(i, v+offset)
 

@@ -297,6 +319,8 @@ class ParameterIndexOperationsView(object):
         return self.__deepcopy__(None)
 
     def __deepcopy__(self, memo):
-        return ParameterIndexOperations(dict(self.iteritems()))
+        #py3 fix
+        #return ParameterIndexOperations(dict(self.iteritems()))
+        return ParameterIndexOperations(dict(self.items()))
     pass
 

@@ -32,7 +32,7 @@ class ArrayList(list):
             if el is item:
                 return index
             index += 1
-        raise ValueError, "{} is not in list".format(item)
+        raise ValueError("{} is not in list".format(item))
     pass
 
 class ObserverList(object):

@@ -75,7 +75,7 @@ class ObserverList(object):
 
     def __str__(self):
         from . import ObsAr, Param
-        from parameter_core import Parameterizable
+        from .parameter_core import Parameterizable
         ret = []
         curr_p = None
 

@@ -12,7 +12,7 @@ class Observable(object):
     """
    def __init__(self, *args, **kwargs):
        super(Observable, self).__init__()
-        from lists_and_dicts import ObserverList
+        from .lists_and_dicts import ObserverList
        self.observers = ObserverList()
        self._update_on = True
 

@@ -3,8 +3,8 @@
 
 
 import numpy as np
-from parameter_core import Pickleable
-from observable import Observable
+from .parameter_core import Pickleable
+from .observable import Observable
 
 class ObsAr(np.ndarray, Pickleable, Observable):
     """

@@ -39,7 +39,7 @@ class ObsAr(np.ndarray, Pickleable, Observable):
         return self.view(np.ndarray)
 
     def copy(self):
-        from lists_and_dicts import ObserverList
+        from .lists_and_dicts import ObserverList
         memo = {}
         memo[id(self)] = self
         memo[id(self.observers)] = ObserverList()

@@ -4,8 +4,9 @@
 import itertools
 import numpy
 np = numpy
-from parameter_core import Parameterizable, adjust_name_for_printing, Pickleable
-from observable_array import ObsAr
+from .parameter_core import Parameterizable, adjust_name_for_printing, Pickleable
+from .observable_array import ObsAr
+from functools import reduce
 
 ###### printing
 __constraints_name__ = "Constraint"

@@ -156,7 +157,7 @@ class Param(Parameterizable, ObsAr):
     #===========================================================================
     @property
     def is_fixed(self):
-        from transformations import __fixed__
+        from .transformations import __fixed__
         return self.constraints[__fixed__].size == self.size
 
     def _get_original(self, param):

@@ -207,10 +208,14 @@ class Param(Parameterizable, ObsAr):
         return 0
     @property
     def _constraints_str(self):
-        return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.iteritems()))]
+        #py3 fix
+        #return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.iteritems()))]
+        return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.items()))]
     @property
     def _priors_str(self):
-        return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.iteritems()))]
+        #py3 fix
+        #return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.iteritems()))]
+        return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.items()))]
     @property
     def _ties_str(self):
         return ['']

@@ -279,7 +284,7 @@ class Param(Parameterizable, ObsAr):
 .tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
 .tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
 .tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
-</style>"""] + ['<table class="tg">'] + [header] + ["<tr><td class=tg-left>{i}</td><td class=tg-right>{x}</td><td class=tg-left>{c}</td><td class=tg-left>{p}</td><td class=tg-left>{t}</td></tr>".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in itertools.izip(indices, vals, constr_matrix, ties, prirs)] + ["</table>"])
+</style>"""] + ['<table class="tg">'] + [header] + ["<tr><td class=tg-left>{i}</td><td class=tg-right>{x}</td><td class=tg-left>{c}</td><td class=tg-left>{p}</td><td class=tg-left>{t}</td></tr>".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in zip(indices, vals, constr_matrix, ties, prirs)] + ["</table>"])
 
     def __str__(self, constr_matrix=None, indices=None, prirs=None, ties=None, lc=None, lx=None, li=None, lp=None, lt=None, only_name=False):
         filter_ = self._current_slice_

@@ -300,7 +305,7 @@ class Param(Parameterizable, ObsAr):
         if only_name: header = header_format.format(lc, lx, li, lt, lp, ' ', x=self.hierarchy_name(), c=sep*lc, i=sep*li, t=sep*lt, p=sep*lp) # nice header for printing
         else: header = header_format.format(lc, lx, li, lt, lp, ' ', x=self.hierarchy_name(), c=__constraints_name__, i=__index_name__, t=__tie_name__, p=__priors_name__) # nice header for printing
         if not ties: ties = itertools.cycle([''])
-        return "\n".join([header] + [" {i!s:^{3}s} | {x: >{1}.{2}g} | {c:^{0}s} | {p:^{5}s} | {t:^{4}s} ".format(lc, lx, __precision__, li, lt, lp, x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in itertools.izip(indices, vals, constr_matrix, ties, prirs)]) # return all the constraints with right indices
+        return "\n".join([header] + [" {i!s:^{3}s} | {x: >{1}.{2}g} | {c:^{0}s} | {p:^{5}s} | {t:^{4}s} ".format(lc, lx, __precision__, li, lt, lp, x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in zip(indices, vals, constr_matrix, ties, prirs)]) # return all the constraints with right indices
         # except: return super(Param, self).__str__()
 
 class ParamConcatenation(object):

@@ -313,7 +318,7 @@ class ParamConcatenation(object):
         See :py:class:`GPy.core.parameter.Param` for more details on constraining.
         """
         # self.params = params
-        from lists_and_dicts import ArrayList
+        from .lists_and_dicts import ArrayList
         self.params = ArrayList([])
         for p in params:
             for p in p.flattened_parameters:

@@ -336,7 +341,9 @@ class ParamConcatenation(object):
             level += 1
             parent = parent._parent_
         import operator
-        self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1)))
+        #py3 fix
+        #self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1)))
+        self.parents = map(lambda x: x[0], sorted(parents.items(), key=operator.itemgetter(1)))
     #===========================================================================
     # Get/set items, enable broadcasting
     #===========================================================================

@@ -429,14 +436,14 @@ class ParamConcatenation(object):
         params = self.params
         constr_matrices, ties_matrices, prior_matrices = zip(*map(f, params))
         indices = [p._indices() for p in params]
-        lc = max([p._max_len_names(cm, __constraints_name__) for p, cm in itertools.izip(params, constr_matrices)])
+        lc = max([p._max_len_names(cm, __constraints_name__) for p, cm in zip(params, constr_matrices)])
         lx = max([p._max_len_values() for p in params])
-        li = max([p._max_len_index(i) for p, i in itertools.izip(params, indices)])
-        lt = max([p._max_len_names(tm, __tie_name__) for p, tm in itertools.izip(params, ties_matrices)])
-        lp = max([p._max_len_names(pm, __constraints_name__) for p, pm in itertools.izip(params, prior_matrices)])
+        li = max([p._max_len_index(i) for p, i in zip(params, indices)])
+        lt = max([p._max_len_names(tm, __tie_name__) for p, tm in zip(params, ties_matrices)])
+        lp = max([p._max_len_names(pm, __constraints_name__) for p, pm in zip(params, prior_matrices)])
         strings = []
         start = True
-        for p, cm, i, tm, pm in itertools.izip(params,constr_matrices,indices,ties_matrices,prior_matrices):
+        for p, cm, i, tm, pm in zip(params,constr_matrices,indices,ties_matrices,prior_matrices):
             strings.append(p.__str__(constr_matrix=cm, indices=i, prirs=pm, ties=tm, lc=lc, lx=lx, li=li, lp=lp, lt=lt, only_name=(1-start)))
             start = False
         return "\n".join(strings)

@@ -13,11 +13,12 @@ Observable Pattern for patameterization
 
 """
 
-from transformations import Transformation,Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED
+from .transformations import Transformation,Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED
 import numpy as np
 import re
 import logging
-from updateable import Updateable
+from .updateable import Updateable
+from functools import reduce
 
 class HierarchyError(Exception):
     """

@@ -36,7 +37,7 @@ def adjust_name_for_printing(name):
         name = name.replace("/", "_l_").replace("@", '_at_')
         name = name.replace("(", "_of_").replace(")", "")
         if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:
-            raise NameError, "name {} converted to {} cannot be further converted to valid python variable name!".format(name2, name)
+            raise NameError("name {} converted to {} cannot be further converted to valid python variable name!".format(name2, name))
         return name
     return ''
 

@@ -65,13 +66,13 @@ class Parentable(object):
         Gets called, when the parent changed, so we can adjust our
         inner attributes according to the new parent.
         """
-        raise NotImplementedError, "shouldnt happen, Parentable objects need to be able to change their parent"
+        raise NotImplementedError("shouldnt happen, Parentable objects need to be able to change their parent")
 
     def _disconnect_parent(self, *args, **kw):
         """
         Disconnect this object from its parent
         """
-        raise NotImplementedError, "Abstract superclass"
+        raise NotImplementedError("Abstract superclass")
 
     @property
     def _highest_parent_(self):

@@ -109,7 +110,10 @@ class Pickleable(object):
         it properly.
         :param protocol: pickling protocol to use, python-pickle for details.
         """
-        import cPickle as pickle
+        try: #Py2
+            import cPickle as pickle
+        except ImportError: #Py3
+            import pickle
         if isinstance(f, str):
             with open(f, 'wb') as f:
                 pickle.dump(self, f, protocol)
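Note: cPickle was folded into pickle in Python 3 (the C accelerator is selected automatically), so the guarded import is the standard dual-version idiom:

    try:  # Python 2: explicit C implementation
        import cPickle as pickle
    except ImportError:  # Python 3: plain pickle already uses the C _pickle when available
        import pickle

    data = {"theta": [1.0, 2.0]}
    blob = pickle.dumps(data, protocol=2)  # protocol 2 is readable by both major versions
    assert pickle.loads(blob) == data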

@@ -138,9 +142,9 @@ class Pickleable(object):
         which = self
         which.traverse_parents(parents.append) # collect parents
         for p in parents:
-            if not memo.has_key(id(p)):memo[id(p)] = None # set all parents to be None, so they will not be copied
-        if not memo.has_key(id(self.gradient)):memo[id(self.gradient)] = None # reset the gradient
-        if not memo.has_key(id(self._fixes_)):memo[id(self._fixes_)] = None # fixes have to be reset, as this is now highest parent
+            if not id(p) in memo :memo[id(p)] = None # set all parents to be None, so they will not be copied
+        if not id(self.gradient) in memo:memo[id(self.gradient)] = None # reset the gradient
+        if not id(self._fixes_) in memo :memo[id(self._fixes_)] = None # fixes have to be reset, as this is now highest parent
         copy = copy.deepcopy(self, memo) # and start the copy
         copy._parent_index_ = None
         copy._trigger_params_changed()
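Note: dict.has_key(k) was removed in Python 3; the `k in d` test is equivalent and works on both versions. Sketch:

    memo = {}
    obj = object()
    # Python 2 only: if not memo.has_key(id(obj)): ...
    if not id(obj) in memo:  # the commit's spelling; `id(obj) not in memo` reads more naturally
        memo[id(obj)] = None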

@@ -163,14 +167,16 @@ class Pickleable(object):
                        '_Cacher_wrap__cachers', # never pickle cachers
                        ]
         dc = dict()
-        for k,v in self.__dict__.iteritems():
+        #py3 fix
+        #for k,v in self.__dict__.iteritems():
+        for k,v in self.__dict__.items():
             if k not in ignore_list:
                 dc[k] = v
         return dc
 
     def __setstate__(self, state):
         self.__dict__.update(state)
-        from lists_and_dicts import ObserverList
+        from .lists_and_dicts import ObserverList
         self.observers = ObserverList()
         self._setup_observers()
         self._optimizer_copy_transformed = False

@@ -214,7 +220,7 @@ class Gradcheckable(Pickleable, Parentable):
         Perform the checkgrad on the model.
         TODO: this can be done more efficiently, when doing it inside here
         """
-        raise HierarchyError, "This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!"
+        raise HierarchyError("This parameter is not in a model with a likelihood, and, therefore, cannot be gradient checked!")
 
 class Nameable(Gradcheckable):
     """

@@ -268,7 +274,7 @@ class Indexable(Nameable, Updateable):
     def __init__(self, name, default_constraint=None, *a, **kw):
         super(Indexable, self).__init__(name=name, *a, **kw)
         self._default_constraint_ = default_constraint
-        from index_operations import ParameterIndexOperations
+        from .index_operations import ParameterIndexOperations
         self.constraints = ParameterIndexOperations()
         self.priors = ParameterIndexOperations()
         if self._default_constraint_ is not None:

@@ -310,7 +316,7 @@ class Indexable(Nameable, Updateable):
         that is an int array, containing the indexes for the flattened
         param inside this parameterized logic.
         """
-        from param import ParamConcatenation
+        from .param import ParamConcatenation
         if isinstance(param, ParamConcatenation):
             return np.hstack((self._raveled_index_for(p) for p in param.params))
         return param._raveled_index() + self._offset_for(param)

@@ -407,7 +413,7 @@ class Indexable(Nameable, Updateable):
         repriorized = self.unset_priors()
         self._add_to_index_operations(self.priors, repriorized, prior, warning)
 
-        from domains import _REAL, _POSITIVE, _NEGATIVE
+        from .domains import _REAL, _POSITIVE, _NEGATIVE
         if prior.domain is _POSITIVE:
             self.constrain_positive(warning)
         elif prior.domain is _NEGATIVE:

@@ -426,7 +432,9 @@ class Indexable(Nameable, Updateable):
         """evaluate the prior"""
         if self.priors.size > 0:
             x = self.param_array
-            return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()), 0)
+            #py3 fix
+            #return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.iteritems()), 0)
+            return reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.items()), 0)
         return 0.
 
     def _log_prior_gradients(self):

@@ -434,7 +442,9 @@ class Indexable(Nameable, Updateable):
         if self.priors.size > 0:
             x = self.param_array
             ret = np.zeros(x.size)
-            [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()]
+            #py3 fix
+            #[np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()]
+            [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]
             return ret
         return 0.
 

@@ -536,7 +546,7 @@ class Indexable(Nameable, Updateable):
         update the constraints and priors view, so that
         constraining is automized for the parent.
         """
-        from index_operations import ParameterIndexOperationsView
+        from .index_operations import ParameterIndexOperationsView
         #if getattr(self, "_in_init_"):
             #import ipdb;ipdb.set_trace()
             #self.constraints.update(param.constraints, start)

@@ -558,7 +568,7 @@ class Indexable(Nameable, Updateable):
         """
         if warning and reconstrained.size > 0:
             # TODO: figure out which parameters have changed and only print those
-            print "WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name)
+            print("WARNING: reconstraining parameters {}".format(self.hierarchy_name() or self.name))
         index = self._raveled_index()
         which.add(what, index)
         return index

@@ -571,7 +581,7 @@ class Indexable(Nameable, Updateable):
         if len(transforms) == 0:
             transforms = which.properties()
         removed = np.empty((0,), dtype=int)
-        for t in transforms:
+        for t in list(transforms):
             unconstrained = which.remove(t, self._raveled_index())
             removed = np.union1d(removed, unconstrained)
             if t is __fixed__:
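Note: the list() wrapper matters because which.remove() below mutates the container the loop is reading from; with Python 3's lazy views, mutating while iterating raises a RuntimeError, so the loop snapshots its source first. Sketch:

    d = {"a": 1, "b": 2}
    for k in list(d):  # snapshot; `for k in d` with deletion inside raises RuntimeError
        del d[k]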

@@ -612,7 +622,9 @@ class OptimizationHandlable(Indexable):
 
         if not self._optimizer_copy_transformed:
             self._optimizer_copy_.flat = self.param_array.flat
-            [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
+            #py3 fix
+            #[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
+            [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__]
             if self.has_parent() and (self.constraints[__fixed__].size != 0 or self._has_ties()):
                 fixes = np.ones(self.size).astype(bool)
                 fixes[self.constraints[__fixed__]] = FIXED

@@ -641,21 +653,25 @@ class OptimizationHandlable(Indexable):
         if f is None:
             self.param_array.flat = p
             [np.put(self.param_array, ind, c.f(self.param_array.flat[ind]))
-                for c, ind in self.constraints.iteritems() if c != __fixed__]
+                #py3 fix
+                #for c, ind in self.constraints.iteritems() if c != __fixed__]
+                for c, ind in self.constraints.items() if c != __fixed__]
         else:
             self.param_array.flat[f] = p
             [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))
-                for c, ind in self.constraints.iteritems() if c != __fixed__]
+                #py3 fix
+                #for c, ind in self.constraints.iteritems() if c != __fixed__]
+                for c, ind in self.constraints.items() if c != __fixed__]
         #self._highest_parent_.tie.propagate_val()
 
         self._optimizer_copy_transformed = False
         self.trigger_update()
 
     def _get_params_transformed(self):
-        raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!"
+        raise DeprecationWarning("_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!")
     #
     def _set_params_transformed(self, p):
-        raise DeprecationWarning, "_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!"
+        raise DeprecationWarning("_get|set_params{_optimizer_copy_transformed} is deprecated, use self.optimizer array insetad!")
 
     def _trigger_params_changed(self, trigger_parent=True):
         """

@@ -680,7 +696,9 @@
         constraint to it.
         """
         self._highest_parent_.tie.collate_gradient()
-        [np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+        #py3 fix
+        #[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+        [np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
         if self._has_fixes(): return g[self._fixes_]
         return g
 

@@ -690,7 +708,9 @@
         constraint to it.
         """
         self._highest_parent_.tie.collate_gradient()
-        [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+        #py3 fix
+        #[np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
+        [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
         if self._has_fixes(): return g[self._fixes_]
         return g
 

@@ -701,7 +721,7 @@
         Return the number of parameters of this parameter_handle.
         Param objects will always return 0.
         """
-        raise NotImplemented, "Abstract, please implement in respective classes"
+        raise NotImplemented("Abstract, please implement in respective classes")
 
     def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True):
         """
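Note: this hunk only modernizes the raise syntax; `NotImplemented` is a comparison-protocol constant, not an exception class, so raising it still fails with a TypeError at runtime. `NotImplementedError` is presumably what was meant:

    def _size_transformed():
        raise NotImplementedError("Abstract, please implement in respective classes")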

@@ -750,7 +770,9 @@
         self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)
         # now draw from prior where possible
         x = self.param_array.copy()
-        [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
+        #Py3 fix
+        #[np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
+        [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.items() if not p is None]
         unfixlist = np.ones((self.size,),dtype=np.bool)
         unfixlist[self.constraints[__fixed__]] = False
         self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]

@@ -1,12 +1,12 @@
 # Copyright (c) 2014, Max Zwiessele, James Hensman
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-
+import six # For metaclass support in Python 2 and 3 simultaneously
 import numpy; np = numpy
 import itertools
 from re import compile, _pattern_type
-from param import ParamConcatenation
-from parameter_core import HierarchyError, Parameterizable, adjust_name_for_printing
+from .param import ParamConcatenation
+from .parameter_core import HierarchyError, Parameterizable, adjust_name_for_printing
 
 import logging
 from GPy.core.parameterization.index_operations import ParameterIndexOperationsView

@@ -27,6 +27,7 @@ class ParametersChangedMeta(type):
         self.parameters_changed()
         return self
 
+@six.add_metaclass(ParametersChangedMeta)
 class Parameterized(Parameterizable):
     """
     Parameterized class
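Note: Python 2 honours a class-level `__metaclass__` attribute while Python 3 only accepts the `class C(Base, metaclass=Meta)` keyword; the six.add_metaclass decorator produces the right form for whichever interpreter is running. Sketch:

    import six

    class Meta(type):
        def __call__(cls, *args, **kw):
            return super(Meta, cls).__call__(*args, **kw)

    @six.add_metaclass(Meta)  # __metaclass__ on Python 2, metaclass=Meta on Python 3
    class Parameterized(object):
        pass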

@@ -73,7 +74,9 @@ class Parameterized(Parameterizable):
     # Metaclass for parameters changed after init.
     # This makes sure, that parameters changed will always be called after __init__
     # **Never** call parameters_changed() yourself
-    __metaclass__ = ParametersChangedMeta
+    #This is ignored in Python 3 -- you need to put the meta class in the function definition.
+    #__metaclass__ = ParametersChangedMeta
+    #The six module is used to support both Python 2 and 3 simultaneously
     #===========================================================================
     def __init__(self, name=None, parameters=[], *a, **kw):
         super(Parameterized, self).__init__(name=name, *a, **kw)

@@ -131,7 +134,7 @@ class Parameterized(Parameterizable):
         if param.has_parent():
             def visit(parent, self):
                 if parent is self:
-                    raise HierarchyError, "You cannot add a parameter twice into the hierarchy"
+                    raise HierarchyError("You cannot add a parameter twice into the hierarchy")
             param.traverse_parents(visit, self)
             param._parent_.unlink_parameter(param)
         # make sure the size is set

@@ -173,7 +176,7 @@ class Parameterized(Parameterizable):
             self._highest_parent_._connect_fixes()
 
         else:
-            raise HierarchyError, """Parameter exists already, try making a copy"""
+            raise HierarchyError("""Parameter exists already, try making a copy""")
 
 
     def link_parameters(self, *parameters):

@@ -189,9 +192,9 @@ class Parameterized(Parameterizable):
         """
         if not param in self.parameters:
             try:
-                raise RuntimeError, "{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name)
+                raise RuntimeError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
             except AttributeError:
-                raise RuntimeError, "{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param))
+                raise RuntimeError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
 
         start = sum([p.size for p in self.parameters[:param._parent_index_]])
         self._remove_parameter_name(param)

@@ -215,9 +218,9 @@ class Parameterized(Parameterizable):
         self._highest_parent_._notify_parent_change()
 
     def add_parameter(self, *args, **kwargs):
-        raise DeprecationWarning, "add_parameter was renamed to link_parameter to avoid confusion of setting variables, use link_parameter instead"
+        raise DeprecationWarning("add_parameter was renamed to link_parameter to avoid confusion of setting variables, use link_parameter instead")
     def remove_parameter(self, *args, **kwargs):
-        raise DeprecationWarning, "remove_parameter was renamed to unlink_parameter to avoid confusion of setting variables, use unlink_parameter instead"
+        raise DeprecationWarning("remove_parameter was renamed to unlink_parameter to avoid confusion of setting variables, use unlink_parameter instead")
 
     def _connect_parameters(self, ignore_added_names=False):
         # connect parameterlist to this parameterized object

@@ -237,7 +240,7 @@ class Parameterized(Parameterizable):
         self._param_slices_ = []
         for i, p in enumerate(self.parameters):
             if not p.param_array.flags['C_CONTIGUOUS']:
-                raise ValueError, "This should not happen! Please write an email to the developers with the code, which reproduces this error. All parameter arrays must be C_CONTIGUOUS"
+                raise ValueError("This should not happen! Please write an email to the developers with the code, which reproduces this error. All parameter arrays must be C_CONTIGUOUS")
 
             p._parent_ = self
             p._parent_index_ = i

@@ -268,7 +271,7 @@ class Parameterized(Parameterizable):
         """
         if not isinstance(regexp, _pattern_type): regexp = compile(regexp)
         found_params = []
-        for n, p in itertools.izip(self.parameter_names(False, False, True), self.flattened_parameters):
+        for n, p in zip(self.parameter_names(False, False, True), self.flattened_parameters):
             if regexp.match(n) is not None:
                 found_params.append(p)
         return found_params

@@ -279,7 +282,7 @@ class Parameterized(Parameterizable):
         else:
             if paramlist is None:
                 paramlist = self.grep_param_names(name)
-            if len(paramlist) < 1: raise AttributeError, name
+            if len(paramlist) < 1: raise AttributeError(name)
             if len(paramlist) == 1:
                 if isinstance(paramlist[-1], Parameterized):
                     paramlist = paramlist[-1].flattened_parameters

@@ -295,7 +298,7 @@ class Parameterized(Parameterizable):
             try:
                 self.param_array[name] = value
             except:
-                raise ValueError, "Setting by slice or index only allowed with array-like"
+                raise ValueError("Setting by slice or index only allowed with array-like")
             self.trigger_update()
         else:
             try: param = self.__getitem__(name, paramlist)

@@ -325,7 +328,7 @@ class Parameterized(Parameterizable):
             self._notify_parent_change()
             self.parameters_changed()
         except Exception as e:
-            print "WARNING: caught exception {!s}, trying to continue".format(e)
+            print("WARNING: caught exception {!s}, trying to continue".format(e))
 
     def copy(self, memo=None):
         if memo is None:

@@ -379,7 +382,7 @@ class Parameterized(Parameterizable):
         pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]])
         format_spec = "<tr><td class=tg-left>{{name:<{0}s}}</td><td class=tg-right>{{desc:>{1}s}}</td><td class=tg-left>{{const:^{2}s}}</td><td class=tg-left>{{pri:^{3}s}}</td><td class=tg-left>{{t:^{4}s}}</td></tr>".format(nl, sl, cl, pl, tl)
         to_print = []
-        for n, d, c, t, p in itertools.izip(names, desc, constrs, ts, prirs):
+        for n, d, c, t, p in zip(names, desc, constrs, ts, prirs):
             to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))
         sep = '-' * (nl + sl + cl + + pl + tl + 8 * 2 + 3)
         if header:

@@ -414,7 +417,7 @@ class Parameterized(Parameterizable):
         pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]])
         format_spec = " \033[1m{{name:<{0}s}}\033[0;0m | {{desc:>{1}s}} | {{const:^{2}s}} | {{pri:^{3}s}} | {{t:^{4}s}}".format(nl, sl, cl, pl, tl)
         to_print = []
-        for n, d, c, t, p in itertools.izip(names, desc, constrs, ts, prirs):
+        for n, d, c, t, p in zip(names, desc, constrs, ts, prirs):
             to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))
         sep = '-' * (nl + sl + cl + + pl + tl + 8 * 2 + 3)
         if header:

@@ -5,7 +5,7 @@
 import numpy as np
 from scipy.special import gammaln, digamma
 from ...util.linalg import pdinv
-from domains import _REAL, _POSITIVE
+from .domains import _REAL, _POSITIVE
 import warnings
 import weakref
 

@@ -15,8 +15,12 @@ class Prior(object):
     _instance = None
     def __new__(cls, *args, **kwargs):
         if not cls._instance or cls._instance.__class__ is not cls:
-            cls._instance = super(Prior, cls).__new__(cls, *args, **kwargs)
-        return cls._instance
+            newfunc = super(Prior, cls).__new__
+            if newfunc is object.__new__:
+                cls._instance = newfunc(cls)
+            else:
+                cls._instance = newfunc(cls, *args, **kwargs)
+        return cls._instance
 
     def pdf(self, x):
         return np.exp(self.lnpdf(x))
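Note: under Python 3, object.__new__ raises a TypeError when handed extra positional arguments from an overriding __new__, so the old `super(Prior, cls).__new__(cls, *args, **kwargs)` breaks for these singleton caches. The rewritten hunks forward the arguments only when __new__ has genuinely been overridden further up the MRO, i.e. when it is not object.__new__ itself. A simplified sketch of the pattern:

    class Singleton(object):
        _instance = None
        def __new__(cls, *args, **kwargs):
            if not cls._instance or cls._instance.__class__ is not cls:
                newfunc = super(Singleton, cls).__new__
                if newfunc is object.__new__:  # object.__new__ takes no extra args on Python 3
                    cls._instance = newfunc(cls)
                else:
                    cls._instance = newfunc(cls, *args, **kwargs)
            return cls._instance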

@@ -52,7 +56,11 @@ class Gaussian(Prior):
         for instance in cls._instances:
             if instance().mu == mu and instance().sigma == sigma:
                 return instance()
-        o = super(Prior, cls).__new__(cls, mu, sigma)
+        newfunc = super(Prior, cls).__new__
+        if newfunc is object.__new__:
+            o = newfunc(cls)
+        else:
+            o = newfunc(cls, mu, sigma)
         cls._instances.append(weakref.ref(o))
         return cls._instances[-1]()

@@ -140,7 +148,11 @@ class LogGaussian(Gaussian):
         for instance in cls._instances:
             if instance().mu == mu and instance().sigma == sigma:
                 return instance()
-        o = super(Prior, cls).__new__(cls, mu, sigma)
+        newfunc = super(Prior, cls).__new__
+        if newfunc is object.__new__:
+            o = newfunc(cls)
+        else:
+            o = newfunc(cls, mu, sigma)
         cls._instances.append(weakref.ref(o))
         return cls._instances[-1]()

@@ -258,7 +270,11 @@ class Gamma(Prior):
         for instance in cls._instances:
             if instance().a == a and instance().b == b:
                 return instance()
-        o = super(Prior, cls).__new__(cls, a, b)
+        newfunc = super(Prior, cls).__new__
+        if newfunc is object.__new__:
+            o = newfunc(cls)
+        else:
+            o = newfunc(cls, a, b)
         cls._instances.append(weakref.ref(o))
         return cls._instances[-1]()

@@ -398,7 +414,7 @@ class DGPLVM_KFDA(Prior):
     def compute_cls(self, x):
         cls = {}
         # Appending each data point to its proper class
-        for j in xrange(self.datanum):
+        for j in range(self.datanum):
             class_label = self.get_class_label(self.lbl[j])
             if class_label not in cls:
                 cls[class_label] = []
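Note: xrange() exists only on Python 2; Python 3's range() is already lazy, so a straight rename is correct (on Python 2 it allocates a list, harmless at these sizes). The same substitution repeats through the remaining DGPLVM prior hunks and the Tie class below. Sketch:

    datanum = 4
    for j in range(datanum):  # xrange(datanum) on Python 2 only
        pass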

@@ -537,7 +553,7 @@ class DGPLVM(Prior):
     def compute_cls(self, x):
         cls = {}
         # Appending each data point to its proper class
-        for j in xrange(self.datanum):
+        for j in range(self.datanum):
             class_label = self.get_class_label(self.lbl[j])
             if class_label not in cls:
                 cls[class_label] = []

@@ -549,14 +565,14 @@ class DGPLVM(Prior):
         M_i = np.zeros((self.classnum, self.dim))
         for i in cls:
             # Mean of each class
-            class_i = cls[i]
+            class_i = cls[i]
             M_i[i] = np.mean(class_i, axis=0)
         return M_i
 
     # Adding data points as tuple to the dictionary so that we can access indices
     def compute_indices(self, x):
         data_idx = {}
-        for j in xrange(self.datanum):
+        for j in range(self.datanum):
             class_label = self.get_class_label(self.lbl[j])
             if class_label not in data_idx:
                 data_idx[class_label] = []

@@ -575,7 +591,7 @@ class DGPLVM(Prior):
             else:
                 lst_idx = []
                 # Here we put indices of each class in to the list called lst_idx_all
-                for m in xrange(len(data_idx[i])):
+                for m in range(len(data_idx[i])):
                     lst_idx.append(data_idx[i][m][0])
                 lst_idx_all.append(lst_idx)
         return lst_idx_all

@@ -611,7 +627,7 @@ class DGPLVM(Prior):
             # pdb.set_trace()
             # Calculating Bi
             B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
-        for k in xrange(self.datanum):
+        for k in range(self.datanum):
             for i in data_idx:
                 N_i = float(len(data_idx[i]))
                 if k in lst_idx_all[i]:

@@ -663,7 +679,7 @@ class DGPLVM(Prior):
         # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
         #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
         #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
-        Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
+        Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
         return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
 
     # This function calculates derivative of the log of prior function

@@ -684,7 +700,7 @@ class DGPLVM(Prior):
         # Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
         #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
         #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
-        Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
+        Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
         Sb_inv_N_trans = np.transpose(Sb_inv_N)
         Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
         Sw_trans = np.transpose(Sw)

@@ -742,7 +758,7 @@ class DGPLVM_T(Prior):
         self.datanum = lbl.shape[0]
         self.x_shape = x_shape
         self.dim = x_shape[1]
-        self.vec = vec
+        self.vec = vec
 
 
     def get_class_label(self, y):

@@ -756,7 +772,7 @@ class DGPLVM_T(Prior):
     def compute_cls(self, x):
         cls = {}
         # Appending each data point to its proper class
-        for j in xrange(self.datanum):
+        for j in range(self.datanum):
             class_label = self.get_class_label(self.lbl[j])
             if class_label not in cls:
                 cls[class_label] = []

@@ -768,14 +784,14 @@ class DGPLVM_T(Prior):
         M_i = np.zeros((self.classnum, self.dim))
         for i in cls:
             # Mean of each class
-            class_i = np.multiply(cls[i],vec)
+            class_i = np.multiply(cls[i],vec)
             M_i[i] = np.mean(class_i, axis=0)
         return M_i
 
     # Adding data points as tuple to the dictionary so that we can access indices
     def compute_indices(self, x):
         data_idx = {}
-        for j in xrange(self.datanum):
+        for j in range(self.datanum):
             class_label = self.get_class_label(self.lbl[j])
             if class_label not in data_idx:
                 data_idx[class_label] = []

@@ -794,7 +810,7 @@ class DGPLVM_T(Prior):
             else:
                 lst_idx = []
                 # Here we put indices of each class in to the list called lst_idx_all
-                for m in xrange(len(data_idx[i])):
+                for m in range(len(data_idx[i])):
                     lst_idx.append(data_idx[i][m][0])
                 lst_idx_all.append(lst_idx)
         return lst_idx_all

@@ -830,7 +846,7 @@ class DGPLVM_T(Prior):
             # pdb.set_trace()
             # Calculating Bi
             B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
-        for k in xrange(self.datanum):
+        for k in range(self.datanum):
             for i in data_idx:
                 N_i = float(len(data_idx[i]))
                 if k in lst_idx_all[i]:

@@ -883,7 +899,7 @@ class DGPLVM_T(Prior):
         #Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
         #print 'SB_inv: ', Sb_inv_N
         #Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
-        Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
+        Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
         return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
 
     # This function calculates derivative of the log of prior function
@ -905,7 +921,7 @@ class DGPLVM_T(Prior):
|
|||
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
|
||||
#print 'SB_inv: ',Sb_inv_N
|
||||
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
|
||||
Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
|
||||
Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
|
||||
Sb_inv_N_trans = np.transpose(Sb_inv_N)
|
||||
Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
|
||||
Sw_trans = np.transpose(Sw)
|
||||
|
@ -2,8 +2,8 @@
|
|||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
import numpy as np
|
||||
from parameterized import Parameterized
|
||||
from param import Param
|
||||
from .parameterized import Parameterized
|
||||
from .param import Param
|
||||
|
||||
class Remapping(Parameterized):
|
||||
def mapping(self):
|
||||
|
|
@ -98,7 +98,7 @@ class Tie(Parameterized):
|
|||
if np.all(self.label_buf[idx]==0):
|
||||
# No element of p has been tied before.
|
||||
tie_idx = self._expandTieParam(1)
|
||||
print tie_idx
|
||||
print(tie_idx)
|
||||
tie_id = self.label_buf.max()+1
|
||||
self.label_buf[tie_idx] = tie_id
|
||||
else:
|
||||
|
|
@ -185,18 +185,18 @@ class Tie(Parameterized):
|
|||
def _check_change(self):
|
||||
changed = False
|
||||
if self.tied_param is not None:
|
||||
for i in xrange(self.tied_param.size):
|
||||
for i in range(self.tied_param.size):
|
||||
b0 = self.label_buf==self.label_buf[self.buf_idx[i]]
|
||||
b = self._highest_parent_.param_array[b0]!=self.tied_param[i]
|
||||
if b.sum()==0:
|
||||
print 'XXX'
|
||||
print('XXX')
|
||||
continue
|
||||
elif b.sum()==1:
|
||||
print '!!!'
|
||||
print('!!!')
|
||||
val = self._highest_parent_.param_array[b0][b][0]
|
||||
self._highest_parent_.param_array[b0] = val
|
||||
else:
|
||||
print '@@@'
|
||||
print('@@@')
|
||||
self._highest_parent_.param_array[b0] = self.tied_param[i]
|
||||
changed = True
|
||||
return changed
|
||||
|
|
@ -212,11 +212,11 @@ class Tie(Parameterized):
|
|||
if self.tied_param is not None:
|
||||
self.tied_param.gradient = 0.
|
||||
[np.put(self.tied_param.gradient, i, self._highest_parent_.gradient[self.label_buf==self.label_buf[self.buf_idx[i]]].sum())
|
||||
for i in xrange(self.tied_param.size)]
|
||||
for i in range(self.tied_param.size)]
|
||||
|
||||
def propagate_val(self):
|
||||
if self.tied_param is not None:
|
||||
for i in xrange(self.tied_param.size):
|
||||
for i in range(self.tied_param.size):
|
||||
self._highest_parent_.param_array[self.label_buf==self.label_buf[self.buf_idx[i]]] = self.tied_param[i]
|
||||
@ -3,7 +3,7 @@
|
|||
|
||||
|
||||
import numpy as np
|
||||
from domains import _POSITIVE,_NEGATIVE, _BOUNDED
|
||||
from .domains import _POSITIVE,_NEGATIVE, _BOUNDED
|
||||
import weakref
|
||||
|
||||
import sys
|
||||
|
|
@ -72,7 +72,7 @@ class Logexp(Transformation):
|
|||
return np.einsum('i,i->i', df, np.where(f>_lim_val, 1., 1. - np.exp(-f)))
|
||||
def initialize(self, f):
|
||||
if np.any(f < 0.):
|
||||
print "Warning: changing parameters to satisfy constraints"
|
||||
print("Warning: changing parameters to satisfy constraints")
|
||||
return np.abs(f)
|
||||
def __str__(self):
|
||||
return '+ve'
|
||||
|
|
@ -130,7 +130,7 @@ class NormalTheta(Transformation):
|
|||
|
||||
def initialize(self, f):
|
||||
if np.any(f[self.var_indices] < 0.):
|
||||
print "Warning: changing parameters to satisfy constraints"
|
||||
print("Warning: changing parameters to satisfy constraints")
|
||||
f[self.var_indices] = np.abs(f[self.var_indices])
|
||||
return f
|
||||
|
||||
|
|
@ -177,7 +177,7 @@ class NormalNaturalAntti(NormalTheta):
|
|||
|
||||
def initialize(self, f):
|
||||
if np.any(f[self.var_indices] < 0.):
|
||||
print "Warning: changing parameters to satisfy constraints"
|
||||
print("Warning: changing parameters to satisfy constraints")
|
||||
f[self.var_indices] = np.abs(f[self.var_indices])
|
||||
return f
|
||||
|
||||
|
|
@ -220,7 +220,7 @@ class NormalEta(Transformation):
|
|||
|
||||
def initialize(self, f):
|
||||
if np.any(f[self.var_indices] < 0.):
|
||||
print "Warning: changing parameters to satisfy constraints"
|
||||
print("Warning: changing parameters to satisfy constraints")
|
||||
f[self.var_indices] = np.abs(f[self.var_indices])
|
||||
return f
|
||||
|
||||
|
|
@ -360,7 +360,7 @@ class LogexpNeg(Transformation):
|
|||
return np.einsum('i,i->i', df, np.where(f>_lim_val, -1, -1 + np.exp(-f)))
|
||||
def initialize(self, f):
|
||||
if np.any(f < 0.):
|
||||
print "Warning: changing parameters to satisfy constraints"
|
||||
print("Warning: changing parameters to satisfy constraints")
|
||||
return np.abs(f)
|
||||
def __str__(self):
|
||||
return '+ve'
|
||||
|
|
@ -412,7 +412,7 @@ class LogexpClipped(Logexp):
|
|||
return np.einsum('i,i->i', df, gf) # np.where(f < self.lower, 0, gf)
|
||||
def initialize(self, f):
|
||||
if np.any(f < 0.):
|
||||
print "Warning: changing parameters to satisfy constraints"
|
||||
print("Warning: changing parameters to satisfy constraints")
|
||||
return np.abs(f)
|
||||
def __str__(self):
|
||||
return '+ve_c'
|
||||
|
|
@ -428,7 +428,7 @@ class Exponent(Transformation):
|
|||
return np.einsum('i,i->i', df, f)
|
||||
def initialize(self, f):
|
||||
if np.any(f < 0.):
|
||||
print "Warning: changing parameters to satisfy constraints"
|
||||
print("Warning: changing parameters to satisfy constraints")
|
||||
return np.abs(f)
|
||||
def __str__(self):
|
||||
return '+ve'
|
||||
|
|
@ -468,7 +468,11 @@ class Logistic(Transformation):
|
|||
for instance in cls._instances:
|
||||
if instance().lower == lower and instance().upper == upper:
|
||||
return instance()
|
||||
o = super(Transformation, cls).__new__(cls, lower, upper, *args, **kwargs)
|
||||
newfunc = super(Transformation, cls).__new__
|
||||
if newfunc is object.__new__:
|
||||
o = newfunc(cls)
|
||||
else:
|
||||
o = newfunc(cls, lower, upper, *args, **kwargs)
|
||||
cls._instances.append(weakref.ref(o))
|
||||
return cls._instances[-1]()
|
||||
def __init__(self, lower, upper):
|
||||
|
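A condensed sketch of the __new__ guard introduced above: Python 3's object.__new__ rejects extra arguments, so they are forwarded only when a custom __new__ further up the MRO will consume them (the real class additionally memoises instances via weak references):

class Transformation(object):
    pass

class Logistic(Transformation):
    def __new__(cls, lower, upper, *args, **kwargs):
        newfunc = super(Transformation, cls).__new__
        if newfunc is object.__new__:
            return newfunc(cls)    # object.__new__ accepts no extra arguments
        return newfunc(cls, lower, upper, *args, **kwargs)

    def __init__(self, lower, upper):
        self.lower, self.upper = lower, upper

l = Logistic(0., 1.)    # constructs cleanly on both Python 2 and Python 3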
|
@ -486,7 +490,7 @@ class Logistic(Transformation):
|
|||
return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference)
|
||||
def initialize(self, f):
|
||||
if np.any(np.logical_or(f < self.lower, f > self.upper)):
|
||||
print "Warning: changing parameters to satisfy constraints"
|
||||
print("Warning: changing parameters to satisfy constraints")
|
||||
#return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
|
||||
#FIXME: Max, zeros_like right?
|
||||
return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)
|
||||
@ -3,7 +3,7 @@ Created on 11 Nov 2014
|
|||
|
||||
@author: maxz
|
||||
'''
|
||||
from observable import Observable
|
||||
from .observable import Observable
|
||||
|
||||
|
||||
class Updateable(Observable):
|
||||
|
|
@ -35,7 +35,7 @@ class Updateable(Observable):
|
|||
self.trigger_update()
|
||||
|
||||
def toggle_update(self):
|
||||
print "deprecated: toggle_update was renamed to update_toggle for easier access"
|
||||
print("deprecated: toggle_update was renamed to update_toggle for easier access")
|
||||
self.update_toggle()
|
||||
def update_toggle(self):
|
||||
self.update_model(not self.update_model())
|
||||
@ -5,9 +5,9 @@ Created on 6 Nov 2013
|
|||
'''
|
||||
|
||||
import numpy as np
|
||||
from parameterized import Parameterized
|
||||
from param import Param
|
||||
from transformations import Logexp, Logistic,__fixed__
|
||||
from .parameterized import Parameterized
|
||||
from .param import Param
|
||||
from .transformations import Logexp, Logistic,__fixed__
|
||||
from GPy.util.misc import param_to_array
|
||||
from GPy.util.caching import Cache_this
|
||||
|
||||
|
|
@ -16,13 +16,13 @@ class VariationalPrior(Parameterized):
|
|||
super(VariationalPrior, self).__init__(name=name, **kw)
|
||||
|
||||
def KL_divergence(self, variational_posterior):
|
||||
raise NotImplementedError, "override this for variational inference of latent space"
|
||||
raise NotImplementedError("override this for variational inference of latent space")
|
||||
|
||||
def update_gradients_KL(self, variational_posterior):
|
||||
"""
|
||||
updates the gradients for mean and variance **in place**
|
||||
"""
|
||||
raise NotImplementedError, "override this for variational inference of latent space"
|
||||
raise NotImplementedError("override this for variational inference of latent space")
|
||||
|
||||
class NormalPrior(VariationalPrior):
|
||||
def KL_divergence(self, variational_posterior):
|
||||
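The comma form of raise replaced above is Python-2-only syntax; the parenthesised call is the portable spelling, as in this sketch:

def kl_divergence_stub(variational_posterior):
    # Python 2 only:   raise NotImplementedError, "message"
    # Python 2 and 3:
    raise NotImplementedError("override this for variational inference of latent space")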
@ -2,11 +2,11 @@
|
|||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
import numpy as np
|
||||
from gp import GP
|
||||
from parameterization.param import Param
|
||||
from .gp import GP
|
||||
from .parameterization.param import Param
|
||||
from ..inference.latent_function_inference import var_dtc
|
||||
from .. import likelihoods
|
||||
from parameterization.variational import VariationalPosterior, NormalPosterior
|
||||
from .parameterization.variational import VariationalPosterior, NormalPosterior
|
||||
from ..util.linalg import mdot
|
||||
|
||||
import logging
|
||||
|
|
@ -47,8 +47,8 @@ class SparseGP(GP):
|
|||
inference_method = var_dtc.VarDTC(limit=1 if not self.missing_data else Y.shape[1])
|
||||
else:
|
||||
#inference_method = ??
|
||||
raise NotImplementedError, "what to do what to do?"
|
||||
print "defaulting to ", inference_method, "for latent function inference"
|
||||
raise NotImplementedError("what to do what to do?")
|
||||
print("defaulting to ", inference_method, "for latent function inference")
|
||||
|
||||
self.Z = Param('inducing inputs', Z)
|
||||
self.num_inducing = Z.shape[0]
|
||||
@ -2,7 +2,7 @@
|
|||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
import numpy as np
|
||||
from sparse_gp import SparseGP
|
||||
from .sparse_gp import SparseGP
|
||||
from numpy.linalg.linalg import LinAlgError
|
||||
from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch
|
||||
@ -56,7 +56,7 @@ class SparseGP_MPI(SparseGP):
|
|||
self.N_range = (N_start, N_end)
|
||||
self.N_list = np.array(N_list)
|
||||
self.Y_local = self.Y[N_start:N_end]
|
||||
print 'MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range)
|
||||
print('MPI RANK '+str(self.mpi_comm.rank)+' with the data range '+str(self.N_range))
|
||||
mpi_comm.Bcast(self.param_array, root=0)
|
||||
self.update_model(True)
|
||||
@ -3,8 +3,8 @@
|
|||
|
||||
import numpy as np
|
||||
from ..util import choleskies
|
||||
from sparse_gp import SparseGP
|
||||
from parameterization.param import Param
|
||||
from .sparse_gp import SparseGP
|
||||
from .parameterization.param import Param
|
||||
from ..inference.latent_function_inference import SVGP as svgp_inf
|
||||
@ -223,7 +223,7 @@ class Symbolic_core():
|
|||
|
||||
def code_gradients_cacheable(self, function, variable):
|
||||
if variable not in self.cacheable:
|
||||
raise RuntimeError, variable + ' must be a cacheable.'
|
||||
raise RuntimeError(variable + ' must be a cacheable variable.')
|
||||
lcode = 'gradients_' + variable + ' = np.zeros_like(' + variable + ')\n'
|
||||
lcode += 'self.update_cache(' + ', '.join(self.cacheable) + ')\n'
|
||||
for i, theta in enumerate(self.variables[variable]):
|
||||
@ -1,7 +1,7 @@
|
|||
# Copyright (c) 2012-2014, Max Zwiessele.
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
|
||||
from __future__ import print_function
|
||||
import numpy as np
|
||||
import sys
|
||||
import time
|
||||
|
|
@ -73,8 +73,8 @@ class VerboseOptimization(object):
|
|||
#self.progress.add_class('box-flex1')
|
||||
else:
|
||||
self.exps = exponents(self.fnow, self.current_gradient)
|
||||
print 'Running {} Code:'.format(self.opt_name)
|
||||
print ' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters)
|
||||
print('Running {} Code:'.format(self.opt_name))
|
||||
print(' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "secs", mi=self.len_maxiters))
|
||||
|
||||
def __enter__(self):
|
||||
self.start = time.time()
|
||||
|
|
@ -116,11 +116,11 @@ class VerboseOptimization(object):
|
|||
b = np.any(n_exps < self.exps)
|
||||
if a or b:
|
||||
self.p_iter = self.iteration
|
||||
print ''
|
||||
print('')
|
||||
if b:
|
||||
self.exps = n_exps
|
||||
print '\r',
|
||||
print '{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
|
||||
print('\r', end=' ')
|
||||
print('{3:> 7.2g} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), time.time()-self.start, mi=self.len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
|
||||
sys.stdout.flush()
|
||||
|
||||
def print_status(self, me, which=None):
|
||||
|
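The print conversions in this class all follow one pattern; with the future import the same lines run unchanged on Python 2 (a sketch, not from this commit):

from __future__ import print_function
import sys

print('\r', end=' ')           # was: print '\r',  (the trailing comma suppressed the newline)
print('iteration 1', end=' ')
sys.stdout.flush()             # make the carriage-return overwrite appear immediately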
|
@ -149,9 +149,9 @@ class VerboseOptimization(object):
|
|||
self.print_out()
|
||||
|
||||
if not self.ipython_notebook:
|
||||
print ''
|
||||
print 'Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start)
|
||||
print 'Optimization status: {0:s}'.format(self.status)
|
||||
print
|
||||
print()
|
||||
print('Optimization finished in {0:.5g} Seconds'.format(self.stop-self.start))
|
||||
print('Optimization status: {0}'.format(self.status))
|
||||
print()
|
||||
elif self.clear:
|
||||
self.hor_align.close()
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
import classification
|
||||
import regression
|
||||
import dimensionality_reduction
|
||||
import non_gaussian
|
||||
from . import classification
|
||||
from . import regression
|
||||
from . import dimensionality_reduction
|
||||
from . import non_gaussian
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ def oil(num_inducing=50, max_iters=100, kernel=None, optimize=True, plot=True):
|
|||
|
||||
"""
|
||||
try:import pods
|
||||
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
data = pods.datasets.oil()
|
||||
X = data['X']
|
||||
Xtest = data['Xtest']
|
||||
|
|
@ -52,7 +52,7 @@ def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
|
|||
"""
|
||||
|
||||
try:import pods
|
||||
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
data = pods.datasets.toy_linear_1d_classification(seed=seed)
|
||||
Y = data['Y'][:, 0:1]
|
||||
Y[Y.flatten() == -1] = 0
|
||||
|
|
@ -75,7 +75,7 @@ def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
|
|||
m.plot_f(ax=axes[0])
|
||||
m.plot(ax=axes[1])
|
||||
|
||||
print m
|
||||
print(m)
|
||||
return m
|
||||
|
||||
def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True):
|
||||
|
|
@ -88,7 +88,7 @@ def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=
|
|||
"""
|
||||
|
||||
try:import pods
|
||||
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
data = pods.datasets.toy_linear_1d_classification(seed=seed)
|
||||
Y = data['Y'][:, 0:1]
|
||||
Y[Y.flatten() == -1] = 0
|
||||
|
|
@ -114,7 +114,7 @@ def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=
|
|||
m.plot_f(ax=axes[0])
|
||||
m.plot(ax=axes[1])
|
||||
|
||||
print m
|
||||
print(m)
|
||||
return m
|
||||
|
||||
def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, optimize=True, plot=True):
|
||||
|
|
@ -127,7 +127,7 @@ def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, opti
|
|||
"""
|
||||
|
||||
try:import pods
|
||||
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
data = pods.datasets.toy_linear_1d_classification(seed=seed)
|
||||
Y = data['Y'][:, 0:1]
|
||||
Y[Y.flatten() == -1] = 0
|
||||
|
|
@ -147,7 +147,7 @@ def sparse_toy_linear_1d_classification(num_inducing=10, seed=default_seed, opti
|
|||
m.plot_f(ax=axes[0])
|
||||
m.plot(ax=axes[1])
|
||||
|
||||
print m
|
||||
print(m)
|
||||
return m
|
||||
|
||||
def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
|
||||
|
|
@ -160,7 +160,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
|
|||
"""
|
||||
|
||||
try:import pods
|
||||
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
data = pods.datasets.toy_linear_1d_classification(seed=seed)
|
||||
Y = data['Y'][:, 0:1]
|
||||
Y[Y.flatten() == -1] = 0
|
||||
|
|
@ -177,7 +177,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
|
|||
# Parameters optimization:
|
||||
for _ in range(5):
|
||||
m.optimize(max_iters=int(max_iters/5))
|
||||
print m
|
||||
print(m)
|
||||
|
||||
# Plot
|
||||
if plot:
|
||||
|
|
@ -186,7 +186,7 @@ def toy_heaviside(seed=default_seed, max_iters=100, optimize=True, plot=True):
|
|||
m.plot_f(ax=axes[0])
|
||||
m.plot(ax=axes[1])
|
||||
|
||||
print m
|
||||
print(m)
|
||||
return m
|
||||
|
||||
def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=None, optimize=True, plot=True):
|
||||
|
|
@ -202,7 +202,7 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=
|
|||
:type kernel: a GPy kernel
|
||||
"""
|
||||
try:import pods
|
||||
except ImportError:print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
data = pods.datasets.crescent_data(seed=seed)
|
||||
Y = data['Y']
|
||||
Y[Y.flatten()==-1] = 0
|
||||
|
|
@ -224,5 +224,5 @@ def crescent_data(model_type='Full', num_inducing=10, seed=default_seed, kernel=
|
|||
if plot:
|
||||
m.plot()
|
||||
|
||||
print m
|
||||
print(m)
|
||||
return m
|
||||
|
|
|
|||
|
|
@ -333,7 +333,7 @@ def bgplvm_simulation(optimize=True, verbose=1,
|
|||
m.likelihood.variance = .1
|
||||
|
||||
if optimize:
|
||||
print "Optimizing model:"
|
||||
print("Optimizing model:")
|
||||
m.optimize('bfgs', messages=verbose, max_iters=max_iters,
|
||||
gtol=.05)
|
||||
if plot:
|
||||
|
|
@ -358,7 +358,7 @@ def ssgplvm_simulation(optimize=True, verbose=1,
|
|||
m.likelihood.variance = .1
|
||||
|
||||
if optimize:
|
||||
print "Optimizing model:"
|
||||
print("Optimizing model:")
|
||||
m.optimize('scg', messages=verbose, max_iters=max_iters,
|
||||
gtol=.05)
|
||||
if plot:
|
||||
|
|
@ -388,7 +388,7 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1,
|
|||
m.Yreal = Y
|
||||
|
||||
if optimize:
|
||||
print "Optimizing model:"
|
||||
print("Optimizing model:")
|
||||
m.optimize('bfgs', messages=verbose, max_iters=max_iters,
|
||||
gtol=.05)
|
||||
if plot:
|
||||
|
|
@ -411,7 +411,7 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
|
|||
m['.*noise'] = [Y.var() / 40. for Y in Ylist]
|
||||
|
||||
if optimize:
|
||||
print "Optimizing Model:"
|
||||
print("Optimizing Model:")
|
||||
m.optimize(messages=verbose, max_iters=8e3)
|
||||
if plot:
|
||||
m.X.plot("MRD Latent Space 1D")
|
||||
|
|
@ -439,7 +439,7 @@ def mrd_simulation_missing_data(optimize=True, verbose=True, plot=True, plot_sim
|
|||
initx="random", initz='permute', **kw)
|
||||
|
||||
if optimize:
|
||||
print "Optimizing Model:"
|
||||
print("Optimizing Model:")
|
||||
m.optimize('bfgs', messages=verbose, max_iters=8e3, gtol=.1)
|
||||
if plot:
|
||||
m.X.plot("MRD Latent Space 1D")
|
||||
|
|
@ -603,7 +603,7 @@ def stick_bgplvm(model=None, optimize=True, verbose=True, plot=True):
|
|||
try:
|
||||
if optimize: m.optimize('bfgs', messages=verbose, max_iters=5e3, bfgs_factor=10)
|
||||
except KeyboardInterrupt:
|
||||
print "Keyboard interrupt, continuing to plot and return"
|
||||
print("Keyboard interrupt, continuing to plot and return")
|
||||
|
||||
if plot:
|
||||
fig, (latent_axes, sense_axes) = plt.subplots(1, 2)
|
||||
|
|
@ -653,7 +653,7 @@ def ssgplvm_simulation_linear():
|
|||
def sample_X(Q, pi):
|
||||
x = np.empty(Q)
|
||||
dies = np.random.rand(Q)
|
||||
for q in xrange(Q):
|
||||
for q in range(Q):
|
||||
if dies[q] < pi:
|
||||
x[q] = np.random.randn()
|
||||
else:
|
||||
|
|
@ -663,7 +663,7 @@ def ssgplvm_simulation_linear():
|
|||
Y = np.empty((N, D))
|
||||
X = np.empty((N, Q))
|
||||
# Generate data from random sampled weight matrices
|
||||
for n in xrange(N):
|
||||
for n in range(N):
|
||||
X[n] = sample_X(Q, pi)
|
||||
w = np.random.randn(D, Q)
|
||||
Y[n] = np.dot(w, X[n])
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ def student_t_approx(optimize=True, plot=True):
|
|||
|
||||
#Add student t random noise to datapoints
|
||||
deg_free = 1
|
||||
print "Real noise: ", real_std
|
||||
print("Real noise: ", real_std)
|
||||
initial_var_guess = 0.5
|
||||
edited_real_sd = initial_var_guess
|
||||
|
||||
|
|
@ -73,7 +73,7 @@ def student_t_approx(optimize=True, plot=True):
|
|||
m4['.*t_scale2'].constrain_bounded(1e-6, 10.)
|
||||
m4['.*white'].constrain_fixed(1e-5)
|
||||
m4.randomize()
|
||||
print m4
|
||||
print(m4)
|
||||
debug=True
|
||||
if debug:
|
||||
m4.optimize(messages=1)
|
||||
|
|
@ -81,18 +81,18 @@ def student_t_approx(optimize=True, plot=True):
|
|||
pb.plot(m4.X, m4.inference_method.f_hat)
|
||||
pb.plot(m4.X, m4.Y, 'rx')
|
||||
m4.plot()
|
||||
print m4
|
||||
print(m4)
|
||||
return m4
|
||||
|
||||
if optimize:
|
||||
optimizer='scg'
|
||||
print "Clean Gaussian"
|
||||
print("Clean Gaussian")
|
||||
m1.optimize(optimizer, messages=1)
|
||||
print "Corrupt Gaussian"
|
||||
print("Corrupt Gaussian")
|
||||
m2.optimize(optimizer, messages=1)
|
||||
print "Clean student t"
|
||||
print("Clean student t")
|
||||
m3.optimize(optimizer, messages=1)
|
||||
print "Corrupt student t"
|
||||
print("Corrupt student t")
|
||||
m4.optimize(optimizer, messages=1)
|
||||
|
||||
if plot:
|
||||
|
|
@ -151,7 +151,7 @@ def boston_example(optimize=True, plot=True):
|
|||
|
||||
for n, (train, test) in enumerate(kf):
|
||||
X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]
|
||||
print "Fold {}".format(n)
|
||||
print("Fold {}".format(n))
|
||||
|
||||
noise = 1e-1 #np.exp(-2)
|
||||
rbf_len = 0.5
|
||||
|
|
@ -163,21 +163,21 @@ def boston_example(optimize=True, plot=True):
|
|||
score_folds[0, n] = rmse(Y_test, np.mean(Y_train))
|
||||
|
||||
#Gaussian GP
|
||||
print "Gauss GP"
|
||||
print("Gauss GP")
|
||||
mgp = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelgp.copy())
|
||||
mgp.constrain_fixed('.*white', 1e-5)
|
||||
mgp['.*len'] = rbf_len
|
||||
mgp['.*noise'] = noise
|
||||
print mgp
|
||||
print(mgp)
|
||||
if optimize:
|
||||
mgp.optimize(optimizer=optimizer, messages=messages)
|
||||
Y_test_pred = mgp.predict(X_test)
|
||||
score_folds[1, n] = rmse(Y_test, Y_test_pred[0])
|
||||
pred_density[1, n] = np.mean(mgp.log_predictive_density(X_test, Y_test))
|
||||
print mgp
|
||||
print pred_density
|
||||
print(mgp)
|
||||
print(pred_density)
|
||||
|
||||
print "Gaussian Laplace GP"
|
||||
print("Gaussian Laplace GP")
|
||||
N, D = Y_train.shape
|
||||
g_distribution = GPy.likelihoods.noise_model_constructors.gaussian(variance=noise, N=N, D=D)
|
||||
g_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), g_distribution)
|
||||
|
|
@ -186,18 +186,18 @@ def boston_example(optimize=True, plot=True):
|
|||
mg.constrain_fixed('.*white', 1e-5)
|
||||
mg['rbf_len'] = rbf_len
|
||||
mg['noise'] = noise
|
||||
print mg
|
||||
print(mg)
|
||||
if optimize:
|
||||
mg.optimize(optimizer=optimizer, messages=messages)
|
||||
Y_test_pred = mg.predict(X_test)
|
||||
score_folds[2, n] = rmse(Y_test, Y_test_pred[0])
|
||||
pred_density[2, n] = np.mean(mg.log_predictive_density(X_test, Y_test))
|
||||
print pred_density
|
||||
print mg
|
||||
print(pred_density)
|
||||
print(mg)
|
||||
|
||||
for stu_num, df in enumerate(degrees_freedoms):
|
||||
#Student T
|
||||
print "Student-T GP {}df".format(df)
|
||||
print("Student-T GP {}df".format(df))
|
||||
t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=df, sigma2=noise)
|
||||
stu_t_likelihood = GPy.likelihoods.Laplace(Y_train.copy(), t_distribution)
|
||||
mstu_t = GPy.models.GPRegression(X_train.copy(), Y_train.copy(), kernel=kernelstu.copy(), likelihood=stu_t_likelihood)
|
||||
|
|
@ -205,14 +205,14 @@ def boston_example(optimize=True, plot=True):
|
|||
mstu_t.constrain_bounded('.*t_scale2', 0.0001, 1000)
|
||||
mstu_t['rbf_len'] = rbf_len
|
||||
mstu_t['.*t_scale2'] = noise
|
||||
print mstu_t
|
||||
print(mstu_t)
|
||||
if optimize:
|
||||
mstu_t.optimize(optimizer=optimizer, messages=messages)
|
||||
Y_test_pred = mstu_t.predict(X_test)
|
||||
score_folds[3+stu_num, n] = rmse(Y_test, Y_test_pred[0])
|
||||
pred_density[3+stu_num, n] = np.mean(mstu_t.log_predictive_density(X_test, Y_test))
|
||||
print pred_density
|
||||
print mstu_t
|
||||
print(pred_density)
|
||||
print(mstu_t)
|
||||
|
||||
if plot:
|
||||
plt.figure()
|
||||
|
|
@ -230,8 +230,8 @@ def boston_example(optimize=True, plot=True):
|
|||
plt.scatter(X_test[:, data_axis_plot], Y_test, c='r', marker='x')
|
||||
plt.title('Stu t {}df'.format(df))
|
||||
|
||||
print "Average scores: {}".format(np.mean(score_folds, 1))
|
||||
print "Average pred density: {}".format(np.mean(pred_density, 1))
|
||||
print("Average scores: {}".format(np.mean(score_folds, 1)))
|
||||
print("Average pred density: {}".format(np.mean(pred_density, 1)))
|
||||
|
||||
if plot:
|
||||
#Plotting
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ def olympic_marathon_men(optimize=True, plot=True):
|
|||
"""Run a standard Gaussian process regression on the Olympic marathon data."""
|
||||
try:import pods
|
||||
except ImportError:
|
||||
print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
return
|
||||
data = pods.datasets.olympic_marathon_men()
|
||||
|
||||
|
|
@ -88,7 +88,7 @@ def epomeo_gpx(max_iters=200, optimize=True, plot=True):
|
|||
"""
|
||||
try:import pods
|
||||
except ImportError:
|
||||
print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
return
|
||||
data = pods.datasets.epomeo_gpx()
|
||||
num_data_list = []
|
||||
|
|
@ -135,7 +135,7 @@ def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=1000
|
|||
|
||||
try:import pods
|
||||
except ImportError:
|
||||
print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
return
|
||||
data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)
|
||||
# data['Y'] = data['Y'][0::2, :]
|
||||
|
|
@ -219,7 +219,7 @@ def olympic_100m_men(optimize=True, plot=True):
|
|||
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
|
||||
try:import pods
|
||||
except ImportError:
|
||||
print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
return
|
||||
data = pods.datasets.olympic_100m_men()
|
||||
|
||||
|
|
@ -240,7 +240,7 @@ def toy_rbf_1d(optimize=True, plot=True):
|
|||
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
|
||||
try:import pods
|
||||
except ImportError:
|
||||
print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
return
|
||||
data = pods.datasets.toy_rbf_1d()
|
||||
|
||||
|
|
@ -258,7 +258,7 @@ def toy_rbf_1d_50(optimize=True, plot=True):
|
|||
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
|
||||
try:import pods
|
||||
except ImportError:
|
||||
print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
return
|
||||
data = pods.datasets.toy_rbf_1d_50()
|
||||
|
||||
|
|
@ -377,7 +377,7 @@ def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
|
|||
"""Predict the location of a robot given wirelss signal strength readings."""
|
||||
try:import pods
|
||||
except ImportError:
|
||||
print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
return
|
||||
data = pods.datasets.robot_wireless()
|
||||
|
||||
|
|
@ -398,14 +398,14 @@ def robot_wireless(max_iters=100, kernel=None, optimize=True, plot=True):
|
|||
|
||||
sse = ((data['Xtest'] - Xpredict)**2).sum()
|
||||
|
||||
print('Sum of squares error on test data: ' + str(sse))
|
||||
print('Sum of squares error on test data: ' + str(sse))
|
||||
return m
|
||||
|
||||
def silhouette(max_iters=100, optimize=True, plot=True):
|
||||
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
|
||||
try:import pods
|
||||
except ImportError:
|
||||
print 'pods unavailable, see https://github.com/sods/ods for example datasets'
|
||||
print('pods unavailable, see https://github.com/sods/ods for example datasets')
|
||||
return
|
||||
data = pods.datasets.silhouette()
|
||||
|
||||
|
|
@ -416,7 +416,7 @@ def silhouette(max_iters=100, optimize=True, plot=True):
|
|||
if optimize:
|
||||
m.optimize(messages=True, max_iters=max_iters)
|
||||
|
||||
print m
|
||||
print(m)
|
||||
return m
|
||||
|
||||
def sparse_GP_regression_1D(num_samples=400, num_inducing=5, max_iters=100, optimize=True, plot=True, checkgrad=False):
|
||||
|
|
@ -468,7 +468,7 @@ def sparse_GP_regression_2D(num_samples=400, num_inducing=50, max_iters=100, opt
|
|||
if plot:
|
||||
m.plot()
|
||||
|
||||
print m
|
||||
print(m)
|
||||
return m
|
||||
|
||||
def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
|
||||
|
|
@ -492,7 +492,7 @@ def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
|
|||
if plot:
|
||||
m.plot(ax=axes[0])
|
||||
axes[0].set_title('no input uncertainty')
|
||||
print m
|
||||
print(m)
|
||||
|
||||
# the same Model with uncertainty
|
||||
m = GPy.models.SparseGPRegression(X, Y, kernel=GPy.kern.RBF(1), Z=Z, X_variance=S)
|
||||
|
|
@ -503,7 +503,7 @@ def uncertain_inputs_sparse_regression(max_iters=200, optimize=True, plot=True):
|
|||
axes[1].set_title('with input uncertainty')
|
||||
fig.canvas.draw()
|
||||
|
||||
print m
|
||||
print(m)
|
||||
return m
|
||||
|
||||
def simple_mean_function(max_iters=100, optimize=True, plot=True):
|
||||
@ -1,3 +1,3 @@
|
|||
import latent_function_inference
|
||||
import optimization
|
||||
import mcmc
|
||||
from . import latent_function_inference
|
||||
from . import optimization
|
||||
from . import mcmc
|
||||
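These __init__.py rewrites are needed because Python 3 dropped implicit relative imports; a sketch of the semantics inside a package __init__.py:

from __future__ import absolute_import   # opts Python 2 into the Python 3 behaviour

# import optimization         # Py2: finds the sibling module; Py3: ImportError (top level only)
from . import optimization    # both: explicitly the sibling, e.g. GPy.inference.optimization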
@ -61,15 +61,15 @@ class InferenceMethodList(LatentFunctionInference, list):
|
|||
for inf in state:
|
||||
self.append(inf)
|
||||
|
||||
from exact_gaussian_inference import ExactGaussianInference
|
||||
from laplace import Laplace, LaplaceBlock
|
||||
from .exact_gaussian_inference import ExactGaussianInference
|
||||
from .laplace import Laplace,LaplaceBlock
|
||||
from GPy.inference.latent_function_inference.var_dtc import VarDTC
|
||||
from expectation_propagation import EP
|
||||
from expectation_propagation_dtc import EPDTC
|
||||
from dtc import DTC
|
||||
from fitc import FITC
|
||||
from var_dtc_parallel import VarDTC_minibatch
|
||||
from svgp import SVGP
|
||||
from .expectation_propagation import EP
|
||||
from .expectation_propagation_dtc import EPDTC
|
||||
from .dtc import DTC
|
||||
from .fitc import FITC
|
||||
from .var_dtc_parallel import VarDTC_minibatch
|
||||
from .svgp import SVGP
|
||||
|
||||
# class FullLatentFunctionData(object):
|
||||
#
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# Copyright (c) 2012-2014, James Hensman
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
|
||||
import numpy as np
|
||||
from . import LatentFunctionInference
|
||||
|
|
@ -30,7 +30,7 @@ class DTC(LatentFunctionInference):
|
|||
#make sure the noise is not hetero
|
||||
beta = 1./likelihood.gaussian_variance(Y_metadata)
|
||||
if beta.size > 1:
|
||||
raise NotImplementedError, "no hetero noise with this implementation of DTC"
|
||||
raise NotImplementedError("no hetero noise with this implementation of DTC")
|
||||
|
||||
Kmm = kern.K(Z)
|
||||
Knn = kern.Kdiag(X)
|
||||
|
|
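The guard converted above rejects heteroscedastic noise by checking the size of the precision vector; a standalone sketch with illustrative values:

import numpy as np

beta = 1. / np.asarray([0.1])   # a single shared noise precision passes the check
if beta.size > 1:               # one precision per datum would need FITC-style handling
    raise NotImplementedError("no hetero noise with this implementation of DTC")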
@ -99,7 +99,7 @@ class vDTC(object):
|
|||
#make sure the noise is not hetero
|
||||
beta = 1./likelihood.gaussian_variance(Y_metadata)
|
||||
if beta.size > 1:
|
||||
raise NotImplementedError, "no hetero noise with this implementation of DTC"
|
||||
raise NotImplementedError("no hetero noise with this implementation of DTC")
|
||||
|
||||
Kmm = kern.K(Z)
|
||||
Knn = kern.Kdiag(X)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
from ...util.linalg import pdinv, dpotrs, tdot
|
||||
from ...util import diag
|
||||
import numpy as np
|
||||
|
|
@ -64,3 +64,17 @@ class ExactGaussianInference(LatentFunctionInference):
|
|||
dL_dthetaL = likelihood.exact_inference_gradients(np.diag(dL_dK),Y_metadata)
|
||||
|
||||
return Posterior(woodbury_chol=LW, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL, 'dL_dm':alpha}
|
||||
|
||||
def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None):
|
||||
"""
|
||||
Leave-one-out log predictive density as found in
|
||||
"Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models"
|
||||
Vehtari et al. 2014.
|
||||
"""
|
||||
g = posterior.woodbury_vector
|
||||
c = posterior.woodbury_inv
|
||||
c_diag = np.diag(c)[:, None]
|
||||
neg_log_marginal_LOO = 0.5*np.log(2*np.pi) - 0.5*np.log(c_diag) + 0.5*(g**2)/c_diag
|
||||
#Following "Predictive Approaches for Choosing Hyperparameters in Gaussian Processes",
|
||||
#the quantity above is believed to be the negative log marginal LOO, hence the sign flip below
|
||||
return -neg_log_marginal_LOO
|
||||
|
|
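In LaTeX, the leave-one-out log predictive density returned above, with g the Woodbury vector and c_{ii} the diagonal of the Woodbury inverse:

\log p(y_i \mid y_{-i}) = -\tfrac{1}{2}\log(2\pi) + \tfrac{1}{2}\log c_{ii} - \frac{g_i^2}{2\,c_{ii}}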
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
import numpy as np
|
||||
from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
from . import LatentFunctionInference
|
||||
log_2_pi = np.log(2*np.pi)
|
||||
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ from ...util import diag
|
|||
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify, DSYR
|
||||
from ...core.parameterization.variational import VariationalPosterior
|
||||
from . import LatentFunctionInference
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
log_2_pi = np.log(2*np.pi)
|
||||
|
||||
class EPDTC(LatentFunctionInference):
|
||||
|
|
@ -180,7 +180,7 @@ class EPDTC(LatentFunctionInference):
|
|||
if VVT_factor.shape[1] == Y.shape[1]:
|
||||
woodbury_vector = Cpsi1Vf # == Cpsi1V
|
||||
else:
|
||||
print 'foobar'
|
||||
print('foobar')
|
||||
psi1V = np.dot(mu_tilde[:,None].T*beta, psi1).T
|
||||
tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
|
||||
tmp, _ = dpotrs(LB, tmp, lower=1)
|
||||
|
|
@ -315,7 +315,7 @@ def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf,
|
|||
dL_dR = None
|
||||
elif het_noise:
|
||||
if uncertain_inputs:
|
||||
raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented"
|
||||
raise NotImplementedError("heteroscedatic derivates with uncertain inputs not implemented")
|
||||
else:
|
||||
#from ...util.linalg import chol_inv
|
||||
#LBi = chol_inv(LB)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# Copyright (c) 2012, James Hensman
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
|
||||
from ...util import diag
|
||||
import numpy as np
|
||||
|
|
@ -27,7 +27,7 @@ class FITC(LatentFunctionInference):
|
|||
#make sure the noise is not hetero
|
||||
sigma_n = likelihood.gaussian_variance(Y_metadata)
|
||||
if sigma_n.size >1:
|
||||
raise NotImplementedError, "no hetero noise with this implementation of FITC"
|
||||
raise NotImplementedError("no hetero noise with this implementation of FITC")
|
||||
|
||||
Kmm = kern.K(Z)
|
||||
Knn = kern.Kdiag(X)
|
||||
|
|
|
|||
|
|
@ -12,13 +12,14 @@
|
|||
|
||||
import numpy as np
|
||||
from ...util.linalg import mdot, jitchol, dpotrs, dtrtrs, dpotri, symmetrify, pdinv
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
import warnings
|
||||
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
|
||||
return ' %s:%s: %s:%s\n' % (filename, lineno, category.__name__, message)
|
||||
warnings.formatwarning = warning_on_one_line
|
||||
from scipy import optimize
|
||||
from . import LatentFunctionInference
|
||||
from scipy.integrate import quad
|
||||
|
||||
class Laplace(LatentFunctionInference):
|
||||
|
||||
|
|
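The formatwarning override installed above collapses each warning onto a single line; a usage sketch:

import warnings

def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
    return ' %s:%s: %s:%s\n' % (filename, lineno, category.__name__, message)

warnings.formatwarning = warning_on_one_line
warnings.warn("mode finding did not converge")   # now rendered on one line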
@ -39,6 +40,85 @@ class Laplace(LatentFunctionInference):
|
|||
self.first_run = True
|
||||
self._previous_Ki_fhat = None
|
||||
|
||||
def LOO(self, kern, X, Y, likelihood, posterior, Y_metadata=None, K=None, f_hat=None, W=None, Ki_W_i=None):
|
||||
"""
|
||||
Leave one out log predictive density as found in
|
||||
"Bayesian leave-one-out cross-validation approximations for Gaussian latent variable models"
|
||||
Vehtari et al. 2014.
|
||||
"""
|
||||
Ki_f_init = np.zeros_like(Y)
|
||||
|
||||
if K is None:
|
||||
K = kern.K(X)
|
||||
|
||||
if f_hat is None:
|
||||
f_hat, _ = self.rasm_mode(K, Y, likelihood, Ki_f_init, Y_metadata=Y_metadata)
|
||||
|
||||
if W is None:
|
||||
W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata)
|
||||
|
||||
if Ki_W_i is None:
|
||||
_, _, _, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave)
|
||||
|
||||
logpdf_dfhat = likelihood.dlogpdf_df(f_hat, Y, Y_metadata=Y_metadata)
|
||||
|
||||
if W.shape[1] == 1:
|
||||
W = np.diagflat(W)
|
||||
|
||||
#Eqs 14 and 16
|
||||
var_site = 1./np.diag(W)[:, None]
|
||||
mu_site = f_hat + var_site*logpdf_dfhat
|
||||
prec_site = 1./var_site
|
||||
#Eq 19
|
||||
marginal_cov = Ki_W_i
|
||||
marginal_mu = marginal_cov.dot(np.diagflat(prec_site)).dot(mu_site)
|
||||
marginal_var = np.diag(marginal_cov)[:, None]
|
||||
#Eq 30, using site parameters instead of Gaussian site parameters
|
||||
#(var_site instead of sigma^{2} )
|
||||
posterior_cav_var = 1./(1./marginal_var - 1./var_site)
|
||||
posterior_cav_mean = posterior_cav_var*((1./marginal_var)*marginal_mu - (1./var_site)*Y)
|
||||
|
||||
flat_y = Y.flatten()
|
||||
flat_mu = posterior_cav_mean.flatten()
|
||||
flat_var = posterior_cav_var.flatten()
|
||||
|
||||
if Y_metadata is not None:
|
||||
#Need to zip individual elements of Y_metadata as well
|
||||
Y_metadata_flat = {}
|
||||
if Y_metadata is not None:
|
||||
for key, val in Y_metadata.items():
|
||||
Y_metadata_flat[key] = np.atleast_1d(val).reshape(-1, 1)
|
||||
|
||||
zipped_values = []
|
||||
|
||||
for i in range(Y.shape[0]):
|
||||
y_m = {}
|
||||
for key, val in Y_metadata_flat.items():
|
||||
if np.isscalar(val) or val.shape[0] == 1:
|
||||
y_m[key] = val
|
||||
else:
|
||||
#Won't broadcast yet
|
||||
y_m[key] = val[i]
|
||||
zipped_values.append((flat_y[i], flat_mu[i], flat_var[i], y_m))
|
||||
else:
|
||||
#Otherwise just pass along None's
|
||||
zipped_values = zip(flat_y, flat_mu, flat_var, [None]*Y.shape[0])
|
||||
|
||||
def integral_generator(yi, mi, vi, yi_m):
|
||||
def f(fi_star):
|
||||
#More stable in the log space
|
||||
p_fi = np.exp(likelihood.logpdf(fi_star, yi, yi_m)
|
||||
- 0.5*np.log(2*np.pi*vi)
|
||||
- 0.5*np.square(mi-fi_star)/vi)
|
||||
return p_fi
|
||||
return f
|
||||
|
||||
#Eq 30
|
||||
p_ystar, _ = zip(*[quad(integral_generator(y, m, v, yi_m), -np.inf, np.inf)
|
||||
for y, m, v, yi_m in zipped_values])
|
||||
p_ystar = np.array(p_ystar).reshape(-1, 1)
|
||||
return np.log(p_ystar)
|
||||
|
||||
def inference(self, kern, X, likelihood, Y, mean_function=None, Y_metadata=None):
|
||||
"""
|
||||
Returns a Posterior class containing essential quantities of the posterior
|
||||
|
|
|
|||
|
|
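A toy check of the quadrature pattern used in the LOO code above -- integrating p(y|f) against a Gaussian N(f; m, v) with scipy.integrate.quad, working in log space for stability (values illustrative):

import numpy as np
from scipy.integrate import quad

def gauss_weighted_likelihood(logpdf, yi, mi, vi):
    def f(fi):
        return np.exp(logpdf(fi, yi) - 0.5*np.log(2*np.pi*vi) - 0.5*(mi - fi)**2/vi)
    return quad(f, -np.inf, np.inf)[0]

# with a unit-noise Gaussian likelihood this should equal N(yi; mi, vi + 1)
val = gauss_weighted_likelihood(
    lambda f, y: -0.5*np.log(2*np.pi) - 0.5*(y - f)**2, 1.0, 0.0, 0.5)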
@ -52,7 +52,7 @@ class Posterior(object):
|
|||
or ((mean is not None) and (cov is not None)):
|
||||
pass # we have sufficient to compute the posterior
|
||||
else:
|
||||
raise ValueError, "insufficient information to compute the posterior"
|
||||
raise ValueError("insufficient information to compute the posterior")
|
||||
|
||||
self._K_chol = K_chol
|
||||
self._K = K
|
||||
|
|
@ -108,7 +108,7 @@ class Posterior(object):
|
|||
if self._precision is None:
|
||||
cov = np.atleast_3d(self.covariance)
|
||||
self._precision = np.zeros(cov.shape) # if one covariance per dimension
|
||||
for p in xrange(cov.shape[-1]):
|
||||
for p in range(cov.shape[-1]):
|
||||
self._precision[:,:,p] = pdinv(cov[:,:,p])[0]
|
||||
return self._precision
|
||||
|
||||
|
|
@ -126,7 +126,7 @@ class Posterior(object):
|
|||
if self._woodbury_inv is not None:
|
||||
winv = np.atleast_3d(self._woodbury_inv)
|
||||
self._woodbury_chol = np.zeros(winv.shape)
|
||||
for p in xrange(winv.shape[-1]):
|
||||
for p in range(winv.shape[-1]):
|
||||
self._woodbury_chol[:,:,p] = pdinv(winv[:,:,p])[2]
|
||||
#Li = jitchol(self._woodbury_inv)
|
||||
#self._woodbury_chol, _ = dtrtri(Li)
|
||||
|
|
@ -135,13 +135,13 @@ class Posterior(object):
|
|||
#self._woodbury_chol = jitchol(W)
|
||||
#try computing woodbury chol from cov
|
||||
elif self._covariance is not None:
|
||||
raise NotImplementedError, "TODO: check code here"
|
||||
raise NotImplementedError("TODO: check code here")
|
||||
B = self._K - self._covariance
|
||||
tmp, _ = dpotrs(self.K_chol, B)
|
||||
self._woodbury_inv, _ = dpotrs(self.K_chol, tmp.T)
|
||||
_, _, self._woodbury_chol, _ = pdinv(self._woodbury_inv)
|
||||
else:
|
||||
raise ValueError, "insufficient information to compute posterior"
|
||||
raise ValueError("insufficient information to compute posterior")
|
||||
return self._woodbury_chol
|
||||
|
||||
@property
|
||||
|
|
@ -161,7 +161,7 @@ class Posterior(object):
|
|||
elif self._covariance is not None:
|
||||
B = np.atleast_3d(self._K) - np.atleast_3d(self._covariance)
|
||||
self._woodbury_inv = np.empty_like(B)
|
||||
for i in xrange(B.shape[-1]):
|
||||
for i in range(B.shape[-1]):
|
||||
tmp, _ = dpotrs(self.K_chol, B[:,:,i])
|
||||
self._woodbury_inv[:,:,i], _ = dpotrs(self.K_chol, tmp.T)
|
||||
return self._woodbury_inv
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@ from . import LatentFunctionInference
|
|||
from ...util import linalg
|
||||
from ...util import choleskies
|
||||
import numpy as np
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
|
||||
class SVGP(LatentFunctionInference):
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
|
||||
from ...util import diag
|
||||
from ...core.parameterization.variational import VariationalPosterior
|
||||
|
|
@ -170,7 +170,7 @@ class VarDTC(LatentFunctionInference):
|
|||
if VVT_factor.shape[1] == Y.shape[1]:
|
||||
woodbury_vector = Cpsi1Vf # == Cpsi1V
|
||||
else:
|
||||
print 'foobar'
|
||||
print('foobar')
|
||||
import ipdb; ipdb.set_trace()
|
||||
psi1V = np.dot(Y.T*beta, psi1).T
|
||||
tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
|
||||
|
|
@ -213,7 +213,7 @@ def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf,
|
|||
dL_dR = None
|
||||
elif het_noise:
|
||||
if uncertain_inputs:
|
||||
raise NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented"
|
||||
raise NotImplementedError("heteroscedatic derivates with uncertain inputs not implemented")
|
||||
else:
|
||||
#from ...util.linalg import chol_inv
|
||||
#LBi = chol_inv(LB)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
# Copyright (c) 2014, GPy authors (see AUTHORS.txt).
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
from posterior import Posterior
|
||||
from .posterior import Posterior
|
||||
from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri,pdinv
|
||||
from ...util import diag
|
||||
from ...core.parameterization.variational import VariationalPosterior
|
||||
|
|
@ -92,7 +92,7 @@ class VarDTC_minibatch(LatentFunctionInference):
|
|||
psi0_full = 0.
|
||||
YRY_full = 0.
|
||||
|
||||
for n_start in xrange(0,num_data,batchsize):
|
||||
for n_start in range(0,num_data,batchsize):
|
||||
n_end = min(batchsize+n_start, num_data)
|
||||
if batchsize==num_data:
|
||||
Y_slice = Y
|
||||
|
|
@ -170,7 +170,7 @@ class VarDTC_minibatch(LatentFunctionInference):
|
|||
Kmm = kern.K(Z).copy()
|
||||
diag.add(Kmm, self.const_jitter)
|
||||
if not np.isfinite(Kmm).all():
|
||||
print Kmm
|
||||
print(Kmm)
|
||||
Lm = jitchol(Kmm)
|
||||
|
||||
LmInvPsi2LmInvT = backsub_both_sides(Lm,psi2_full,transpose='right')
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
from hmc import HMC
|
||||
from .hmc import HMC
|
||||
@ -39,7 +39,7 @@ class HMC:
|
|||
:rtype: numpy.ndarray
|
||||
"""
|
||||
params = np.empty((num_samples,self.p.size))
|
||||
for i in xrange(num_samples):
|
||||
for i in range(num_samples):
|
||||
self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size),self.M)
|
||||
H_old = self._computeH()
|
||||
theta_old = self.model.optimizer_array.copy()
|
||||
|
|
@ -59,7 +59,7 @@ class HMC:
|
|||
return params
|
||||
|
||||
def _update(self, hmc_iters):
|
||||
for i in xrange(hmc_iters):
|
||||
for i in range(hmc_iters):
|
||||
self.p[:] += -self.stepsize/2.*self.model._transform_gradients(self.model.objective_function_gradients())
|
||||
self.model.optimizer_array = self.model.optimizer_array + self.stepsize*np.dot(self.Minv, self.p)
|
||||
self.p[:] += -self.stepsize/2.*self.model._transform_gradients(self.model.objective_function_gradients())
|
||||
|
|
@ -82,7 +82,7 @@ class HMC_shortcut:
|
|||
|
||||
def sample(self, m_iters=1000, hmc_iters=20):
|
||||
params = np.empty((m_iters,self.p.size))
|
||||
for i in xrange(m_iters):
|
||||
for i in range(m_iters):
|
||||
# sample a stepsize from the uniform distribution
|
||||
stepsize = np.exp(np.random.rand()*(self.stepsize_range[1]-self.stepsize_range[0])+self.stepsize_range[0])
|
||||
self.p[:] = np.random.multivariate_normal(np.zeros(self.p.size),self.M)
|
||||
|
|
|
|||
|
|
@ -9,7 +9,13 @@ import sys
|
|||
import re
|
||||
import numdifftools as ndt
|
||||
import pdb
|
||||
import cPickle
|
||||
|
||||
try:
|
||||
#In Python 2, cPickle is the faster C implementation. It does not exist in Python 3,
|
||||
#where the C accelerator is used automatically when available.
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
|
||||
|
||||
class Metropolis_Hastings:
|
||||
|
|
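The cPickle fallback added above is the usual 2/3 compatibility idiom, and the same shape works for other renamed stdlib modules; a self-contained sketch:

try:
    import cPickle as pickle   # Python 2: the faster C implementation
except ImportError:
    import pickle              # Python 3: the C accelerator is used automatically

data = pickle.dumps({'a': 1})
assert pickle.loads(data) == {'a': 1}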
@ -40,7 +46,7 @@ class Metropolis_Hastings:
|
|||
fcurrent = self.model.log_likelihood() + self.model.log_prior()
|
||||
accepted = np.zeros(Ntotal,dtype=np.bool)
|
||||
for it in range(Ntotal):
|
||||
print "sample %d of %d\r"%(it,Ntotal),
|
||||
print("sample %d of %d\r"%(it,Ntotal), end=' ')
|
||||
sys.stdout.flush()
|
||||
prop = np.random.multivariate_normal(current, self.cov*self.scale*self.scale)
|
||||
self.model._set_params_transformed(prop)
|
||||
|
|
|
|||
|
|
@ -1,2 +1,2 @@
|
|||
from scg import SCG
|
||||
from optimization import *
|
||||
from .scg import SCG
|
||||
from .optimization import *
|
||||
@ -1,7 +1,7 @@
|
|||
# Copyright (c) 2012-2014, Max Zwiessele
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
from gradient_descent_update_rules import FletcherReeves, \
|
||||
from .gradient_descent_update_rules import FletcherReeves, \
|
||||
PolakRibiere
|
||||
from Queue import Empty
|
||||
from multiprocessing import Value
|
||||
|
|
@ -74,7 +74,7 @@ class _Async_Optimization(Thread):
|
|||
if self.outq is not None:
|
||||
self.outq.put(self.SENTINEL)
|
||||
if self.messages:
|
||||
print ""
|
||||
print("")
|
||||
self.runsignal.clear()
|
||||
|
||||
def run(self, *args, **kwargs):
|
||||
|
|
@ -213,7 +213,7 @@ class Async_Optimize(object):
|
|||
# # print "^C"
|
||||
# self.runsignal.clear()
|
||||
# c.join()
|
||||
print "WARNING: callback still running, optimisation done!"
|
||||
print("WARNING: callback still running, optimisation done!")
|
||||
return p.result
|
||||
|
||||
class CGD(Async_Optimize):
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ try:
|
|||
rasm_available = True
|
||||
except ImportError:
|
||||
rasm_available = False
|
||||
from scg import SCG
|
||||
from .scg import SCG
|
||||
|
||||
class Optimizer():
|
||||
"""
|
||||
|
|
@ -54,7 +54,7 @@ class Optimizer():
|
|||
self.time = str(end - start)
|
||||
|
||||
def opt(self, f_fp=None, f=None, fp=None):
|
||||
raise NotImplementedError, "this needs to be implemented to use the optimizer class"
|
||||
raise NotImplementedError("this needs to be implemented to use the optimizer class")
|
||||
|
||||
def plot(self):
|
||||
"""
|
||||
|
|
@ -125,9 +125,9 @@ class opt_lbfgsb(Optimizer):
|
|||
|
||||
opt_dict = {}
|
||||
if self.xtol is not None:
|
||||
print "WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it"
|
||||
print("WARNING: l-bfgs-b doesn't have an xtol arg, so I'm going to ignore it")
|
||||
if self.ftol is not None:
|
||||
print "WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it"
|
||||
print("WARNING: l-bfgs-b doesn't have an ftol arg, so I'm going to ignore it")
|
||||
if self.gtol is not None:
|
||||
opt_dict['pgtol'] = self.gtol
|
||||
if self.bfgs_factor is not None:
|
||||
|
|
@ -140,6 +140,10 @@ class opt_lbfgsb(Optimizer):
|
|||
self.funct_eval = opt_result[2]['funcalls']
|
||||
self.status = rcstrings[opt_result[2]['warnflag']]
|
||||
|
||||
#a more helpful error message is available in opt_result in the Error case
|
||||
if opt_result[2]['warnflag']==2:
|
||||
self.status = 'Error' + opt_result[2]['task']
|
||||
|
||||
class opt_simplex(Optimizer):
|
||||
def __init__(self, *args, **kwargs):
|
||||
Optimizer.__init__(self, *args, **kwargs)
|
||||
|
|
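The warnflag check added above surfaces SciPy's own error text: for scipy.optimize.fmin_l_bfgs_b the info dict's warnflag is 0 (converged), 1 (evaluation limit reached) or 2 (other failure, with details in 'task'). A sketch (on Python 3, 'task' may be bytes and need decoding):

import numpy as np
from scipy.optimize import fmin_l_bfgs_b

x, f, d = fmin_l_bfgs_b(lambda x: (x**2).sum(), np.ones(2), approx_grad=True)
status = 'Converged' if d['warnflag'] == 0 else 'Error: %s' % d['task']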
@ -158,7 +162,7 @@ class opt_simplex(Optimizer):
|
|||
if self.ftol is not None:
|
||||
opt_dict['ftol'] = self.ftol
|
||||
if self.gtol is not None:
|
||||
print "WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it"
|
||||
print("WARNING: simplex doesn't have an gtol arg, so I'm going to ignore it")
|
||||
|
||||
opt_result = optimize.fmin(f, self.x_init, (), disp=self.messages,
|
||||
maxfun=self.max_f_eval, full_output=True, **opt_dict)
|
||||
|
|
@ -186,11 +190,11 @@ class opt_rasm(Optimizer):
|
|||
|
||||
opt_dict = {}
|
||||
if self.xtol is not None:
|
||||
print "WARNING: minimize doesn't have an xtol arg, so I'm going to ignore it"
|
||||
print("WARNING: minimize doesn't have an xtol arg, so I'm going to ignore it")
|
||||
if self.ftol is not None:
|
||||
print "WARNING: minimize doesn't have an ftol arg, so I'm going to ignore it"
|
||||
print("WARNING: minimize doesn't have an ftol arg, so I'm going to ignore it")
|
||||
if self.gtol is not None:
|
||||
print "WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it"
|
||||
print("WARNING: minimize doesn't have an gtol arg, so I'm going to ignore it")
|
||||
|
||||
opt_result = rasm.minimize(self.x_init, f_fp, (), messages=self.messages,
|
||||
maxnumfuneval=self.max_f_eval)
|
||||
|
|
|
|||
|
|
@ -21,14 +21,13 @@
|
|||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
# POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
from __future__ import print_function
|
||||
import numpy as np
|
||||
import sys
|
||||
|
||||
|
||||
def print_out(len_maxiters, fnow, current_grad, beta, iteration):
|
||||
print '\r',
|
||||
print '{0:>0{mi}g} {1:> 12e} {2:< 12.6e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
|
||||
print('\r', end=' ')
|
||||
print('{0:>0{mi}g} {1:> 12e} {2:< 12.6e} {3:> 12e}'.format(iteration, float(fnow), float(beta), float(current_grad), mi=len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
|
||||
sys.stdout.flush()
|
||||
|
||||
def exponents(fnow, current_grad):
|
||||
|
|
@ -80,7 +79,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True,
|
|||
|
||||
len_maxiters = len(str(maxiters))
|
||||
if display:
|
||||
print ' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters)
|
||||
print(' {0:{mi}s} {1:11s} {2:11s} {3:11s}'.format("I", "F", "Scale", "|g|", mi=len_maxiters))
|
||||
exps = exponents(fnow, current_grad)
|
||||
p_iter = iteration
|
||||
|
||||
|
|
@ -140,7 +139,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True,
|
|||
b = np.any(n_exps < exps)
|
||||
if a or b:
|
||||
p_iter = iteration
|
||||
print ''
|
||||
print('')
|
||||
if b:
|
||||
exps = n_exps
|
||||
|
||||
|
|
@ -189,6 +188,6 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True,
|
|||
|
||||
if display:
|
||||
print_out(len_maxiters, fnow, current_grad, beta, iteration)
|
||||
print ""
|
||||
print status
|
||||
print("")
|
||||
print(status)
|
||||
return x, flog, function_eval, status
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ class SparseGPMissing(StochasticStorage):
|
|||
Thus, we can just make sure the loop goes over self.d every
|
||||
time.
|
||||
"""
|
||||
self.d = xrange(model.Y_normalized.shape[1])
|
||||
self.d = range(model.Y_normalized.shape[1])
|
||||
|
||||
class SparseGPStochastics(StochasticStorage):
|
||||
"""
|
||||
|
|
|
|||
|
|
@@ -1,23 +1,23 @@
-from _src.kern import Kern
-from _src.rbf import RBF
-from _src.linear import Linear, LinearFull
-from _src.static import Bias, White, Fixed
-from _src.brownian import Brownian
-from _src.stationary import Exponential, OU, Matern32, Matern52, ExpQuad, RatQuad, Cosine
-from _src.mlp import MLP
-from _src.periodic import PeriodicExponential, PeriodicMatern32, PeriodicMatern52
-from _src.independent_outputs import IndependentOutputs, Hierarchical
-from _src.coregionalize import Coregionalize
-from _src.ODE_UY import ODE_UY
-from _src.ODE_UYC import ODE_UYC
-from _src.ODE_st import ODE_st
-from _src.ODE_t import ODE_t
-from _src.poly import Poly
-from _src.eq_ode2 import EQ_ODE2
-from _src.trunclinear import TruncLinear,TruncLinear_inf
-from _src.splitKern import SplitKern,DEtime
-from _src.splitKern import DEtime as DiffGenomeKern
-
-from _src.basis_funcs import LinearSlopeBasisFuncKernel, BasisFuncKernel, ChangePointBasisFuncKernel, DomainKernel
+from ._src.kern import Kern
+from ._src.rbf import RBF
+from ._src.linear import Linear, LinearFull
+from ._src.static import Bias, White, Fixed
+from ._src.brownian import Brownian
+from ._src.stationary import Exponential, OU, Matern32, Matern52, ExpQuad, RatQuad, Cosine
+from ._src.mlp import MLP
+from ._src.periodic import PeriodicExponential, PeriodicMatern32, PeriodicMatern52
+from ._src.independent_outputs import IndependentOutputs, Hierarchical
+from ._src.coregionalize import Coregionalize
+from ._src.ODE_UY import ODE_UY
+from ._src.ODE_UYC import ODE_UYC
+from ._src.ODE_st import ODE_st
+from ._src.ODE_t import ODE_t
+from ._src.poly import Poly
+from ._src.eq_ode2 import EQ_ODE2
+from ._src.trunclinear import TruncLinear,TruncLinear_inf
+from ._src.splitKern import SplitKern,DEtime
+from ._src.splitKern import DEtime as DiffGenomeKern
+
+from ._src.basis_funcs import LinearSlopeBasisFuncKernel, BasisFuncKernel, ChangePointBasisFuncKernel, DomainKernel
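The rewrite above replaces Python 2 implicit relative imports (`from _src.kern import Kern`) with the explicit dotted form, the only form Python 3 accepts inside a package. A sketch of the difference; the package layout and names here are made up for illustration:

    # pkg/__init__.py -- illustrative layout, not from the commit
    # Python 2 only (implicit): the containing package was searched first:
    #     import submodule
    #     from submodule import Thing
    # Python 2.5+ and Python 3 (explicit relative):
    from . import submodule          # pkg/submodule.py
    from .submodule import Thing     # a name inside it
    # Each extra leading dot climbs one package level, as in
    # `from ...core.parameterization import Param` in the hunks below.
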
@@ -1,11 +1,11 @@
 # Copyright (c) 2013, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
 
 class ODE_UY(Kern):
     def __init__(self, input_dim, variance_U=3., variance_Y=1., lengthscale_U=1., lengthscale_Y=1., active_dims=None, name='ode_uy'):
 
@@ -114,7 +114,7 @@ class ODE_UY(Kern):
         elif i==1:
             Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
         else:
-            raise ValueError, "invalid input/output index"
+            raise ValueError("invalid input/output index")
         #Kdiag[slices[0][0]]+= self.variance_U #matern32 diag
         #Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag
         return Kdiag
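The `raise ValueError, "..."` spelling removed above is Python 2-only syntax; Python 3 requires the exception to be constructed with a call, and both spellings raise the identical exception on Python 2, so the rewrite is behaviour-preserving. A sketch (illustrative, not part of the commit):

    def check_index(i):
        # Python 2 only spelling was:  raise ValueError, "invalid input/output index"
        if i not in (0, 1):
            raise ValueError("invalid input/output index")  # works on Py2 and Py3
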
@@ -1,11 +1,11 @@
 # Copyright (c) 2013, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
 
 class ODE_UYC(Kern):
     def __init__(self, input_dim, variance_U=3., variance_Y=1., lengthscale_U=1., lengthscale_Y=1., ubias =1. ,active_dims=None, name='ode_uyc'):
 
@@ -115,7 +115,7 @@ class ODE_UYC(Kern):
         elif i==1:
             Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
         else:
-            raise ValueError, "invalid input/output index"
+            raise ValueError("invalid input/output index")
         #Kdiag[slices[0][0]]+= self.variance_U #matern32 diag
         #Kdiag[slices[1][0]]+= self.variance_U*self.variance_Y*(k1+k2+k3) # diag
         return Kdiag
@@ -1,10 +1,10 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
 
 
 class ODE_st(Kern):
 
@@ -135,7 +135,7 @@ class ODE_st(Kern):
             Kdiag[s1]+= b**2*k1 - 2*a*c*k2 + a**2*k3 + c**2*vyt*vyx
             #Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
         else:
-            raise ValueError, "invalid input/output index"
+            raise ValueError("invalid input/output index")
 
         return Kdiag
@@ -1,8 +1,8 @@
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
-from independent_outputs import index_to_slices
+from .independent_outputs import index_to_slices
 
 
 class ODE_t(Kern):
 
@@ -85,7 +85,7 @@ class ODE_t(Kern):
             Kdiag[s1]+= k1 + vyt+self.ubias
             #Kdiag[s1]+= Vu*Vy*(k1+k2+k3)
         else:
-            raise ValueError, "invalid input/output index"
+            raise ValueError("invalid input/output index")
 
         return Kdiag
@@ -4,7 +4,8 @@
 import numpy as np
 import itertools
 from ...util.caching import Cache_this
-from kern import CombinationKernel
+from .kern import CombinationKernel
+from functools import reduce
 
 class Add(CombinationKernel):
     """
 
@@ -84,10 +85,10 @@ class Add(CombinationKernel):
         psi2 = reduce(np.add, (p.psi2(Z, variational_posterior) for p in self.parts))
         #return psi2
         # compute the "cross" terms
-        from static import White, Bias
-        from rbf import RBF
+        from .static import White, Bias
+        from .rbf import RBF
         #from rbf_inv import RBFInv
-        from linear import Linear
+        from .linear import Linear
         #ffrom fixed import Fixed
 
         for p1, p2 in itertools.combinations(self.parts, 2):
 
@@ -111,11 +112,11 @@ class Add(CombinationKernel):
                 psi2 += np.einsum('nm,no->mo',tmp1,tmp2)+np.einsum('nm,no->mo',tmp2,tmp1)
                 #(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
             else:
-                raise NotImplementedError, "psi2 cannot be computed for this kernel"
+                raise NotImplementedError("psi2 cannot be computed for this kernel")
         return psi2
 
     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from static import White, Bias
+        from .static import White, Bias
         for p1 in self.parts:
             #compute the effective dL_dpsi1. Extra terms appear becaue of the cross terms in psi2!
             eff_dL_dpsi1 = dL_dpsi1.copy()
 
@@ -131,7 +132,7 @@ class Add(CombinationKernel):
             p1.update_gradients_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
 
     def gradients_Z_expectations(self, dL_psi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from static import White, Bias
+        from .static import White, Bias
         target = np.zeros(Z.shape)
         for p1 in self.parts:
             #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2!
 
@@ -149,7 +150,7 @@ class Add(CombinationKernel):
         return target
 
     def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        from static import White, Bias
+        from .static import White, Bias
        target_grads = [np.zeros(v.shape) for v in variational_posterior.parameters]
         for p1 in self.parameters:
             #compute the effective dL_dpsi1. extra terms appear becaue of the cross terms in psi2!
 
@@ -164,7 +165,7 @@ class Add(CombinationKernel):
             else:
                 eff_dL_dpsi1 += dL_dpsi2.sum(0) * p2.psi1(Z, variational_posterior) * 2.
             grads = p1.gradients_qX_expectations(dL_dpsi0, eff_dL_dpsi1, dL_dpsi2, Z, variational_posterior)
-            [np.add(target_grads[i],grads[i],target_grads[i]) for i in xrange(len(grads))]
+            [np.add(target_grads[i],grads[i],target_grads[i]) for i in range(len(grads))]
         return target_grads
 
     def add(self, other):
@@ -1,7 +1,7 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
@@ -1,13 +1,17 @@
 # Copyright (c) 2012, James Hensman and Ricardo Andrade
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 import numpy as np
-from scipy import weave
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.config import config # for assesing whether to use weave
 
+try:
+    from scipy import weave
+except ImportError:
+    config.set('weave', 'working', 'False')
+
 class Coregionalize(Kern):
     """
     Covariance function for intrinsic/linear coregionalization models
 
@@ -61,7 +65,7 @@ class Coregionalize(Kern):
             try:
                 return self._K_weave(X, X2)
             except:
-                print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
+                print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                 config.set('weave', 'working', 'False')
                 return self._K_numpy(X, X2)
         else:
 
@@ -123,7 +127,7 @@ class Coregionalize(Kern):
             try:
                 dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2)
             except:
-                print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
+                print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                 config.set('weave', 'working', 'False')
                 dL_dK_small = self._gradient_reduce_weave(dL_dK, index, index2)
             else:
 
@@ -162,7 +166,7 @@ class Coregionalize(Kern):
 
     def update_gradients_diag(self, dL_dKdiag, X):
         index = np.asarray(X, dtype=np.int).flatten()
-        dL_dKdiag_small = np.array([dL_dKdiag[index==i].sum() for i in xrange(self.output_dim)])
+        dL_dKdiag_small = np.array([dL_dKdiag[index==i].sum() for i in range(self.output_dim)])
         self.W.gradient = 2.*self.W*dL_dKdiag_small[:, None]
         self.kappa.gradient = dL_dKdiag_small
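The pattern introduced above, an optional compiled backend guarded by try/except ImportError with a config flag flipped on failure, keeps the kernel usable when scipy.weave is missing (it was never ported to Python 3). A standalone sketch of the same idea; the helper names here are made up:

    # Optional-backend fallback sketch; illustrative only.
    import numpy as np

    try:
        from scipy import weave        # Python 2 only; absent on Python 3
        WEAVE_WORKING = True
    except ImportError:
        WEAVE_WORKING = False

    def _K_numpy(X):                   # hypothetical pure-numpy fallback
        return np.dot(X, X.T)

    def K(X):
        if WEAVE_WORKING:
            try:
                return _K_weave(X)     # hypothetical compiled fast path
            except Exception:
                print("Weave compilation failed. Falling back to (slower) numpy implementation")
        return _K_numpy(X)
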
@@ -3,7 +3,7 @@
 
 import numpy as np
 from scipy.special import wofz
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.caching import Cache_this
@@ -2,13 +2,13 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 
-from kern import Kern, CombinationKernel
+from .kern import Kern, CombinationKernel
 import numpy as np
 import itertools
 
 def index_to_slices(index):
     """
-    take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index. 
+    take a numpy array of integers (index) and return a nested list of slices such that the slices describe the start, stop points for each integer in the index.
 
     e.g.
    >>> index = np.asarray([0,0,0,1,1,1,2,2,2])
 
@@ -79,10 +79,10 @@ class IndependentOutputs(CombinationKernel):
 
     def update_gradients_full(self,dL_dK,X,X2=None):
         slices = index_to_slices(X[:,self.index_dim])
-        if self.single_kern:
+        if self.single_kern:
             target = np.zeros(self.kern.size)
             kerns = itertools.repeat(self.kern)
-        else:
+        else:
             kerns = self.kern
             target = [np.zeros(kern.size) for kern, _ in zip(kerns, slices)]
         def collate_grads(kern, i, dL, X, X2):
 
@@ -94,20 +94,24 @@ class IndependentOutputs(CombinationKernel):
         else:
             slices2 = index_to_slices(X2[:,self.index_dim])
             [[[collate_grads(kern, i, dL_dK[s,s2],X[s],X2[s2]) for s in slices_i] for s2 in slices_j] for i,(kern,slices_i,slices_j) in enumerate(zip(kerns,slices,slices2))]
-        if self.single_kern: kern.gradient = target
-        else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))]
+        if self.single_kern:
+            self.kern.gradient = target
+        else:
+            [kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))]
 
     def gradients_X(self,dL_dK, X, X2=None):
         target = np.zeros(X.shape)
         kerns = itertools.repeat(self.kern) if self.single_kern else self.kern
         if X2 is None:
             # TODO: make use of index_to_slices
             # FIXME: Broken as X is already sliced out
             print "Warning, gradients_X may not be working, I believe X has already been sliced out by the slicer!"
             values = np.unique(X[:,self.index_dim])
             slices = [X[:,self.index_dim]==i for i in values]
             [target.__setitem__(s, kern.gradients_X(dL_dK[s,s],X[s],None))
              for kern, s in zip(kerns, slices)]
             #slices = index_to_slices(X[:,self.index_dim])
-            #[[np.add(target[s], kern.gradients_X(dL_dK[s,s], X[s]), out=target[s])
+            #[[np.add(target[s], kern.gradients_X(dL_dK[s,s], X[s]), out=target[s])
             # for s in slices_i] for kern, slices_i in zip(kerns, slices)]
             #import ipdb;ipdb.set_trace()
             #[[(np.add(target[s ], kern.gradients_X(dL_dK[s ,ss],X[s ], X[ss]), out=target[s ]),
 
@@ -142,7 +146,7 @@ class IndependentOutputs(CombinationKernel):
             if self.single_kern: target[:] += kern.gradient
             else: target[i][:] += kern.gradient
         [[collate_grads(kern, i, dL_dKdiag[s], X[s,:]) for s in slices_i] for i, (kern, slices_i) in enumerate(zip(kerns, slices))]
-        if self.single_kern: kern.gradient = target
+        if self.single_kern: self.kern.gradient = target
         else:[kern.gradient.__setitem__(Ellipsis, target[i]) for i, [kern, _] in enumerate(zip(kerns, slices))]
 
 class Hierarchical(CombinationKernel):
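For reference, the index_to_slices helper used throughout these kernels groups a run-length-encoded output index into per-output slices, as its own doctest suggests. A rough standalone reimplementation for illustration only (the real GPy helper may differ in detail):

    import numpy as np

    def index_to_slices_sketch(index):
        """Group contiguous runs of each integer into slices,
        one sub-list per integer value. Illustrative only."""
        index = np.asarray(index)
        breaks = np.where(np.diff(index) != 0)[0] + 1   # positions where the value changes
        starts = np.r_[0, breaks]
        stops = np.r_[breaks, index.size]
        out = [[] for _ in range(index.max() + 1)]
        for a, b in zip(starts, stops):
            out[index[a]].append(slice(a, b))
        return out

    print(index_to_slices_sketch([0, 0, 0, 1, 1, 1, 2, 2, 2]))
    # -> [[slice(0, 3, None)], [slice(3, 6, None)], [slice(6, 9, None)]]
    # so dL_dK[s, s] picks out the diagonal block belonging to one output.
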
@@ -4,17 +4,20 @@
 import sys
 import numpy as np
 from ...core.parameterization.parameterized import Parameterized
-from kernel_slice_operations import KernCallsViaSlicerMeta
+from .kernel_slice_operations import KernCallsViaSlicerMeta
 from ...util.caching import Cache_this
 from GPy.core.parameterization.observable_array import ObsAr
 from functools import reduce
+import six
 
 
+@six.add_metaclass(KernCallsViaSlicerMeta)
 class Kern(Parameterized):
     #===========================================================================
     # This adds input slice support. The rather ugly code for slicing can be
     # found in kernel_slice_operations
-    __metaclass__ = KernCallsViaSlicerMeta
+    # __meataclass__ is ignored in Python 3 - needs to be put in the function definiton
+    #__metaclass__ = KernCallsViaSlicerMeta
+    #Here, we use the Python module six to support Py3 and Py2 simultaneously
     #===========================================================================
     _support_GPU=False
     def __init__(self, input_dim, active_dims, name, useGPU=False, *a, **kw):
 
@@ -178,7 +181,7 @@ class Kern(Parameterized):
 
         """
         assert isinstance(other, Kern), "only kernels can be added to kernels..."
-        from add import Add
+        from .add import Add
         return Add([self, other], name=name)
 
    def __mul__(self, other):
 
@@ -210,7 +213,7 @@ class Kern(Parameterized):
 
         """
         assert isinstance(other, Kern), "only kernels can be multiplied to kernels..."
-        from prod import Prod
+        from .prod import Prod
         #kernels = []
         #if isinstance(self, Prod): kernels.extend(self.parameters)
         #else: kernels.append(self)
@@ -3,7 +3,7 @@
 
 
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...util.linalg import tdot
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
@@ -1,7 +1,7 @@
 # Copyright (c) 2013, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 import numpy as np
@@ -3,11 +3,12 @@
 
 
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...util.linalg import mdot
 from ...util.decorators import silence_errors
 from ...core.parameterization.param import Param
 from ...core.parameterization.transformations import Logexp
+from functools import reduce
 
 class Periodic(Kern):
     def __init__(self, input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name):
@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 class Poly(Kern):
@@ -2,9 +2,10 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import numpy as np
-from kern import CombinationKernel
+from .kern import CombinationKernel
 from ...util.caching import Cache_this
 import itertools
+from functools import reduce
 
 
 def numpy_invalid_op_as_exception(func):
@@ -4,10 +4,10 @@
 from ....core.parameterization.parameter_core import Pickleable
 from GPy.util.caching import Cache_this
 from ....core.parameterization import variational
-import rbf_psi_comp
-import ssrbf_psi_comp
-import sslinear_psi_comp
-import linear_psi_comp
+from . import rbf_psi_comp
+from . import ssrbf_psi_comp
+from . import sslinear_psi_comp
+from . import linear_psi_comp
 
 class PSICOMP_RBF(Pickleable):
     @Cache_this(limit=2, ignore_args=(0,))
 
@@ -17,7 +17,7 @@ class PSICOMP_RBF(Pickleable):
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return ssrbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior)
         else:
-            raise ValueError, "unknown distriubtion received for psi-statistics"
+            raise ValueError("unknown distriubtion received for psi-statistics")
 
     @Cache_this(limit=2, ignore_args=(0,1,2,3))
     def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
 
@@ -26,7 +26,7 @@ class PSICOMP_RBF(Pickleable):
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return ssrbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior)
         else:
-            raise ValueError, "unknown distriubtion received for psi-statistics"
+            raise ValueError("unknown distriubtion received for psi-statistics")
 
     def _setup_observers(self):
         pass
 
@@ -40,7 +40,7 @@ class PSICOMP_Linear(Pickleable):
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return sslinear_psi_comp.psicomputations(variance, Z, variational_posterior)
         else:
-            raise ValueError, "unknown distriubtion received for psi-statistics"
+            raise ValueError("unknown distriubtion received for psi-statistics")
 
     @Cache_this(limit=2, ignore_args=(0,1,2,3))
     def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior):
 
@@ -49,7 +49,7 @@ class PSICOMP_Linear(Pickleable):
         elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
             return sslinear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior)
         else:
-            raise ValueError, "unknown distriubtion received for psi-statistics"
+            raise ValueError("unknown distriubtion received for psi-statistics")
 
     def _setup_observers(self):
         pass
@@ -3,9 +3,9 @@
 
 
 import numpy as np
-from stationary import Stationary
-from psi_comp import PSICOMP_RBF
-from psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU
+from .stationary import Stationary
+from .psi_comp import PSICOMP_RBF
+from .psi_comp.rbf_psi_gpucomp import PSICOMP_RBF_GPU
 from ...util.config import *
 
 class RBF(Stationary):
@@ -3,7 +3,7 @@ A new kernel
 """
 
 import numpy as np
-from kern import Kern,CombinationKernel
+from .kern import Kern,CombinationKernel
 from .independent_outputs import index_to_slices
 import itertools
 
@@ -104,7 +104,7 @@ class SplitKern(CombinationKernel):
         assert len(slices2)<=2, 'The Split kernel only support two different indices'
         target = np.zeros((X.shape[0], X2.shape[0]))
         # diagonal blocks
-        [[target.__setitem__((s,s2), self.kern.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[i], slices2[i])] for i in xrange(min(len(slices),len(slices2)))]
+        [[target.__setitem__((s,s2), self.kern.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[i], slices2[i])] for i in range(min(len(slices),len(slices2)))]
         if len(slices)>1:
             [target.__setitem__((s,s2), self.kern_cross.K(X[s,:],X2[s2,:])) for s,s2 in itertools.product(slices[1], slices2[0])]
         if len(slices2)>1:
 
@@ -135,7 +135,7 @@ class SplitKern(CombinationKernel):
         else:
             assert dL_dK.shape==(X.shape[0],X2.shape[0])
             slices2 = index_to_slices(X2[:,self.index_dim])
-            [[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s,s2 in itertools.product(slices[i], slices2[i])] for i in xrange(min(len(slices),len(slices2)))]
+            [[collate_grads(dL_dK[s,s2],X[s],X2[s2]) for s,s2 in itertools.product(slices[i], slices2[i])] for i in range(min(len(slices),len(slices2)))]
             if len(slices)>1:
                 [collate_grads(dL_dK[s,s2], X[s], X2[s2], True) for s,s2 in itertools.product(slices[1], slices2[0])]
             if len(slices2)>1:
@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 
-from kern import Kern
+from .kern import Kern
 import numpy as np
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
@@ -2,16 +2,21 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.linalg import tdot
 from ... import util
 import numpy as np
-from scipy import integrate, weave
+from scipy import integrate
 from ...util.config import config # for assesing whether to use weave
 from ...util.caching import Cache_this
 
+try:
+    from scipy import weave
+except ImportError:
+    config.set('weave', 'working', 'False')
+
 class Stationary(Kern):
     """
     Stationary kernels (covariance functions).
 
@@ -65,10 +70,10 @@ class Stationary(Kern):
         self.link_parameters(self.variance, self.lengthscale)
 
     def K_of_r(self, r):
-        raise NotImplementedError, "implement the covariance function as a fn of r to use this class"
+        raise NotImplementedError("implement the covariance function as a fn of r to use this class")
 
     def dK_dr(self, r):
-        raise NotImplementedError, "implement derivative of the covariance function wrt r to use this class"
+        raise NotImplementedError("implement derivative of the covariance function wrt r to use this class")
 
     @Cache_this(limit=5, ignore_args=())
     def K(self, X, X2=None):
 
@@ -165,11 +170,11 @@ class Stationary(Kern):
                 try:
                     self.lengthscale.gradient = self.weave_lengthscale_grads(tmp, X, X2)
                 except:
-                    print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
+                    print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                     config.set('weave', 'working', 'False')
-                    self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)])
+                    self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in range(self.input_dim)])
             else:
-                self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in xrange(self.input_dim)])
+                self.lengthscale.gradient = np.array([np.einsum('ij,ij,...', tmp, np.square(X[:,q:q+1] - X2[:,q:q+1].T), -1./self.lengthscale[q]**3) for q in range(self.input_dim)])
         else:
             r = self._scaled_dist(X, X2)
             self.lengthscale.gradient = -np.sum(dL_dr*r)/self.lengthscale
 
@@ -214,7 +219,7 @@ class Stationary(Kern):
             try:
                 return self.gradients_X_weave(dL_dK, X, X2)
             except:
-                print "\n Weave compilation failed. Falling back to (slower) numpy implementation\n"
+                print("\n Weave compilation failed. Falling back to (slower) numpy implementation\n")
                 config.set('weave', 'working', 'False')
                 return self.gradients_X_(dL_dK, X, X2)
         else:
 
@@ -234,7 +239,7 @@ class Stationary(Kern):
 
         #the lower memory way with a loop
         ret = np.empty(X.shape, dtype=np.float64)
-        for q in xrange(self.input_dim):
+        for q in range(self.input_dim):
             np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=ret[:,q])
         ret /= self.lengthscale**2
 
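On the xrange-to-range replacements in this and earlier hunks: xrange is gone in Python 3, where range is already a lazy sequence; on Python 2 plain range builds a list, but for loop counts like input or output dimensions the list cost is negligible, so bare range is the usual compromise. A trivial sketch (illustrative only):

    # Python 2: range(n) -> list, xrange(n) -> lazy iterator
    # Python 3: range(n) -> lazy range object; xrange does not exist
    total = 0
    for q in range(5):   # valid on both, lazy on Python 3
        total += q
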
@@ -1,7 +1,7 @@
 # Check Matthew Rocklin's blog post.
 import sympy as sym
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...core.symbolic import Symbolic_core
 
 
@@ -11,7 +11,7 @@ class Symbolic(Kern, Symbolic_core):
     def __init__(self, input_dim, k=None, output_dim=1, name='symbolic', parameters=None, active_dims=None, operators=None, func_modules=[]):
 
         if k is None:
-            raise ValueError, "You must provide an argument for the covariance function."
+            raise ValueError("You must provide an argument for the covariance function.")
 
         Kern.__init__(self, input_dim, active_dims, name=name)
         kdiag = k
@@ -3,7 +3,7 @@
 
 
 import numpy as np
-from kern import Kern
+from .kern import Kern
 from ...core.parameterization import Param
 from ...core.parameterization.transformations import Logexp
 from ...util.caching import Cache_this
@@ -1,9 +1,10 @@
-from bernoulli import Bernoulli
-from exponential import Exponential
-from gaussian import Gaussian
-from gamma import Gamma
-from poisson import Poisson
-from student_t import StudentT
-from likelihood import Likelihood
-from mixed_noise import MixedNoise
-from binomial import Binomial
+from .bernoulli import Bernoulli
+from .exponential import Exponential
+from .gaussian import Gaussian
+from .gamma import Gamma
+from .poisson import Poisson
+from .student_t import StudentT
+from .likelihood import Likelihood
+from .mixed_noise import MixedNoise
+from .binomial import Binomial
 
@@ -3,9 +3,8 @@
 
 import numpy as np
 from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf
-import link_functions
-from likelihood import Likelihood
-from scipy import stats
+from . import link_functions
+from .likelihood import Likelihood
 
 class Bernoulli(Likelihood):
     """
 
@@ -81,19 +80,18 @@ class Bernoulli(Likelihood):
         if isinstance(self.gp_link, link_functions.Probit):
 
             if gh_points is None:
-                gh_x, gh_w = np.polynomial.hermite.hermgauss(20)
+                gh_x, gh_w = self._gh_points()
             else:
                 gh_x, gh_w = gh_points
 
-            from scipy import stats
-
             shape = m.shape
             m,v,Y = m.flatten(), v.flatten(), Y.flatten()
             Ysign = np.where(Y==1,1,-1)
             X = gh_x[None,:]*np.sqrt(2.*v[:,None]) + (m*Ysign)[:,None]
-            p = stats.norm.cdf(X)
+            p = std_norm_cdf(X)
             p = np.clip(p, 1e-9, 1.-1e-9) # for numerical stability
-            N = stats.norm.pdf(X)
+            N = std_norm_pdf(X)
             F = np.log(p).dot(gh_w)
             NoverP = N/p
             dF_dm = (NoverP*Ysign[:,None]).dot(gh_w)
 
@@ -106,10 +104,10 @@ class Bernoulli(Likelihood):
     def predictive_mean(self, mu, variance, Y_metadata=None):
 
         if isinstance(self.gp_link, link_functions.Probit):
-            return stats.norm.cdf(mu/np.sqrt(1+variance))
+            return std_norm_cdf(mu/np.sqrt(1+variance))
 
         elif isinstance(self.gp_link, link_functions.Heaviside):
-            return stats.norm.cdf(mu/np.sqrt(variance))
+            return std_norm_cdf(mu/np.sqrt(variance))
 
         else:
             raise NotImplementedError
@@ -3,8 +3,8 @@
 
 import numpy as np
 from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf
-import link_functions
-from likelihood import Likelihood
+from . import link_functions
+from .likelihood import Likelihood
 from scipy import special
 
 class Binomial(Likelihood):
@@ -5,8 +5,8 @@
 import numpy as np
 from scipy import stats,special
 import scipy as sp
-import link_functions
-from likelihood import Likelihood
+from . import link_functions
+from .likelihood import Likelihood
 
 class Exponential(Likelihood):
     """
|
|
@ -6,8 +6,8 @@ import numpy as np
|
|||
from scipy import stats,special
|
||||
import scipy as sp
|
||||
from ..core.parameterization import Param
|
||||
import link_functions
|
||||
from likelihood import Likelihood
|
||||
from . import link_functions
|
||||
from .likelihood import Likelihood
|
||||
|
||||
class Gamma(Likelihood):
|
||||
"""
|
||||
|
|
|
|||
|
|
@@ -13,8 +13,8 @@ James 11/12/13
 
 import numpy as np
 from scipy import stats, special
-import link_functions
-from likelihood import Likelihood
+from . import link_functions
+from .likelihood import Likelihood
 from ..core.parameterization import Param
 from ..core.parameterization.transformations import Logexp
 from scipy import stats
 
@@ -35,8 +35,8 @@ class Gaussian(Likelihood):
             gp_link = link_functions.Identity()
 
         if not isinstance(gp_link, link_functions.Identity):
-            print "Warning, Exact inference is not implemeted for non-identity link functions,\
-            if you are not already, ensure Laplace inference_method is used"
+            print("Warning, Exact inference is not implemeted for non-identity link functions,\
+            if you are not already, ensure Laplace inference_method is used")
 
         super(Gaussian, self).__init__(gp_link, name=name)
 
@@ -132,10 +132,8 @@ class Gaussian(Likelihood):
         :returns: log likelihood evaluated for this point
         :rtype: float
         """
-        N = y.shape[0]
         ln_det_cov = np.log(self.variance)
-
-        return -0.5*((y-link_f)**2/self.variance + ln_det_cov + np.log(2.*np.pi))
+        return -(1.0/(2*self.variance))*((y-link_f)**2) - 0.5*ln_det_cov - 0.5*np.log(2.*np.pi)
 
     def dlogpdf_dlink(self, link_f, y, Y_metadata=None):
         """
 
@@ -220,7 +218,6 @@ class Gaussian(Likelihood):
         """
         e = y - link_f
         s_4 = 1.0/(self.variance**2)
-        N = y.shape[0]
         dlik_dsigma = -0.5/self.variance + 0.5*s_4*np.square(e)
         return dlik_dsigma
 
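The two return lines swapped in the logpdf hunk above are the same quantity, the Gaussian log density log N(y | f, sigma^2) = -(y-f)^2/(2 sigma^2) - 0.5 log sigma^2 - 0.5 log 2 pi; the rewrite only distributes the -0.5 factor. A quick standalone numerical check (illustrative, not from the commit):

    import numpy as np

    y, f, variance = 1.3, 0.7, 0.25
    old = -0.5 * ((y - f)**2 / variance + np.log(variance) + np.log(2 * np.pi))
    new = -(1.0 / (2 * variance)) * (y - f)**2 - 0.5 * np.log(variance) - 0.5 * np.log(2 * np.pi)
    assert np.isclose(old, new)   # identical up to floating point
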
|
|
@ -1,10 +1,10 @@
|
|||
# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
|
||||
# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt)
|
||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||
|
||||
import numpy as np
|
||||
from scipy import stats,special
|
||||
import scipy as sp
|
||||
import link_functions
|
||||
from . import link_functions
|
||||
from ..util.misc import chain_1, chain_2, chain_3, blockify_dhess_dtheta, blockify_third, blockify_hessian, safe_exp
|
||||
from scipy.integrate import quad
|
||||
import warnings
|
||||
|
|
@ -70,7 +70,7 @@ class Likelihood(Parameterized):
|
|||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def log_predictive_density(self, y_test, mu_star, var_star):
|
||||
def log_predictive_density(self, y_test, mu_star, var_star, Y_metadata=None):
|
||||
"""
|
||||
Calculation of the log predictive density
|
||||
|
||||
|
|
@@ -87,15 +87,51 @@ class Likelihood(Parameterized):
         assert y_test.shape==mu_star.shape
         assert y_test.shape==var_star.shape
         assert y_test.shape[1] == 1
-        def integral_generator(y, m, v):
-            """Generate a function which can be integrated to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*"""
-            def f(f_star):
-                return self.pdf(f_star, y)*np.exp(-(1./(2*v))*np.square(m-f_star))
+
+        flat_y_test = y_test.flatten()
+        flat_mu_star = mu_star.flatten()
+        flat_var_star = var_star.flatten()
+
+        if Y_metadata is not None:
+            #Need to zip individual elements of Y_metadata aswell
+            Y_metadata_flat = {}
+            if Y_metadata is not None:
+                for key, val in Y_metadata.items():
+                    Y_metadata_flat[key] = np.atleast_1d(val).reshape(-1,1)
+
+            zipped_values = []
+
+            for i in range(y_test.shape[0]):
+                y_m = {}
+                for key, val in Y_metadata_flat.items():
+                    if np.isscalar(val) or val.shape[0] == 1:
+                        y_m[key] = val
+                    else:
+                        #Won't broadcast yet
+                        y_m[key] = val[i]
+                zipped_values.append((flat_y_test[i], flat_mu_star[i], flat_var_star[i], y_m))
+        else:
+            #Otherwise just pass along None's
+            zipped_values = zip(flat_y_test, flat_mu_star, flat_var_star, [None]*y_test.shape[0])
+
+        def integral_generator(yi, mi, vi, yi_m):
+            """Generate a function which can be integrated
+            to give p(Y*|Y) = int p(Y*|f*)p(f*|Y) df*"""
+            def f(fi_star):
+                #exponent = np.exp(-(1./(2*v))*np.square(m-f_star))
+                #from GPy.util.misc import safe_exp
+                #exponent = safe_exp(exponent)
+                #return self.pdf(f_star, y, y_m)*exponent
+
+                #More stable in the log space
+                return np.exp(self.logpdf(fi_star, yi, yi_m)
+                              - 0.5*np.log(2*np.pi*vi)
+                              - 0.5*np.square(mi-fi_star)/vi)
             return f
 
-        scaled_p_ystar, accuracy = zip(*[quad(integral_generator(y, m, v), -np.inf, np.inf) for y, m, v in zip(y_test.flatten(), mu_star.flatten(), var_star.flatten())])
-        scaled_p_ystar = np.array(scaled_p_ystar).reshape(-1,1)
-        p_ystar = scaled_p_ystar/np.sqrt(2*np.pi*var_star)
+        p_ystar, _ = zip(*[quad(integral_generator(yi, mi, vi, yi_m), -np.inf, np.inf)
+                           for yi, mi, vi, yi_m in zipped_values])
+        p_ystar = np.array(p_ystar).reshape(-1, 1)
         return np.log(p_ystar)
 
     def _moments_match_ep(self,obs,tau,v):
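The rewritten integrand above evaluates p(y*|f*) N(f*; m, v) as exp(logpdf + log Gaussian) rather than multiplying a pdf by an unnormalised exp term, which avoids under/overflow before quad ever sees the value, and it folds the 1/sqrt(2 pi v) normaliser in up front instead of dividing afterwards. A standalone sketch of the same trick; the Gaussian likelihood here is illustrative:

    import numpy as np
    from scipy.integrate import quad

    def log_gauss(x, mu, var):
        return -0.5 * np.log(2 * np.pi * var) - 0.5 * (x - mu)**2 / var

    # p(y*|Y) = int p(y*|f) p(f|Y) df, evaluated in log space per point
    def integrand(f, y, m, v):
        return np.exp(log_gauss(y, f, 0.1) + log_gauss(f, m, v))

    p, _ = quad(integrand, -np.inf, np.inf, args=(0.5, 0.0, 1.0))
    log_pred_density = np.log(p)
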
@@ -132,6 +168,13 @@ class Likelihood(Parameterized):
 
         return z, mean, variance
 
+    #only compute gh points if required
+    __gh_points = None
+    def _gh_points(self):
+        if self.__gh_points is None:
+            self.__gh_points = np.polynomial.hermite.hermgauss(20)
+        return self.__gh_points
+
     def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
         """
         Use Gauss-Hermite Quadrature to compute
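The `_gh_points` helper added above memoises the 20-point Gauss-Hermite grid in a name-mangled class attribute, so repeated likelihood calls stop recomputing `hermgauss`. A standalone sketch of the same memoisation (class name made up):

    import numpy as np

    class QuadratureMixin(object):
        __gh_points = None                    # computed lazily, then reused

        def _gh_points(self):
            if self.__gh_points is None:
                self.__gh_points = np.polynomial.hermite.hermgauss(20)
            return self.__gh_points

    gh_x, gh_w = QuadratureMixin()._gh_points()
    # E[g(f)] under N(m, v) ~= sum_i w_i * g(m + sqrt(2 v) * x_i) / sqrt(pi)
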
@@ -144,10 +187,9 @@ class Likelihood(Parameterized):
 
         if no gh_points are passed, we construct them using defualt options
         """
-        #May be broken
 
         if gh_points is None:
-            gh_x, gh_w = np.polynomial.hermite.hermgauss(20)
+            gh_x, gh_w = self._gh_points()
         else:
             gh_x, gh_w = gh_points
 
@@ -215,8 +257,8 @@ class Likelihood(Parameterized):
         return mean
 
     def _conditional_mean(self, f):
-        """Quadrature calculation of the conditional mean: E(Y_star|f_star)"""
-        raise NotImplementedError, "implement this function to make predictions"
+        """Quadrature calculation of the conditional mean: E(Y_star|f)"""
+        raise NotImplementedError("implement this function to make predictions")
 
     def predictive_variance(self, mu,variance, predictive_mean=None, Y_metadata=None):
         """
@@ -506,9 +548,9 @@ class Likelihood(Parameterized):
 
         #Parameters are stacked vertically. Must be listed in same order as 'get_param_names'
         # ensure we have gradients for every parameter we want to optimize
-        assert dlogpdf_dtheta.shape[0] == self.size #f, d x num_param array
-        assert dlogpdf_df_dtheta.shape[0] == self.size #f x d x num_param matrix or just f x num_param
-        assert d2logpdf_df2_dtheta.shape[0] == self.size #f x num_param matrix or f x d x num_param matrix, f x f x num_param or f x f x d x num_param
+        assert dlogpdf_dtheta.shape[0] == self.size #num_param array x f, d
+        assert dlogpdf_df_dtheta.shape[0] == self.size #num_param x f x d x matrix or just num_param x f
+        assert d2logpdf_df2_dtheta.shape[0] == self.size #num_param x f matrix or num_param x f x d x matrix, num_param x f x f or num_param x f x f x d
 
         return dlogpdf_dtheta, dlogpdf_df_dtheta, d2logpdf_df2_dtheta
 
@@ -565,7 +607,7 @@ class Likelihood(Parameterized):
         :param burnin: number of samples to use for burnin (will need modifying)
         :param Y_metadata: Y_metadata for pdf
         """
-        print "Warning, using MCMC for sampling y*, needs to be tuned!"
+        print("Warning, using MCMC for sampling y*, needs to be tuned!")
         if starting_loc is None:
             starting_loc = fNew
         from functools import partial
 
@@ -619,8 +661,8 @@ class Likelihood(Parameterized):
 
             #Show progress
             if i % int((burn_in+num_samples)*0.1) == 0:
-                print "{}% of samples taken ({})".format((i/int((burn_in+num_samples)*0.1)*10), i)
-                print "Last run accept ratio: ", accept_ratio[i]
+                print("{}% of samples taken ({})".format((i/int((burn_in+num_samples)*0.1)*10), i))
+                print("Last run accept ratio: ", accept_ratio[i])
 
-        print "Average accept ratio: ", np.mean(accept_ratio)
+        print("Average accept ratio: ", np.mean(accept_ratio))
         return chain_values
@@ -1,13 +1,10 @@
-# Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
+# Copyright (c) 2012-2015 The GPy authors (see AUTHORS.txt)
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 import numpy as np
-from scipy import stats
-from ..util.univariate_Gaussian import std_norm_cdf, std_norm_pdf
-import scipy as sp
-
-_exp_lim_val = np.finfo(np.float64).max
-_lim_val = np.log(_exp_lim_val)
+from GPy.util.univariate_Gaussian import std_norm_pdf,std_norm_cdf,inv_std_norm_cdf
+from ..util.misc import safe_exp, safe_square, safe_cube, safe_quad, safe_three_times
 
 class GPTransformation(object):
     """
@@ -79,13 +76,10 @@ class Probit(GPTransformation):
         return std_norm_pdf(f)
 
     def d2transf_df2(self,f):
-        #FIXME
         return -f * std_norm_pdf(f)
 
     def d3transf_df3(self,f):
-        #FIXME
-        f2 = f**2
-        return -(1/(np.sqrt(2*np.pi)))*np.exp(-0.5*(f2))*(1-f2)
+        return (safe_square(f)-1.)*std_norm_pdf(f)
 
 
 class Cloglog(GPTransformation):
@@ -98,22 +92,26 @@ class Cloglog(GPTransformation):
     or
 
         f = \log (-\log(1-p))
 
 
     """
     def transf(self,f):
-        return 1-np.exp(-np.exp(f))
+        ef = safe_exp(f)
+        return 1-np.exp(-ef)
 
     def dtransf_df(self,f):
-        return np.exp(f-np.exp(f))
+        ef = safe_exp(f)
+        return np.exp(f-ef)
 
     def d2transf_df2(self,f):
-        ef = np.exp(f)
+        ef = safe_exp(f)
         return -np.exp(f-ef)*(ef-1.)
 
     def d3transf_df3(self,f):
-        ef = np.exp(f)
-        return np.exp(f-ef)*(1.-3*ef + ef**2)
-
+        ef = safe_exp(f)
+        ef2 = safe_square(ef)
+        three_times_ef = safe_three_times(ef)
+        r_val = np.exp(f-ef)*(1.-three_times_ef + ef2)
+        return r_val
 
 class Log(GPTransformation):
     """
@@ -123,16 +121,16 @@ class Log(GPTransformation):
 
     """
     def transf(self,f):
-        return np.exp(np.clip(f, -_lim_val, _lim_val))
+        return safe_exp(f)
 
     def dtransf_df(self,f):
-        return np.exp(np.clip(f, -_lim_val, _lim_val))
+        return safe_exp(f)
 
     def d2transf_df2(self,f):
-        return np.exp(np.clip(f, -_lim_val, _lim_val))
+        return safe_exp(f)
 
     def d3transf_df3(self,f):
-        return np.exp(np.clip(f, -_lim_val, _lim_val))
+        return safe_exp(f)
 
 class Log_ex_1(GPTransformation):
     """
@@ -142,17 +140,20 @@ class Log_ex_1(GPTransformation):
 
     """
    def transf(self,f):
-        return np.log(1.+np.exp(f))
+        return np.log1p(safe_exp(f))
 
     def dtransf_df(self,f):
-        return np.exp(f)/(1.+np.exp(f))
+        ef = safe_exp(f)
+        return ef/(1.+ef)
 
     def d2transf_df2(self,f):
-        aux = np.exp(f)/(1.+np.exp(f))
+        ef = safe_exp(f)
+        aux = ef/(1.+ef)
         return aux*(1.-aux)
 
     def d3transf_df3(self,f):
-        aux = np.exp(f)/(1.+np.exp(f))
+        ef = safe_exp(f)
+        aux = ef/(1.+ef)
         daux_df = aux*(1.-aux)
         return daux_df - (2.*aux*daux_df)
 
@@ -160,21 +161,24 @@ class Reciprocal(GPTransformation):
     def transf(self,f):
         return 1./f
 
-    def dtransf_df(self,f):
-        return -1./(f**2)
+    def dtransf_df(self, f):
+        f2 = safe_square(f)
+        return -1./f2
 
-    def d2transf_df2(self,f):
-        return 2./(f**3)
+    def d2transf_df2(self, f):
+        f3 = safe_cube(f)
+        return 2./f3
 
     def d3transf_df3(self,f):
-        return -6./(f**4)
+        f4 = safe_quad(f)
+        return -6./f4
 
 class Heaviside(GPTransformation):
     """
 
     .. math::
 
-        g(f) = I_{x \\in A}
+        g(f) = I_{x \\geq 0}
 
     """
     def transf(self,f):
@@ -182,7 +186,7 @@ class Heaviside(GPTransformation):
         return np.where(f>0, 1, 0)
 
     def dtransf_df(self,f):
-        raise NotImplementedError, "This function is not differentiable!"
+        raise NotImplementedError("This function is not differentiable!")
 
     def d2transf_df2(self,f):
-        raise NotImplementedError, "This function is not differentiable!"
+        raise NotImplementedError("This function is not differentiable!")
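The link-function rewrites above funnel every exponential through safe_exp (and squares, cubes and fourth powers through safe_square, safe_cube, safe_quad) from GPy.util.misc, replacing the local np.clip idiom so float64 never overflows. A sketch of what such a guarded exp looks like; this is my paraphrase of the idea, not GPy's exact implementation:

    import numpy as np

    _lim_val = np.log(np.finfo(np.float64).max)   # ~709.78 for float64

    def safe_exp_sketch(f):
        # exp never sees an argument it cannot represent
        return np.exp(np.clip(f, -_lim_val, _lim_val))

    safe_exp_sketch(np.array([-1000., 0., 1000.]))
    # -> [~0, 1, ~1.8e308] with no overflow warning
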
@@ -3,9 +3,9 @@
 
 import numpy as np
 from scipy import stats, special
-import link_functions
-from likelihood import Likelihood
-from gaussian import Gaussian
+from . import link_functions
+from .likelihood import Likelihood
+from .gaussian import Gaussian
 from ..core.parameterization import Param
 from ..core.parameterization.transformations import Logexp
 from ..core.parameterization import Parameterized
@@ -5,8 +5,8 @@ from __future__ import division
 import numpy as np
 from scipy import stats,special
 import scipy as sp
-import link_functions
-from likelihood import Likelihood
+from . import link_functions
+from .likelihood import Likelihood
 
 class Poisson(Likelihood):
     """
@@ -4,10 +4,10 @@
 import numpy as np
 from scipy import stats, special
 import scipy as sp
-import link_functions
+from . import link_functions
 from scipy import stats, integrate
 from scipy.special import gammaln, gamma
-from likelihood import Likelihood
+from .likelihood import Likelihood
 from ..core.parameterization import Param
 from ..core.parameterization.transformations import Logexp
 
@@ -1,8 +1,9 @@
 # Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from kernel import Kernel
-from linear import Linear
-from mlp import MLP
-from additive import Additive
-from compound import Compound
+from .kernel import Kernel
+from .linear import Linear
+from .mlp import MLP
+from .additive import Additive
+from .compound import Compound
 
@@ -1,23 +1,23 @@
 # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
-from gp_regression import GPRegression
-from gp_classification import GPClassification
-from sparse_gp_regression import SparseGPRegression, SparseGPRegressionUncertainInput
-from sparse_gp_classification import SparseGPClassification
-from gplvm import GPLVM
-from bcgplvm import BCGPLVM
-from sparse_gplvm import SparseGPLVM
-from warped_gp import WarpedGP
-from bayesian_gplvm import BayesianGPLVM
-from mrd import MRD
-from gradient_checker import GradientChecker
-from ss_gplvm import SSGPLVM
-from gp_coregionalized_regression import GPCoregionalizedRegression
-from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
-from gp_heteroscedastic_regression import GPHeteroscedasticRegression
-from ss_mrd import SSMRD
-from gp_kronecker_gaussian_regression import GPKroneckerGaussianRegression
-from gp_var_gauss import GPVariationalGaussianApproximation
-from one_vs_all_classification import OneVsAllClassification
-from one_vs_all_sparse_classification import OneVsAllSparseClassification
+from .gp_regression import GPRegression
+from .gp_classification import GPClassification
+from .sparse_gp_regression import SparseGPRegression, SparseGPRegressionUncertainInput
+from .sparse_gp_classification import SparseGPClassification
+from .gplvm import GPLVM
+from .bcgplvm import BCGPLVM
+from .sparse_gplvm import SparseGPLVM
+from .warped_gp import WarpedGP
+from .bayesian_gplvm import BayesianGPLVM
+from .mrd import MRD
+from .gradient_checker import GradientChecker, HessianChecker, SkewChecker
+from .ss_gplvm import SSGPLVM
+from .gp_coregionalized_regression import GPCoregionalizedRegression
+from .sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
+from .gp_heteroscedastic_regression import GPHeteroscedasticRegression
+from .ss_mrd import SSMRD
+from .gp_kronecker_gaussian_regression import GPKroneckerGaussianRegression
+from .gp_var_gauss import GPVariationalGaussianApproximation
+from .one_vs_all_classification import OneVsAllClassification
+from .one_vs_all_sparse_classification import OneVsAllSparseClassification
@@ -24,7 +24,7 @@ class BayesianGPLVM(SparseGP_MPI):
     def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
                  Z=None, kernel=None, inference_method=None, likelihood=None,
                  name='bayesian gplvm', mpi_comm=None, normalizer=None,
-                 missing_data=False, stochastic=False, batchsize=1):
+                 missing_data=False, stochastic=False, batchsize=1, Y_metadata=None):
 
         self.logger = logging.getLogger(self.__class__.__name__)
         if X is None:
 
@@ -69,6 +69,7 @@ class BayesianGPLVM(SparseGP_MPI):
                                            name=name, inference_method=inference_method,
                                            normalizer=normalizer, mpi_comm=mpi_comm,
                                            variational_prior=self.variational_prior,
+                                           Y_metadata=Y_metadata
                                            )
         self.link_parameter(self.X, index=0)
 
@@ -83,7 +84,7 @@ class BayesianGPLVM(SparseGP_MPI):
     def parameters_changed(self):
         super(BayesianGPLVM,self).parameters_changed()
         if isinstance(self.inference_method, VarDTC_minibatch):
-             return
+            return
 
         kl_fctr = 1.
         self._log_marginal_likelihood -= kl_fctr*self.variational_prior.KL_divergence(self.X)
@@ -5,6 +5,8 @@ from ..core.model import Model
 import itertools
 import numpy
+from ..core.parameterization import Param
 np = numpy
+from ..util.block_matrices import get_blocks, get_block_shapes, unblock, get_blocks_3d, get_block_shapes_3d
 
 def get_shape(x):
     if isinstance(x, numpy.ndarray):
@@ -111,3 +113,261 @@ class GradientChecker(Model):
         #for name, shape in zip(self.names, self.shapes):
             #_param_names.extend(map(lambda nameshape: ('_'.join(nameshape)).strip('_'), itertools.izip(itertools.repeat(name), itertools.imap(lambda t: '_'.join(map(str, t)), itertools.product(*map(lambda xi: range(xi), shape))))))
         #return _param_names
+
+
+class HessianChecker(GradientChecker):
+
+    def __init__(self, f, df, ddf, x0, names=None, *args, **kwargs):
+        """
+        :param f: Function (only used for numerical hessian gradient)
+        :param df: Gradient of function to check
+        :param ddf: Analytical gradient function
+        :param x0:
+            Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names).
+            Can be a list of arrays, if takes a list of arrays. This list will be passed
+            to f and df in the same order as given here.
+            If only one argument, make sure not to pass a list!!!
+
+        :type x0: [array-like] | array-like | float | int
+        :param names:
+            Names to print, when performing gradcheck. If a list was passed to x0
+            a list of names with the same length is expected.
+        :param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs)
+
+        """
+        super(HessianChecker, self).__init__(df, ddf, x0, names=names, *args, **kwargs)
+        self._f = f
+        self._df = df
+        self._ddf = ddf
+
+    def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False):
+        """
+        Overwrite checkgrad method to check whole block instead of looping through
+
+        Shows diagnostics using matshow instead
+
+        :param verbose: If True, print a "full" checking of each parameter
+        :type verbose: bool
+        :param step: The size of the step around which to linearise the objective
+        :type step: float (default 1e-6)
+        :param tolerance: the tolerance allowed (see note)
+        :type tolerance: float (default 1e-3)
+
+        Note:-
+           The gradient is considered correct if the ratio of the analytical
+           and numerical gradients is within <tolerance> of unity.
+        """
+        try:
+            import numdifftools as nd
+        except:
+            raise ImportError("Don't have numdifftools package installed, it is not a GPy dependency as of yet, it is only used for hessian tests")
+
+        if target_param:
+            raise NotImplementedError('Only basic functionality is provided with this gradchecker')
+
+        #Repeat for each parameter, not the nicest but shouldn't be many cases where there are many
+        #variables
+        current_index = 0
+        for name, shape in zip(self.names, self.shapes):
+            current_size = numpy.prod(shape)
+            x = self.optimizer_array.copy()
+            #x = self._get_params_transformed().copy()
+            x = x[current_index:current_index + current_size].reshape(shape)
+
+            # Check gradients
+            analytic_hess = self._ddf(x)
+            if analytic_hess.shape[1] == 1:
+                analytic_hess = numpy.diagflat(analytic_hess)
+
+            #From the docs:
+            #x0 : vector location
+            #at which to differentiate fun
+            #If x0 is an N x M array, then fun is assumed to be a function
+            #of N*M variables., thus we must have it flat, not (N,1), but just (N,)
+            #numeric_hess_partial = nd.Hessian(self._f, vectorized=False)
+            numeric_hess_partial = nd.Jacobian(self._df, vectorized=False)
+            #numeric_hess_partial = nd.Derivative(self._df, vectorized=True)
+            numeric_hess = numeric_hess_partial(x)
+
+            check_passed = self.checkgrad_block(analytic_hess, numeric_hess, verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=plot)
+            current_index += current_size
+        return check_passed
        """
        Checkgrad a block matrix
        """
        if analytic_hess.dtype is np.dtype('object'):
            #Make the numeric hessian into a block matrix as well
            real_size = get_block_shapes(analytic_hess)
            num_elements = np.sum(real_size)
            if (num_elements, num_elements) == numeric_hess.shape:
                #If the sizes are the same we assume the layouts match
                #(we have not fixed any values, so the numeric hessian is the whole hessian)
                numeric_hess = get_blocks(numeric_hess, real_size)
            else:
                #Make an empty block matrix and fill in the correct block
                tmp_numeric_hess = get_blocks(np.zeros((num_elements, num_elements)), real_size)
                tmp_numeric_hess[block_indices] = numeric_hess.copy()
                numeric_hess = tmp_numeric_hess

        if block_indices is not None:
            #Extract the right block
            analytic_hess = analytic_hess[block_indices]
            numeric_hess = numeric_hess[block_indices]
        else:
            #Unblock them if they are in blocks and we are checking the whole hessian rather than a single block
            if analytic_hess.dtype is np.dtype('object'):
                analytic_hess = unblock(analytic_hess)
                numeric_hess = unblock(numeric_hess)

        ratio = numeric_hess / numpy.where(analytic_hess == 0, 1e-10, analytic_hess)
        difference = numpy.abs(analytic_hess - numeric_hess)

        check_passed = numpy.all(numpy.abs(1 - ratio) < tolerance) or numpy.allclose(numeric_hess, analytic_hess, atol=tolerance)

        if verbose:
            if block_indices:
                print("\nBlock {}".format(block_indices))
            else:
                print("\nAll blocks")

            header = ['Checked', 'Max-Ratio', 'Min-Ratio', 'Min-Difference', 'Max-Difference']
            header_string = ' | '.join(header)
            separator = '-' * len(header_string)
            print('\n'.join([header_string, separator]))
            min_r = '%.6f' % float(numpy.min(ratio))
            max_r = '%.6f' % float(numpy.max(ratio))
            max_d = '%.6f' % float(numpy.max(difference))
            min_d = '%.6f' % float(numpy.min(difference))
            cols = [max_r, min_r, min_d, max_d]

            if check_passed:
                checked = "\033[92m True \033[0m"
            else:
                checked = "\033[91m False \033[0m"

            grad_string = "{} | {} | {} | {} | {} ".format(checked, cols[0], cols[1], cols[2], cols[3])
            print(grad_string)

        if plot:
            import pylab as pb
            fig, axes = pb.subplots(2, 2)
            max_lim = numpy.max(numpy.vstack((analytic_hess, numeric_hess)))
            min_lim = numpy.min(numpy.vstack((analytic_hess, numeric_hess)))
            msa = axes[0, 0].matshow(analytic_hess, vmin=min_lim, vmax=max_lim)
            axes[0, 0].set_title('Analytic hessian')
            axes[0, 0].xaxis.set_ticklabels([None])
            axes[0, 0].yaxis.set_ticklabels([None])
            axes[0, 0].xaxis.set_ticks([None])
            axes[0, 0].yaxis.set_ticks([None])
            msn = axes[0, 1].matshow(numeric_hess, vmin=min_lim, vmax=max_lim)
            pb.colorbar(msn, ax=axes[0, 1])
            axes[0, 1].set_title('Numeric hessian')
            axes[0, 1].xaxis.set_ticklabels([None])
            axes[0, 1].yaxis.set_ticklabels([None])
            axes[0, 1].xaxis.set_ticks([None])
            axes[0, 1].yaxis.set_ticks([None])
            msr = axes[1, 0].matshow(ratio)
            pb.colorbar(msr, ax=axes[1, 0])
            axes[1, 0].set_title('Ratio')
            axes[1, 0].xaxis.set_ticklabels([None])
            axes[1, 0].yaxis.set_ticklabels([None])
            axes[1, 0].xaxis.set_ticks([None])
            axes[1, 0].yaxis.set_ticks([None])
            msd = axes[1, 1].matshow(difference)
            pb.colorbar(msd, ax=axes[1, 1])
            axes[1, 1].set_title('Difference')
            axes[1, 1].xaxis.set_ticklabels([None])
            axes[1, 1].yaxis.set_ticklabels([None])
            axes[1, 1].xaxis.set_ticks([None])
            axes[1, 1].yaxis.set_ticks([None])
            if block_indices:
                fig.suptitle("Block: {}".format(block_indices))
            pb.show()

        return check_passed

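For reference, the pass criterion above accepts the check if every elementwise ratio is within tolerance of 1, or, failing that, if the two matrices agree within an absolute tolerance. A minimal self-contained sketch of the same criterion; the quartic test function and its derivatives are invented for this sketch and are not part of GPy:

import numpy as np

# Illustrative version of the checkgrad_block pass criterion:
# ratio test first, absolute allclose fallback second.
def f(x):
    return np.sum(x ** 4)

def analytic_hess(x):
    return np.diag(12 * x ** 2)

def numeric_hess(x, step=1e-4):
    # Central second differences in every pair of directions.
    n = x.size
    H = np.empty((n, n))
    E = np.eye(n) * step
    for i in range(n):
        for j in range(n):
            H[i, j] = (f(x + E[i] + E[j]) - f(x + E[i] - E[j])
                       - f(x - E[i] + E[j]) + f(x - E[i] - E[j])) / (4 * step ** 2)
    return H

x = np.array([0.5, -1.3, 2.0])
A, N = analytic_hess(x), numeric_hess(x)
ratio = N / np.where(A == 0, 1e-10, A)
tolerance = 1e-3
passed = np.all(np.abs(1 - ratio) < tolerance) or np.allclose(N, A, atol=tolerance)
print(passed)   # True: the allclose fallback absorbs the zero off-diagonal entries

Note how the ratio test alone would fail wherever the analytic hessian is exactly zero (numerical noise divided by the 1e-10 guard explodes), which is precisely why the code keeps the allclose fallback.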
class SkewChecker(HessianChecker):

    def __init__(self, df, ddf, dddf, x0, names=None, *args, **kwargs):
        """
        :param df: gradient of the function
        :param ddf: hessian of the function (the gradient that will be checked)
        :param dddf: analytic third derivative (the gradient of the hessian) to check against
        :param x0:
            Initial guess for inputs x (if it has a shape (a,b) this will be reflected in the parameter names).
            Can be a list of arrays, if f takes a list of arrays. This list will be passed
            to f and df in the same order as given here.
            If there is only one argument, make sure not to pass a list.

        :type x0: [array-like] | array-like | float | int
        :param names:
            Names to print when performing gradcheck. If a list was passed to x0,
            a list of names with the same length is expected.
        :param args: Arguments passed as f(x, *args, **kwargs) and df(x, *args, **kwargs)
        """
        super(SkewChecker, self).__init__(df, ddf, dddf, x0, names=names, *args, **kwargs)

    def checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, block_indices=None, plot=False, super_plot=False):
        """
        Gradient checker that checks each hessian individually.

        super_plot will plot the hessian wrt every parameter; plot will just show the last one.
        """
        try:
            import numdifftools as nd
        except ImportError:
            raise ImportError("numdifftools is not installed; it is not a GPy dependency yet and is only used for hessian tests")

        if target_param:
            raise NotImplementedError('Only basic functionality is provided with this gradchecker')

        #Repeat for each parameter: not the nicest approach, but there should not be
        #many cases with many variables
        current_index = 0
        for name, n_shape in zip(self.names, self.shapes):
            current_size = numpy.prod(n_shape)
            x = self.optimizer_array.copy()
            #x = self._get_params_transformed().copy()
            x = x[current_index:current_index + current_size].reshape(n_shape)

            #Check gradients: this is actually the third derivative
            analytic_hess = self._ddf(x)

            #Can only calculate the jacobian for one variable at a time.
            #From the numdifftools docs:
            #x0 : vector location at which to differentiate fun
            #If x0 is an N x M array, then fun is assumed to be a function
            #of N*M variables; thus we must pass it flat, not (N,1) but just (N,)
            #numeric_hess_partial = nd.Hessian(self._f, vectorized=False)
            #_df is already the hessian, so differentiating it once gives the third derivative
            numeric_hess_partial = nd.Jacobian(self._df, vectorized=True)
            numeric_hess = numeric_hess_partial(x)

            print("Done making numerical hessian")
            if analytic_hess.dtype is np.dtype('object'):
                #Blockify numeric_hess as well
                blocksizes, pagesizes = get_block_shapes_3d(analytic_hess)
                #HACK
                real_block_size = np.sum(blocksizes)
                numeric_hess = numeric_hess.reshape(real_block_size, real_block_size, pagesizes)
                #numeric_hess = get_blocks_3d(numeric_hess, blocksizes)#, pagesizes)
            else:
                numeric_hess = numeric_hess.reshape(*analytic_hess.shape)

            #Check every block individually (for ease)
            check_passed = [False]*numeric_hess.shape[2]
            for block_ind in range(numeric_hess.shape[2]):
                #Unless super_plot is set, only plot the final block
                p = (plot and block_ind == numeric_hess.shape[2]-1) or super_plot
                if verbose:
                    print("Checking derivative of hessian wrt parameter number {}".format(block_ind))
                check_passed[block_ind] = self.checkgrad_block(analytic_hess[:,:,block_ind], numeric_hess[:,:,block_ind], verbose=verbose, step=step, tolerance=tolerance, block_indices=block_indices, plot=p)

            current_index += current_size
        return np.all(check_passed)

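The trick in checkgrad is that one numerical differentiation of a gradient gives a hessian, and one numerical differentiation of an analytic hessian gives the third derivative. A standalone sketch of the same idea on a toy quartic; the functions are invented, and the vectorized keyword used above belongs to an older numdifftools API, so it is omitted here:

import numpy as np
import numdifftools as nd

# For f(x) = sum(x**4): gradient 4x^3, hessian diag(12x^2).
def grad(x):
    return 4 * np.asarray(x) ** 3

x0 = np.array([0.5, -1.3])

# Differentiating the gradient numerically reproduces the hessian...
numeric_hess = nd.Jacobian(grad)(x0)
print(np.allclose(numeric_hess, np.diag(12 * x0 ** 2), atol=1e-6))   # True

# ...and differentiating an analytic hessian gives the third derivative,
# which is what SkewChecker.checkgrad does with self._df.
def hess_flat(x):
    return np.diag(12 * np.asarray(x) ** 2).ravel()

numeric_third = nd.Jacobian(hess_flat)(x0)   # shape (4, 2): one page per parameter
print(numeric_third.shape)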
@@ -74,6 +74,8 @@ class MRD(BayesianGPLVMMiniBatch):
        self.logger.debug("creating observable arrays")
        self.Ylist = [ObsAr(Y) for Y in Ylist]
        #The next line is a fix for Python 3. It replicates the Python 2 behaviour of the above comprehension
        Y = Ylist[-1]

        if Ynames is None:
            self.logger.debug("creating Ynames")

@@ -82,7 +84,7 @@ class MRD(BayesianGPLVMMiniBatch):
        assert len(self.names) == len(self.Ylist), "one name per dataset, or None if Ylist is a dict"

        if inference_method is None:
            self.inference_method = InferenceMethodList([VarDTC() for _ in xrange(len(self.Ylist))])
            self.inference_method = InferenceMethodList([VarDTC() for _ in range(len(self.Ylist))])
        else:
            assert isinstance(inference_method, InferenceMethodList), "please provide one inference method per Y in the list and provide it as InferenceMethodList, inference_method given: {}".format(inference_method)
            self.inference_method = inference_method

@@ -137,7 +139,7 @@ class MRD(BayesianGPLVMMiniBatch):
        self.bgplvms = []

        for i, n, k, l, Y, im, bs in itertools.izip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize):
        for i, n, k, l, Y, im, bs in zip(itertools.count(), Ynames, kernels, likelihoods, Ylist, self.inference_method, batchsize):
            assert Y.shape[0] == self.num_data, "All datasets need to share the number of datapoints, and those have to correspond to one another"
            md = np.isnan(Y).any()
            spgp = BayesianGPLVMMiniBatch(Y, input_dim, X, X_variance,

@@ -164,7 +166,7 @@ class MRD(BayesianGPLVMMiniBatch):
        self._log_marginal_likelihood = 0
        self.Z.gradient[:] = 0.
        self.X.gradient[:] = 0.
        for b, i in itertools.izip(self.bgplvms, self.inference_method):
        for b, i in zip(self.bgplvms, self.inference_method):
            self._log_marginal_likelihood += b._log_marginal_likelihood

            self.logger.info('working on im <{}>'.format(hex(id(i))))

@@ -195,7 +197,7 @@ class MRD(BayesianGPLVMMiniBatch):
        elif init in "PCA_single":
            X = np.zeros((Ylist[0].shape[0], self.input_dim))
            fracs = []
            for qs, Y in itertools.izip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist):
            for qs, Y in zip(np.array_split(np.arange(self.input_dim), len(Ylist)), Ylist):
                x, frcs = initialize_latent('PCA', len(qs), Y)
                X[:, qs] = x
                fracs.append(frcs)

@@ -327,9 +329,9 @@ class MRD(BayesianGPLVMMiniBatch):
    def __getstate__(self):
        state = super(MRD, self).__getstate__()
        if state.has_key('kern'):
        if 'kern' in state:
            del state['kern']
        if state.has_key('likelihood'):
        if 'likelihood' in state:
            del state['likelihood']
        return state

@@ -338,4 +340,4 @@ class MRD(BayesianGPLVMMiniBatch):
        super(MRD, self).__setstate__(state)
        self.kern = self.bgplvms[0].kern
        self.likelihood = self.bgplvms[0].likelihood
        self.parameters_changed()
        self.parameters_changed()

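The changes in these hunks are the standard Python 2 to 3 idioms: itertools.izip disappears because the builtin zip is already lazy in Python 3, dict.has_key was removed in favour of the in operator, and xrange became range. A minimal standalone sketch of the pattern; the dictionary and lists here are made up for illustration:

# Python 2 -> 3 idioms applied throughout this commit.
state = {'kern': 'rbf', 'likelihood': 'gaussian'}

# dict.has_key(k) was removed in Python 3; use the `in` operator instead.
if 'kern' in state:                  # Python 2: state.has_key('kern')
    del state['kern']

# itertools.izip was removed; the builtin zip is lazy in Python 3.
for model, method in zip(['m0', 'm1'], ['vardtc', 'ep']):
    print(model, method)

# xrange was renamed to range (range is lazy in Python 3).
print(sum(range(5)))                 # Python 2: xrange(5)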
@@ -30,7 +30,7 @@ class OneVsAllSparseClassification(object):
        self.results = {}
        for yj in labels:
            print 'Class %s vs all' %yj
            print('Class %s vs all' %yj)
            Ynew = Y.copy()
            Ynew[Y.flatten()!=yj] = 0
            Ynew[Y.flatten()==yj] = 1

@@ -1,6 +1,7 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)

from __future__ import print_function
import numpy as np
from ..core.parameterization.param import Param
from ..core.sparse_gp import SparseGP

@@ -50,8 +51,8 @@ class SparseGPMiniBatch(SparseGP):
            inference_method = var_dtc.VarDTC(limit=1 if not missing_data else Y.shape[1])
        else:
            #inference_method = ??
            raise NotImplementedError, "what to do what to do?"
            print "defaulting to ", inference_method, "for latent function inference"
            raise NotImplementedError("what to do what to do?")
            print("defaulting to ", inference_method, "for latent function inference")

        self.kl_factr = 1.
        self.Z = Param('inducing inputs', Z)

@@ -81,13 +82,13 @@ class SparseGPMiniBatch(SparseGP):
            overall = self.Y_normalized.shape[1]
            m_f = lambda i: "Precomputing Y for missing data: {: >7.2%}".format(float(i+1)/overall)
            message = m_f(-1)
            print message,
            for d in xrange(overall):
            print(message, end=' ')
            for d in range(overall):
                self.Ylist.append(self.Y_normalized[self.ninan[:, d], d][:, None])
                print ' '*(len(message)+1) + '\r',
                print(' '*(len(message)+1) + '\r', end=' ')
                message = m_f(d)
                print message,
            print ''
                print(message, end=' ')
            print('')

            self.posterior = None

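The progress messages above rely on print's end keyword plus a carriage return to overwrite the same terminal line, replacing the Python 2 trailing-comma print. A minimal self-contained sketch of the same idiom, slightly simplified with end='':

import sys
import time

# Overwrite one terminal line with a moving progress percentage,
# the same '\r' idiom the minibatch code uses.
total = 20
for i in range(total):
    msg = "Precomputing: {: >7.2%}".format(float(i + 1) / total)
    print('\r' + msg, end='')
    sys.stdout.flush()   # flush so the partial line is shown immediately
    time.sleep(0.01)
print('')                # finish with a newline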
@@ -182,11 +183,11 @@ class SparseGPMiniBatch(SparseGP):
            full_values[key][value_indices[key]] += current_values[key]
        """
        for key in current_values.keys():
            if value_indices is not None and value_indices.has_key(key):
            if value_indices is not None and key in value_indices:
                index = value_indices[key]
            else:
                index = slice(None)
            if full_values.has_key(key):
            if key in full_values:
                full_values[key][index] += current_values[key]
            else:
                full_values[key] = current_values[key]

@@ -242,15 +243,15 @@ class SparseGPMiniBatch(SparseGP):
        if not self.stochastics:
            m_f = lambda i: "Inference with missing_data: {: >7.2%}".format(float(i+1)/self.output_dim)
            message = m_f(-1)
            print message,
            print(message, end=' ')

        for d in self.stochastics.d:
            ninan = self.ninan[:, d]

            if not self.stochastics:
                print ' '*(len(message)) + '\r',
                print(' '*(len(message)) + '\r', end=' ')
                message = m_f(d)
                print message,
                print(message, end=' ')

            posterior, log_marginal_likelihood, \
                grad_dict, current_values, value_indices = self._inner_parameters_changed(

@@ -269,7 +270,7 @@ class SparseGPMiniBatch(SparseGP):
                woodbury_vector[:, d:d+1] = posterior.woodbury_vector
            self._log_marginal_likelihood += log_marginal_likelihood
        if not self.stochastics:
            print ''
            print('')

        if self.posterior is None:
            self.posterior = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector,

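The first hunk above merges per-output gradient dictionaries into full-size arrays, routing each entry through an optional index. A self-contained restatement of that pattern; the key and shapes are invented for illustration:

import numpy as np

def accumulate(full_values, current_values, value_indices=None):
    # Merge one minibatch/output-column result into the running totals,
    # mirroring the dictionary loop in the hunk above.
    for key in current_values.keys():
        if value_indices is not None and key in value_indices:
            index = value_indices[key]
        else:
            index = slice(None)
        if key in full_values:
            full_values[key][index] += current_values[key]
        else:
            full_values[key] = current_values[key]

full = {'dL_dK': np.zeros(3)}
accumulate(full, {'dL_dK': np.ones(2)}, value_indices={'dL_dK': np.array([0, 2])})
print(full['dL_dK'])   # [1. 0. 1.]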
@@ -74,7 +74,7 @@ class SSGPLVM(SparseGP_MPI):
        self.link_parameter(self.X, index=0)

        if self.group_spike:
            [self.X.gamma[:,i].tie('tieGamma'+str(i)) for i in xrange(self.X.gamma.shape[1])] # Tie columns together
            [self.X.gamma[:,i].tie('tieGamma'+str(i)) for i in range(self.X.gamma.shape[1])] # Tie columns together

    def set_X_gradients(self, X, X_grad):
        """Set the gradients of the posterior distribution of X in its specific form."""

@@ -19,10 +19,10 @@ class SSMRD(Model):
                      name='model_'+str(i)) for i,y in enumerate(Ylist)]
        self.add_parameters(*(self.models))

        [[[self.models[m].X.mean[i,j:j+1].tie('mean_'+str(i)+'_'+str(j)) for m in xrange(len(self.models))] for j in xrange(self.models[0].X.mean.shape[1])]
         for i in xrange(self.models[0].X.mean.shape[0])]
        [[[self.models[m].X.variance[i,j:j+1].tie('var_'+str(i)+'_'+str(j)) for m in xrange(len(self.models))] for j in xrange(self.models[0].X.variance.shape[1])]
         for i in xrange(self.models[0].X.variance.shape[0])]
        [[[self.models[m].X.mean[i,j:j+1].tie('mean_'+str(i)+'_'+str(j)) for m in range(len(self.models))] for j in range(self.models[0].X.mean.shape[1])]
         for i in range(self.models[0].X.mean.shape[0])]
        [[[self.models[m].X.variance[i,j:j+1].tie('var_'+str(i)+'_'+str(j)) for m in range(len(self.models))] for j in range(self.models[0].X.variance.shape[1])]
         for i in range(self.models[0].X.variance.shape[0])]

        self.updates = True

@@ -31,4 +31,4 @@ class SSMRD(Model):
        self._log_marginal_likelihood = sum([m._log_marginal_likelihood for m in self.models])

    def log_likelihood(self):
        return self._log_marginal_likelihood
        return self._log_marginal_likelihood

@@ -2,6 +2,6 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)

try:
    import matplot_dep
    from . import matplot_dep
except (ImportError, NameError):
    print 'Fail to load GPy.plotting.matplot_dep.'
    print('Fail to load GPy.plotting.matplot_dep.')

@@ -133,7 +133,7 @@ def x_frame1D(X,plot_limits=None,resolution=None):
    elif len(plot_limits)==2:
        xmin, xmax = plot_limits
    else:
        raise ValueError, "Bad limits for plotting"
        raise ValueError("Bad limits for plotting")

    Xnew = np.linspace(xmin,xmax,resolution or 200)[:,None]
    return Xnew, xmin, xmax

@@ -149,7 +149,7 @@ def x_frame2D(X,plot_limits=None,resolution=None):
    elif len(plot_limits)==2:
        xmin, xmax = plot_limits
    else:
        raise ValueError, "Bad limits for plotting"
        raise ValueError("Bad limits for plotting")

    resolution = resolution or 50
    xx,yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]

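x_frame1D simply builds a dense (resolution, 1) prediction grid between the plot limits. The hunk only shows the explicit plot_limits branch, so the padding applied when the limits are derived from the data is an assumption in this sketch:

import numpy as np

# Stand-in training inputs; the quarter-range padding is illustrative only.
X = np.random.uniform(0., 5., (20, 1))
margin = 0.25 * (X.max() - X.min())
xmin, xmax = X.min() - margin, X.max() + margin

resolution = None
Xnew = np.linspace(xmin, xmax, resolution or 200)[:, None]
print(Xnew.shape)   # (200, 1), ready to feed to model.predict(Xnew)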
@@ -27,7 +27,7 @@ def most_significant_input_dimensions(model, which_indices):
        try:
            input_1, input_2 = np.argsort(model.input_sensitivity())[::-1][:2]
        except:
            raise ValueError, "cannot automatically determine which dimensions to plot, please pass 'which_indices'"
            raise ValueError("cannot automatically determine which dimensions to plot, please pass 'which_indices'")
    else:
        input_1, input_2 = which_indices
    return input_1, input_2

@@ -62,7 +62,7 @@ def plot_latent(model, labels=None, which_indices=None,

    if X.shape[0] > 1000:
        print "Warning: subsampling X, as it has more samples than 1000. X.shape={!s}".format(X.shape)
        print("Warning: subsampling X, as it has more samples than 1000. X.shape={!s}".format(X.shape))
        subsample = np.random.choice(X.shape[0], size=1000, replace=False)
        X = X[subsample]
        labels = labels[subsample]

@@ -133,7 +133,7 @@ def plot_latent(model, labels=None, which_indices=None,
        try:
            xmin, xmax, ymin, ymax = plot_limits
        except (TypeError, ValueError) as e:
            raise e.__class__, "Wrong plot limits: {} given -> need (xmin, xmax, ymin, ymax)".format(plot_limits)
            raise e.__class__("Wrong plot limits: {} given -> need (xmin, xmax, ymin, ymax)".format(plot_limits))
        view = ImshowController(ax, plot_function,
                                (xmin, ymin, xmax, ymax),
                                resolution, aspect=aspect, interpolation='bilinear',

@@ -187,14 +187,14 @@ def plot_latent(model, labels=None, which_indices=None,
            fig.tight_layout()
            fig.canvas.draw()
        except Exception as e:
            print "Could not invoke tight layout: {}".format(e)
            print("Could not invoke tight layout: {}".format(e))
            pass

    if updates:
        try:
            ax.figure.canvas.show()
        except Exception as e:
            print "Could not invoke show: {}".format(e)
            print("Could not invoke show: {}".format(e))
        input('Enter to continue')
        view.deactivate()
    return ax

@@ -50,8 +50,8 @@ def plot_2D_images(figure, arr, symmetric=False, pad=None, zoom=None, mode=None,

    buf = np.ones((y_size*fig_nrows+pad*(fig_nrows-1), x_size*fig_ncols+pad*(fig_ncols-1), 3),dtype=arr.dtype)

    for y in xrange(fig_nrows):
        for x in xrange(fig_ncols):
    for y in range(fig_nrows):
        for x in range(fig_ncols):
            if y*fig_ncols+x<fig_num:
                buf[y*y_size+y*pad:(y+1)*y_size+y*pad, x*x_size+x*pad:(x+1)*x_size+x*pad] = arr_color[y*fig_ncols+x,:,:,:3]
    img_plot = ax.imshow(buf, interpolation=interpolation)

@@ -12,7 +12,7 @@ except:

def plot_optimizer(optimizer):
    if optimizer.trace == None:
        print "No trace present so I can't plot it. Please check that the optimizer actually supplies a trace."
        print("No trace present so I can't plot it. Please check that the optimizer actually supplies a trace.")
    else:
        pb.figure()
        pb.plot(optimizer.trace)

@@ -81,7 +81,7 @@ def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False, filtering=Non
            last_bottom = ard_params[i,:]
            bottom += last_bottom
        else:
            print "filtering out {}".format(kernel.parameters[i].name)
            print("filtering out {}".format(kernel.parameters[i].name))

    ax.set_xlim(-.5, kernel.input_dim - .5)
    add_bar_labels(fig, ax, [bars[-1]], bottom=bottom-last_bottom)

@@ -132,7 +132,7 @@ def plot(kernel,x=None, fignum=None, ax=None, title=None, plot_limits=None, reso
        elif len(plot_limits) == 2:
            xmin, xmax = plot_limits
        else:
            raise ValueError, "Bad limits for plotting"
            raise ValueError("Bad limits for plotting")

        Xnew = np.linspace(xmin, xmax, resolution or 201)[:, None]
        Kx = kernel.K(Xnew, x)

@@ -154,7 +154,7 @@ def plot(kernel,x=None, fignum=None, ax=None, title=None, plot_limits=None, reso
        elif len(plot_limits) == 2:
            xmin, xmax = plot_limits
        else:
            raise ValueError, "Bad limits for plotting"
            raise ValueError("Bad limits for plotting")

        resolution = resolution or 51
        xx, yy = np.mgrid[xmin[0]:xmax[0]:1j * resolution, xmin[1]:xmax[1]:1j * resolution]

@@ -168,4 +168,4 @@ def plot(kernel,x=None, fignum=None, ax=None, title=None, plot_limits=None, reso
        ax.set_ylabel("x2")
        ax.set_title("k(x1,x2 ; %0.1f,%0.1f)" % (x[0, 0], x[0, 1]))
    else:
        raise NotImplementedError, "Cannot plot a kernel with more than two input dimensions"
        raise NotImplementedError("Cannot plot a kernel with more than two input dimensions")

@@ -81,4 +81,4 @@ def plot_mapping(self, plot_limits=None, which_data='all', which_parts='all', re
        ax.set_ylim(xmin[1], xmax[1])

    else:
        raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
        raise NotImplementedError("Cannot define a frame with more than two input dimensions")

@@ -38,7 +38,7 @@ def plot(shape_records,facecolor='w',edgecolor='k',linewidths=.5, ax=None,xlims=
            par = list(sparts) + [points.shape[0]]

            polygs = []
            for pj in xrange(len(sparts)):
            for pj in range(len(sparts)):
                polygs.append(Polygon(points[par[pj]:par[pj+1]]))
            ax.add_collection(PatchCollection(polygs,facecolor=facecolor,edgecolor=edgecolor, linewidths=linewidths))

@@ -163,10 +163,10 @@ def new_shape_string(sf,name,regex,field=2,type=None):

            newshp.line(parts=_parts)
            newshp.records.append(sr.record)
            print len(sr.record)
            print(len(sr.record))

    newshp.save(name)
    print index
    print(index)

def apply_bbox(sf,ax):
    """

@@ -1,4 +1,4 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)

try:

@@ -16,7 +16,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
        which_data_ycols='all', fixed_inputs=[],
        levels=20, samples=0, fignum=None, ax=None, resolution=None,
        plot_raw=False,
        linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx'):
        linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx',
        apply_link=False, samples_f=0, plot_uncertain_inputs=True):
    """
    Plot the posterior of the GP.
      - In one dimension, the function is plotted with a shaded region identifying two standard deviations.

@@ -38,7 +39,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
    :type resolution: int
    :param levels: number of levels to plot in a contour plot.
    :type levels: int
    :param samples: the number of a posteriori samples to plot
    :param samples: the number of a posteriori samples to plot p(y*|y)
    :type samples: int
    :param fignum: figure to plot on.
    :type fignum: figure number

@@ -49,6 +50,10 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
    :type linecol:
    :param fillcol: color of fill
    :param levels: for 2D plotting, the number of contour levels to use. If ax is None, create a new figure.
    :param apply_link: apply the link function if plotting f (default false)
    :type apply_link: boolean
    :param samples_f: the number of posterior f samples to plot p(f*|y)
    :type samples_f: int
    """
    #deal with optional arguments
    if which_data_rows == 'all':

@@ -88,8 +93,14 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
    #make a prediction on the frame and plot it
    if plot_raw:
        m, v = model._raw_predict(Xgrid)
        lower = m - 2*np.sqrt(v)
        upper = m + 2*np.sqrt(v)
        if apply_link:
            lower = model.likelihood.gp_link.transf(m - 2*np.sqrt(v))
            upper = model.likelihood.gp_link.transf(m + 2*np.sqrt(v))
            #Once transformed this is now the median of the function
            m = model.likelihood.gp_link.transf(m)
        else:
            lower = m - 2*np.sqrt(v)
            upper = m + 2*np.sqrt(v)
    else:
        if isinstance(model,GPCoregionalizedRegression) or isinstance(model,SparseGPCoregionalizedRegression):
            meta = {'output_index': Xgrid[:,-1:].astype(np.int)}

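The apply_link branch exploits the fact that a monotonic link maps quantiles to quantiles: pushing m and m +/- 2*sqrt(v) through the link yields the median and a central credible band on the transformed scale. A sketch with a probit-style link standing in for gp_link.transf, which is only an illustrative choice:

import numpy as np
from scipy.stats import norm

# Toy latent posterior at three inputs: means and variances.
m = np.array([-1.0, 0.0, 2.0])
v = np.array([0.5, 0.1, 0.3])

transf = norm.cdf   # stand-in for model.likelihood.gp_link.transf

# A monotonic link maps interval endpoints to interval endpoints,
# so transforming the Gaussian bounds gives a valid band for transf(f)...
lower = transf(m - 2 * np.sqrt(v))
upper = transf(m + 2 * np.sqrt(v))
# ...and transforming the mean gives the *median* of transf(f),
# not its mean, because transf is nonlinear.
median = transf(m)
print(lower, median, upper)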
@@ -110,13 +121,31 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
            plots['posterior_samples'] = ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25)
            #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.

        if samples_f: #NOTE not tested with fixed_inputs
            Fsim = model.posterior_samples_f(Xgrid, samples_f)
            for fi in Fsim.T:
                plots['posterior_samples_f'] = ax.plot(Xnew, fi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25)
                #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.

        #add error bars for uncertain inputs (if input uncertainty is being modelled)
        if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs():
            plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(),
                        xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()),
                        ecolor='k', fmt=None, elinewidth=.5, alpha=.5)

        if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs() and plot_uncertain_inputs:
            if plot_raw:
                #add error bars for uncertain inputs (if input uncertainty is being modelled), for plot_f
                #Hack to plot error bars on the latent function, rather than on the data
                vs = model.X.mean.values.copy()
                for i,v in fixed_inputs:
                    vs[:,i] = v
                m_X, _ = model._raw_predict(vs)
                if apply_link:
                    m_X = model.likelihood.gp_link.transf(m_X)
                plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), m_X[which_data_rows, which_data_ycols].flatten(),
                            xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()),
                            ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
            else:
                plots['xerrorbar'] = ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(),
                            xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()),
                            ecolor='k', fmt=None, elinewidth=.5, alpha=.5)

        #set the limits of the plot to some sensible values
        ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper))

@@ -175,7 +204,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
        plots['inducing_inputs'] = ax.plot(Zu[:,0], Zu[:,1], 'wo')

    else:
        raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
        raise NotImplementedError("Cannot define a frame with more than two input dimensions")
    return plots

def plot_fit_f(model, *args, **kwargs):

@@ -186,3 +215,29 @@ def plot_fit_f(model, *args, **kwargs):
    """
    kwargs['plot_raw'] = True
    plot_fit(model,*args, **kwargs)

def fixed_inputs(model, non_fixed_inputs, fix_routine='median'):
    """
    Convenience function returning a fixed_inputs list in which every input
    not listed in non_fixed_inputs is fixed using fix_routine
    :param model: model
    :type model: Model
    :param non_fixed_inputs: dimensions of non fixed inputs
    :type non_fixed_inputs: list
    :param fix_routine: fixing routine to use, 'mean', 'median', 'zero'
    :type fix_routine: string
    """
    f_inputs = []
    if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
        X = model.X.mean.values.copy()
    else:
        X = model.X.values.copy()
    for i in range(X.shape[1]):
        if i not in non_fixed_inputs:
            if fix_routine == 'mean':
                f_inputs.append( (i, np.mean(X[:,i])) )
            elif fix_routine == 'median':
                f_inputs.append( (i, np.median(X[:,i])) )
            elif fix_routine == 'zero':
                f_inputs.append( (i, 0) )
    return f_inputs

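A short usage sketch for fixed_inputs: fix every dimension you are not plotting at its median, then hand the resulting (index, value) pairs to plot_fit's fixed_inputs argument. The model construction below is a plausible GPy call but is only illustrative here, and fixed_inputs is the helper defined just above:

import numpy as np
import GPy

# Hypothetical 4-dimensional regression problem.
X = np.random.randn(50, 4)
Y = np.sin(X[:, :1]) + 0.05 * np.random.randn(50, 1)
model = GPy.models.GPRegression(X, Y)

# Keep dimensions 0 and 1 free; dimensions 2 and 3 are fixed at their medians.
fixed = fixed_inputs(model, non_fixed_inputs=[0, 1], fix_routine='median')
print(fixed)   # e.g. [(2, 0.03...), (3, -0.11...)]

# These (index, value) pairs are exactly what plot_fit expects.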