Merge pull request #269 from SheffieldML/paramz

Paramz
Neil Lawrence, 2015-11-09 11:01:56 +00:00
commit 4b4e63e870
134 changed files with 444 additions and 5179 deletions

.coveragerc Normal file

@@ -0,0 +1,34 @@
# .coveragerc to control coverage.py
[run]
branch = True
source = paramz
omit = ./paramz/tests/*.py, travis_tests.py, setup.py, ./paramz/__version__.py
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
# Have to re-enable the standard pragma
pragma: no cover
# Don't complain about missing debug-only code:
if self\.debug
# Don't complain if tests don't hit defensive assertion code:
raise AssertionError
raise NotImplementedError
raise NotImplemented
except NotImplementedError
except NotImplemented
except AssertionError
except ImportError
pass
# Don't complain if non-runnable code isn't run:
if 0:
if __name__ == .__main__.:
# Don't fail on python3 catch clauses:
python3
ignore_errors = True
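The [run] and [report] sections above configure coverage.py for the paramz package. A minimal sketch of driving it programmatically, assuming coverage.py >= 4.0 and that this .coveragerc sits in the working directory (the test-runner call is elided):

```python
# Sketch only: measure branch coverage of paramz under the .coveragerc above.
import coverage

cov = coverage.Coverage(config_file='.coveragerc')  # picks up [run]: branch, source, omit
cov.start()
# ... run the paramz test suite here (e.g. via unittest discovery) ...
cov.stop()
cov.save()
cov.report()  # honours the exclude_lines regexes from [report]
```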


@@ -67,8 +67,8 @@ deploy:
password:
secure: "vMEOlP7DQhFJ7hQAKtKC5hrJXFl5BkUt4nXdosWWiw//Kg8E+PPLg88XPI2gqIosir9wwgtbSBBbbwCxkM6uxRNMpoNR8Ixyv9fmSXp4rLl7bbBY768W7IRXKIBjpuEy2brQjoT+CwDDSzUkckHvuUjJDNRvUv8ab4P/qYO1LG4="
on:
tags: true
branch: master
#server: https://testpypi.python.org/pypi
tags: false
branch: paramz
server: https://testpypi.python.org/pypi
distributions: "bdist_wheel sdist"
skip_cleanup: true


@@ -4,8 +4,6 @@ import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from . import core
from .core.parameterization import transformations, priors
constraints = transformations
from . import models
from . import mappings
from . import inference
@@ -13,16 +11,24 @@ from . import util
from . import examples
from . import likelihoods
from . import testing
from numpy.testing import Tester
from . import kern
from . import plotting
# backwards compatibility
import sys
backwards_compatibility = ['lists_and_dicts', 'observable_array', 'ties_and_remappings', 'index_operations']
for bc in backwards_compatibility:
sys.modules['GPy.core.parameterization.{!s}'.format(bc)] = getattr(core.parameterization, bc)
# Direct imports for convenience:
from .core import Model
from .core.parameterization import Param, Parameterized, ObsAr
from .core.parameterization import priors
from .core.parameterization import Param, Parameterized, ObsAr, transformations as constraints
from .__version__ import __version__
from numpy.testing import Tester
#@nottest
try:
#Get rid of nose dependency by only ignoring if you have nose installed
@@ -41,29 +47,29 @@ def load(file_or_path):
:param file_or_path: path to the pickle file, or an open file object
"""
# This is the pickling pain when changing _src -> src
import inspect
sys.modules['GPy.kern._src'] = kern.src # @UndefinedVariable
for name, module in inspect.getmembers(kern.src): # @UndefinedVariable
if not name.startswith('_'):
sys.modules['GPy.kern._src.{}'.format(name)] = module
try:
try:
import cPickle as pickle
if isinstance(file_or_path, basestring):
with open(file_or_path, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
m = u.load()
else:
m = pickle.load(file_or_path)
except:
import pickle
if isinstance(file_or_path, str):
with open(file_or_path, 'rb') as f:
m = pickle.load(f)
else:
m = pickle.load(file_or_path)
except ImportError:
import sys
import inspect
sys.modules['GPy.kern._src'] = kern.src
for name, module in inspect.getmembers(kern.src):
if not name.startswith('_'):
sys.modules['GPy.kern._src.{}'.format(name)] = module
m = load(file_or_path)
import cPickle as pickle
if isinstance(file_or_path, basestring):
with open(file_or_path, 'rb') as f:
m = pickle.load(f)
else:
m = pickle.load(file_or_path)
except: # python3
import pickle # @Reimport
if isinstance(file_or_path, str):
with open(file_or_path, 'rb') as f:
#u = pickle._Unpickler(f) # @UndefinedVariable
#u.encoding = 'latin1'
#m = u.load()
m = pickle.load(f, encoding='latin1')
else:
#u = pickle._Unpickler(file_or_path) # @UndefinedVariable
#u.encoding = 'latin1'
#m = u.load(protocol=2)
m = pickle.load(file_or_path, encoding='latin1')
return m
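A usage sketch for the loader above; the file name is illustrative, and Model.pickle is inherited from paramz's Pickleable base class after this merge:

```python
# Hypothetical round trip through GPy.load(); not part of this commit.
import numpy as np
import GPy

m = GPy.models.GPRegression(np.random.rand(10, 1), np.random.rand(10, 1))
m.pickle('model.pickle')       # write the model out
m2 = GPy.load('model.pickle')  # works for pickles written under python 2 or 3
```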


@@ -1 +1 @@
__version__ = "0.8.8dev5"
__version__ = "0.8.21"


@@ -1,12 +1,46 @@
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .model import *
from .parameterization.parameterized import adjust_name_for_printing, Parameterizable
from .parameterization.param import Param, ParamConcatenation
from .parameterization.observable_array import ObsAr
from GPy.core.model import Model
from .parameterization import Param, Parameterized
from . import parameterization
from .gp import GP
from .svgp import SVGP
from .sparse_gp import SparseGP
from .mapping import *
import numpy as np
#===========================================================================
# Handle priors, this needs to be
# cleaned up at some point
#===========================================================================
def randomize(self, rand_gen=None, *args, **kwargs):
"""
Randomize the model.
Draws from the prior if one exists, otherwise from the given random generator.
:param rand_gen: numpy random number generator which takes args and kwargs
:param float loc: loc parameter for the random number generator
:param float scale: scale parameter for the random number generator
:param args, kwargs: passed through to the random number generator
"""
if rand_gen is None:
rand_gen = np.random.normal
# first take care of all parameters (from N(0,1))
x = rand_gen(size=self._size_transformed(), *args, **kwargs)
updates = self.update_model()
self.update_model(False) # Switch off the updates
self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)
# now draw from prior where possible
x = self.param_array.copy()
[np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.items() if p is not None]
unfixlist = np.ones((self.size,),dtype=np.bool)
from paramz.transformations import __fixed__
unfixlist[self.constraints[__fixed__]] = False
self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
self.update_model(updates)
Model.randomize = randomize
Param.randomize = randomize
Parameterized.randomize = randomize
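A short usage sketch of the patched-in randomize; the model and generator arguments are illustrative:

```python
# Hypothetical example; not part of this commit.
import numpy as np
import GPy

m = GPy.models.GPRegression(np.random.rand(20, 1), np.random.rand(20, 1))
m.randomize()                                      # draw from N(0, 1) in transformed space
m.randomize(np.random.uniform, low=0.1, high=2.0)  # custom generator; kwargs are passed through
```

Parameters with a prior set are then overwritten by draws from that prior, as the implementation above shows.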


@@ -2,14 +2,13 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import sys
from .. import kern
from .model import Model
from .parameterization import ObsAr
from GPy.core.model import Model
from paramz import ObsAr
from .mapping import Mapping
from .. import likelihoods
from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation
from .parameterization.variational import VariationalPosterior
from GPy.core.parameterization.variational import VariationalPosterior
import logging
import warnings


@@ -1,126 +1,18 @@
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .parameterization.priorizable import Priorizable
from paramz import Model as ParamzModel
from .. import likelihoods
from ..inference import optimization
from ..util.misc import opt_wrapper
from .parameterization import Parameterized
import multiprocessing as mp
import numpy as np
from numpy.linalg.linalg import LinAlgError
import itertools
import sys
from .verbose_optimization import VerboseOptimization
# import numdifftools as ndt
from functools import reduce
class Model(Parameterized):
_fail_count = 0 # Count of failed optimization steps (see objective)
_allowed_failures = 10 # number of allowed failures
class Model(ParamzModel, Priorizable):
def __init__(self, name):
super(Model, self).__init__(name) # Parameterized.__init__(self)
self.optimization_runs = []
self.sampling_runs = []
self.preferred_optimizer = 'lbfgsb'
from .parameterization.ties_and_remappings import Tie
self.tie = Tie()
self.link_parameter(self.tie, -1)
self.obj_grads = None
self.add_observer(self.tie, self.tie._parameters_changed_notification, priority=-500)
def log_likelihood(self):
raise NotImplementedError("this needs to be implemented to use the model class")
def _log_likelihood_gradients(self):
return self.gradient.copy()
def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
"""
Perform random restarts of the model, and set the model to the best
seen solution.
If the robust flag is set, exceptions raised during optimizations will
be handled silently. If _all_ runs fail, the model is reset to the
existing parameter values.
**Notes**
:param num_restarts: number of restarts to use (default 10)
:type num_restarts: int
:param robust: whether to handle exceptions silently or not (default False)
:type robust: bool
:param parallel: whether to run each restart as a separate process. It relies on the multiprocessing module.
:type parallel: bool
:param num_processes: number of workers in the multiprocessing pool
:type num_processes: int
\*\*kwargs are passed to the optimizer. They can be:
:param max_f_eval: maximum number of function evaluations
:type max_f_eval: int
:param max_iters: maximum number of iterations
:type max_iters: int
:param messages: whether to display messages during optimisation
:type messages: bool
.. note:: If num_processes is None, the number of workers in the
multiprocessing pool is automatically set to the number of processors
on the current machine.
"""
initial_parameters = self.optimizer_array.copy()
if parallel:
try:
jobs = []
pool = mp.Pool(processes=num_processes)
for i in range(num_restarts):
if i>0: self.randomize()
job = pool.apply_async(opt_wrapper, args=(self,), kwds=kwargs)
jobs.append(job)
pool.close() # signal that no more data coming in
pool.join() # wait for all the tasks to complete
except KeyboardInterrupt:
print("Ctrl+c received, terminating and joining pool.")
pool.terminate()
pool.join()
for i in range(num_restarts):
try:
if not parallel:
if i>0: self.randomize()
self.optimize(**kwargs)
else:
self.optimization_runs.append(jobs[i].get())
if verbose:
print(("Optimization restart {0}/{1}, f = {2}".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt)))
except Exception as e:
if robust:
print(("Warning - optimization restart {0}/{1} failed".format(i + 1, num_restarts)))
else:
raise e
if len(self.optimization_runs):
i = np.nanargmin([o.f_opt for o in self.optimization_runs])
self.optimizer_array = self.optimization_runs[i].x_opt
else:
self.optimizer_array = initial_parameters
def ensure_default_constraints(self, warning=True):
"""
Ensure that any variables which should clearly be positive
have been constrained somehow. The method performs a regular
expression search on parameter names looking for the terms
'variance', 'lengthscale', 'precision' and 'kappa'. If any of
these terms are present in the name the parameter is
constrained positive.
DEPRECATED.
"""
raise DeprecationWarning('parameters now have default constraints')
return self.gradient#.copy()
def objective_function(self):
"""
@@ -153,285 +45,4 @@ class Model(Parameterized):
(including the MAP prior), so we return it here. If your model is not
probabilistic, just return your *negative* gradient here!
"""
return -(self._log_likelihood_gradients() + self._log_prior_gradients())
def _grads(self, x):
"""
Gets the gradients from the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the gradients, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:type x: np.array
"""
try:
# self._set_params_transformed(x)
self.optimizer_array = x
self.obj_grads = self._transform_gradients(self.objective_function_gradients())
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e100, 1e100)
return self.obj_grads
def _objective(self, x):
"""
The objective function passed to the optimizer. It combines
the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the objective, and will raise the original exception if
the objective cannot be computed.
:param x: the parameters of the model.
:type x: np.array
"""
try:
self.optimizer_array = x
obj = self.objective_function()
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
return np.inf
return obj
def _objective_grads(self, x):
try:
self.optimizer_array = x
obj_f, self.obj_grads = self.objective_function(), self._transform_gradients(self.objective_function_gradients())
self._fail_count = 0
except (LinAlgError, ZeroDivisionError, ValueError):
if self._fail_count >= self._allowed_failures:
raise
self._fail_count += 1
obj_f = np.inf
self.obj_grads = np.clip(self._transform_gradients(self.objective_function_gradients()), -1e10, 1e10)
return obj_f, self.obj_grads
def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):
"""
Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.
kwargs are passed to the optimizer. They can be:
:param max_iters: maximum number of function evaluations
:type max_iters: int
:param messages: whether to display messages during optimisation
:type messages: bool
:param optimizer: which optimizer to use (defaults to self.preferred_optimizer)
:type optimizer: string
Valid optimizers are:
- 'scg': scaled conjugate gradient method, recommended for stability.
See also GPy.inference.optimization.scg
- 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc)
- 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin),
- 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b),
- 'sgd': stochastic gradient descent. For experts only!
"""
if self.is_fixed or self.size == 0:
print('nothing to optimize')
if not self.update_model():
print("updates were off, setting updates on again")
self.update_model(True)
if start is None:
start = self.optimizer_array
if optimizer is None:
optimizer = self.preferred_optimizer
if isinstance(optimizer, optimization.Optimizer):
opt = optimizer
opt.model = self
else:
optimizer = optimization.get_optimizer(optimizer)
opt = optimizer(x_init=start, model=self, max_iters=max_iters, **kwargs)
with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook, clear_after_finish=clear_after_finish) as vo:
opt.run(f_fp=self._objective_grads, f=self._objective, fp=self._grads)
vo.finish(opt)
self.optimization_runs.append(opt)
self.optimizer_array = opt.x_opt
def optimize_SGD(self, momentum=0.1, learning_rate=0.01, iterations=20, **kwargs):
# assert self.Y.shape[1] > 1, "SGD only works with D > 1"
sgd = SGD.StochasticGD(self, iterations, learning_rate, momentum, **kwargs) # @UndefinedVariable
sgd.run()
self.optimization_runs.append(sgd)
def _checkgrad(self, target_param=None, verbose=False, step=1e-6, tolerance=1e-3, df_tolerance=1e-12):
"""
Check the gradient of the model by comparing it to a numerical
estimate. If the verbose flag is passed, individual
components are tested (and printed)
:param verbose: If True, print a "full" checking of each parameter
:type verbose: bool
:param step: The size of the step around which to linearise the objective
:type step: float (default 1e-6)
:param tolerance: the tolerance allowed (see note)
:type tolerance: float (default 1e-3)
Note:
The gradient is considered correct if the ratio of the analytical
and numerical gradients is within <tolerance> of unity.
The *dF_ratio* indicates the limit of numerical accuracy of numerical gradients.
If it is too small, e.g., smaller than 1e-12, the numerical gradients are usually
not accurate enough for the tests (shown with blue).
"""
x = self.optimizer_array.copy()
if not verbose:
# make sure only to test the selected parameters
if target_param is None:
transformed_index = range(len(x))
else:
transformed_index = self._raveled_index_for(target_param)
if self._has_fixes():
indices = np.r_[:self.size]
which = (transformed_index[:, None] == indices[self._fixes_][None, :]).nonzero()
transformed_index = (indices - (~self._fixes_).cumsum())[transformed_index[which[0]]]
if transformed_index.size == 0:
print("No free parameters to check")
return
# just check the global ratio
dx = np.zeros(x.shape)
dx[transformed_index] = step * (np.sign(np.random.uniform(-1, 1, transformed_index.size)) if transformed_index.size != 2 else 1.)
# evaluate around the point x
f1 = self._objective(x + dx)
f2 = self._objective(x - dx)
gradient = self._grads(x)
dx = dx[transformed_index]
gradient = gradient[transformed_index]
denominator = (2 * np.dot(dx, gradient))
global_ratio = (f1 - f2) / np.where(denominator == 0., 1e-32, denominator)
global_diff = np.abs(f1 - f2) < tolerance and np.allclose(gradient, 0, atol=tolerance)
if global_ratio is np.nan:
global_ratio = 0
return np.abs(1. - global_ratio) < tolerance or global_diff
else:
# check the gradient of each parameter individually, and do some pretty printing
try:
names = self._get_param_names()
except NotImplementedError:
names = ['Variable %i' % i for i in range(len(x))]
# Prepare for pretty-printing
header = ['Name', 'Ratio', 'Difference', 'Analytical', 'Numerical', 'dF_ratio']
max_names = max([len(names[i]) for i in range(len(names))] + [len(header[0])])
float_len = 10
cols = [max_names]
cols.extend([max(float_len, len(header[i])) for i in range(1, len(header))])
cols = np.array(cols) + 5
header_string = ["{h:^{col}}".format(h=header[i], col=cols[i]) for i in range(len(cols))]
header_string = list(map(lambda x: '|'.join(x), [header_string]))
separator = '-' * len(header_string[0])
print('\n'.join([header_string[0], separator]))
if target_param is None:
param_index = range(len(x))
transformed_index = param_index
else:
param_index = self._raveled_index_for(target_param)
if self._has_fixes():
indices = np.r_[:self.size]
which = (param_index[:, None] == indices[self._fixes_][None, :]).nonzero()
param_index = param_index[which[0]]
transformed_index = (indices - (~self._fixes_).cumsum())[param_index]
# print param_index, transformed_index
else:
transformed_index = param_index
if param_index.size == 0:
print("No free parameters to check")
return
gradient = self._grads(x).copy()
gradient = np.where(gradient == 0, 1e-312, gradient)
ret = True
for nind, xind in zip(param_index, transformed_index):
xx = x.copy()
xx[xind] += step
f1 = float(self._objective(xx))
xx[xind] -= 2.*step
f2 = float(self._objective(xx))
# Avoid division by zero: only compute the ratio if any of the values are above 1e-15,
# otherwise both values are essentially the same
if f1 > 1e-15 or f1 < -1e-15 or f2 > 1e-15 or f2 < -1e-15:
df_ratio = np.abs((f1 - f2) / min(f1, f2))
else:
df_ratio = 1.0
df_unstable = df_ratio < df_tolerance
numerical_gradient = (f1 - f2) / (2. * step)
if np.all(gradient[xind] == 0): ratio = (f1 - f2) == gradient[xind]
else: ratio = (f1 - f2) / (2. * step * gradient[xind])
difference = np.abs(numerical_gradient - gradient[xind])
if (np.abs(1. - ratio) < tolerance) or np.abs(difference) < tolerance:
formatted_name = "\033[92m {0} \033[0m".format(names[nind])
ret &= True
else:
formatted_name = "\033[91m {0} \033[0m".format(names[nind])
ret &= False
if df_unstable:
formatted_name = "\033[94m {0} \033[0m".format(names[nind])
r = '%.6f' % float(ratio)
d = '%.6f' % float(difference)
g = '%.6f' % gradient[xind]
ng = '%.6f' % float(numerical_gradient)
df = '%1.e' % float(df_ratio)
grad_string = "{0:<{c0}}|{1:^{c1}}|{2:^{c2}}|{3:^{c3}}|{4:^{c4}}|{5:^{c5}}".format(formatted_name, r, d, g, ng, df, c0=cols[0] + 9, c1=cols[1], c2=cols[2], c3=cols[3], c4=cols[4], c5=cols[5])
print(grad_string)
self.optimizer_array = x
return ret
def _repr_html_(self):
"""Representation of the model in html for notebook display."""
model_details = [['<b>Model</b>', self.name + '<br>'],
['<b>Log-likelihood</b>', '{}<br>'.format(float(self.log_likelihood()))],
["<b>Number of Parameters</b>", '{}<br>'.format(self.size)],
["<b>Number of Optimization Parameters</b>", '{}<br>'.format(self._size_transformed())],
["<b>Updates</b>", '{}<br>'.format(self._update_on)],
]
from operator import itemgetter
to_print = ["""<style type="text/css">
.pd{
font-family: "Courier New", Courier, monospace !important;
width: 100%;
padding: 3px;
}
</style>\n"""] + ["<p class=pd>"] + ["{}: {}".format(name, detail) for name, detail in model_details] + ["</p>"]
to_print.append(super(Model, self)._repr_html_())
return "\n".join(to_print)
def __str__(self, VT100=True):
model_details = [['Name', self.name],
['Log-likelihood', '{}'.format(float(self.log_likelihood()))],
["Number of Parameters", '{}'.format(self.size)],
["Number of Optimization Parameters", '{}'.format(self._size_transformed())],
["Updates", '{}'.format(self._update_on)],
]
from operator import itemgetter
max_len = reduce(lambda a, b: max(len(b[0]), a), model_details, 0)
to_print = [""] + ["{0:{l}} : {1}".format(name, detail, l=max_len) for name, detail in model_details] + ["Parameters:"]
to_print.append(super(Model, self).__str__(VT100=VT100))
return "\n".join(to_print)
return -(self._log_likelihood_gradients() + self._log_prior_gradients())
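The methods above make up the standard fit-and-check workflow. A hedged end-to-end sketch (data and model are illustrative):

```python
# Hypothetical workflow; not part of this commit.
import numpy as np
import GPy

X = np.linspace(0, 10, 50)[:, None]
Y = np.sin(X) + 0.1 * np.random.randn(50, 1)
m = GPy.models.GPRegression(X, Y)

assert m.checkgrad()                  # compare analytical vs. numerical gradients (_checkgrad)
m.optimize('lbfgsb', max_iters=1000)  # LinAlgError/ZeroDivisionError are retried, see _objective
m.optimize_restarts(num_restarts=5, robust=True)
print(m)                              # the __str__ summary defined above
```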


@@ -1,5 +1,9 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .param import Param, ObsAr
from .param import Param
from .parameterized import Parameterized
from paramz import transformations
from paramz.core import lists_and_dicts, index_operations, observable_array, observable
from paramz import ties_and_remappings, ObsAr


@@ -1,25 +0,0 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
(Hyper-)Parameter domains defined for :py:mod:`~GPy.core.priors` and :py:mod:`~GPy.kern`.
These domains specify the legitimate realm of the parameters to live in.
:const:`~GPy.core.domains._REAL` :
real domain, all values in the real numbers are allowed
:const:`~GPy.core.domains._POSITIVE`:
positive domain, only positive real values are allowed
:const:`~GPy.core.domains._NEGATIVE`:
analogous to :const:`~GPy.core.domains._POSITIVE`, but only negative values are allowed
:const:`~GPy.core.domains._BOUNDED`:
only values within the bounded range are allowed,
the bounds are specified within the object with the bounded range
"""
_REAL = 'real'
_POSITIVE = "positive"
_NEGATIVE = 'negative'
_BOUNDED = 'bounded'


@@ -1,327 +0,0 @@
# Copyright (c) 2014, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy
from numpy.lib.function_base import vectorize
from .lists_and_dicts import IntArrayDict
from functools import reduce
from .transformations import Transformation
def extract_properties_to_index(index, props):
prop_index = dict()
for i, cl in enumerate(props):
for c in cl:
ind = prop_index.get(c, list())
ind.append(index[i])
prop_index[c] = ind
for c, i in prop_index.items():
prop_index[c] = numpy.array(i, dtype=int)
return prop_index
class ParameterIndexOperations(object):
"""
This object wraps a dictionary, whose keys are _operations_ that we'd like
to apply to a parameter array, and whose values are numpy integer arrays which
index the parameter array appropriately.
A model instance will contain one instance of this class for each thing
that needs indexing (i.e. constraints, ties and priors). Parameters within
the model contain instances of the ParameterIndexOperationsView class,
which can map from a 'local' index (starting at 0) to this global index.
Here's an illustration:
#=======================================================================
model : 0 1 2 3 4 5 6 7 8 9
key1: 4 5
key2: 7 8
param1: 0 1 2 3 4 5
key1: 2 3
key2: 5
param2: 0 1 2 3 4
key1: 0
key2: 2 3
#=======================================================================
The views of this global index have a subset of the keys in this global
(model) index.
Adding a new key (e.g. a constraint) to a view will cause the view to pass
the new key to the global index, along with the local index and an offset.
This global index then stores the key and the appropriate global index
(which can be seen by the view).
See also:
ParameterIndexOperationsView
"""
_offset = 0
def __init__(self, constraints=None):
self._properties = IntArrayDict()
if constraints is not None:
#python 3 fix
#for t, i in constraints.iteritems():
for t, i in constraints.items():
self.add(t, i)
#iteritems has gone in python 3
#def iteritems(self):
# return self._properties.iteritems()
def items(self):
return self._properties.items()
def properties(self):
return self._properties.keys()
def iterproperties(self):
return iter(self._properties)
def shift_right(self, start, size):
for ind in self.iterindices():
toshift = ind>=start
ind[toshift] += size
def shift_left(self, start, size):
for v, ind in list(self.items()):
todelete = (ind>=start) * (ind<start+size)
if todelete.size != 0:
ind = ind[~todelete]
toshift = ind>=start
if toshift.size != 0:
ind[toshift] -= size
if ind.size != 0: self._properties[v] = ind
else: del self._properties[v]
def clear(self):
self._properties.clear()
@property
def size(self):
return reduce(lambda a,b: a+b.size, self.iterindices(), 0)
def iterindices(self):
try:
return self._properties.itervalues()
except AttributeError:
#Changed this from itervalues to values for Py3 compatibility. It didn't break the test suite.
return self._properties.values()
def indices(self):
return self._properties.values()
def properties_for(self, index):
"""
Returns a list of properties, such that each entry in the list corresponds
to the element of the index given.
Example:
let properties: 'one':[1,2,3,4], 'two':[3,5,6]
>>> properties_for([2,3,5])
[['one'], ['one', 'two'], ['two']]
"""
return vectorize(lambda i: [prop for prop in self.iterproperties() if i in self[prop]], otypes=[list])(index)
def properties_to_index_dict(self, index):
"""
Return a dictionary, containing properties as keys and indices as index
Thus, the indices for each constraint, which is contained will be collected as
one dictionary
Example:
let properties: 'one':[1,2,3,4], 'two':[3,5,6]
>>> properties_to_index_dict([2,3,5])
{'one':[2,3], 'two':[3,5]}
"""
props = self.properties_for(index)
prop_index = extract_properties_to_index(index, props)
return prop_index
def add(self, prop, indices):
self._properties[prop] = combine_indices(self._properties[prop], indices)
def remove(self, prop, indices):
if prop in self._properties:
diff = remove_indices(self[prop], indices)
removed = numpy.intersect1d(self[prop], indices, True)
if not index_empty(diff):
self._properties[prop] = diff
else:
del self._properties[prop]
return removed.astype(int)
return numpy.array([]).astype(int)
def update(self, parameter_index_view, offset=0):
#py3 fix
#for i, v in parameter_index_view.iteritems():
for i, v in parameter_index_view.items():
self.add(i, v+offset)
def copy(self):
return self.__deepcopy__(None)
def __deepcopy__(self, memo):
#py3 fix
#return ParameterIndexOperations(dict(self.iteritems()))
return ParameterIndexOperations(dict(self.items()))
def __getitem__(self, prop):
return self._properties[prop]
def __delitem__(self, prop):
del self._properties[prop]
def __str__(self, *args, **kwargs):
import pprint
return pprint.pformat(dict(self._properties))
def combine_indices(arr1, arr2):
return numpy.union1d(arr1, arr2)
def remove_indices(arr, to_remove):
return numpy.setdiff1d(arr, to_remove, True)
def index_empty(index):
return numpy.size(index) == 0
class ParameterIndexOperationsView(object):
def __init__(self, param_index_operations, offset, size):
self._param_index_ops = param_index_operations
self._offset = offset
self._size = size
def __getstate__(self):
return [self._param_index_ops, self._offset, self._size]
def __setstate__(self, state):
self._param_index_ops = state[0]
self._offset = state[1]
self._size = state[2]
def _filter_index(self, ind):
return ind[(ind >= self._offset) * (ind < (self._offset + self._size))] - self._offset
#iteritems has gone in python 3. It has been renamed items()
def items(self):
_items_list = list(self._param_index_ops.items())
for i, ind in _items_list:
ind2 = self._filter_index(ind)
if ind2.size > 0:
yield i, ind2
#Python 3 items() is now implemented as per py2 iteritems
#def items(self):
# return [[i,v] for i,v in self.iteritems()]
def properties(self):
return [i for i in self.iterproperties()]
def iterproperties(self):
#py3 fix
#for i, _ in self.iteritems():
for i, _ in self.items():
yield i
def shift_right(self, start, size):
self._param_index_ops.shift_right(start+self._offset, size)
def shift_left(self, start, size):
self._param_index_ops.shift_left(start+self._offset, size)
def clear(self):
for i, ind in self.items():
self._param_index_ops.remove(i, ind+self._offset)
@property
def size(self):
return reduce(lambda a,b: a+b.size, self.iterindices(), 0)
def iterindices(self):
#py3 fix
#for _, ind in self.iteritems():
for _, ind in self.items():
yield ind
def indices(self):
return [ind for ind in self.iterindices()]
def properties_for(self, index):
"""
Returns a list of properties, such that each entry in the list corresponds
to the element of the index given.
Example:
let properties: 'one':[1,2,3,4], 'two':[3,5,6]
>>> properties_for([2,3,5])
[['one'], ['one', 'two'], ['two']]
"""
return vectorize(lambda i: [prop for prop in self.iterproperties() if i in self[prop]], otypes=[list])(index)
def properties_to_index_dict(self, index):
"""
Return a dictionary containing properties as keys and indices as values.
Thus, the indices for each contained constraint are collected in
one dictionary.
Example:
let properties: 'one':[1,2,3,4], 'two':[3,5,6]
>>> properties_to_index_dict([2,3,5])
{'one':[2,3], 'two':[3,5]}
"""
return extract_properties_to_index(index, self.properties_for(index))
def add(self, prop, indices):
self._param_index_ops.add(prop, indices+self._offset)
def remove(self, prop, indices):
removed = self._param_index_ops.remove(prop, numpy.array(indices)+self._offset)
if removed.size > 0:
return removed-self._offset
return removed
def __getitem__(self, prop):
ind = self._filter_index(self._param_index_ops[prop])
return ind
def __delitem__(self, prop):
self.remove(prop, self[prop])
def __str__(self, *args, **kwargs):
import pprint
#py3 fixes
#return pprint.pformat(dict(self.iteritems()))
return pprint.pformat(dict(self.items()))
def update(self, parameter_index_view, offset=0):
#py3 fixes
#for i, v in parameter_index_view.iteritems():
for i, v in parameter_index_view.items():
self.add(i, v+offset)
def copy(self):
return self.__deepcopy__(None)
def __deepcopy__(self, memo):
#py3 fix
#return ParameterIndexOperations(dict(self.iteritems()))
return ParameterIndexOperations(dict(self.items()))
pass
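The global-index/view mechanics from the docstring can be exercised directly. After this refactor the classes live in paramz (the module path below follows paramz's layout); the numbers reproduce the 'param1' row of the illustration above:

```python
# Sketch of the illustration: a global (model) index and a view with offset 2.
import numpy as np
from paramz.core.index_operations import ParameterIndexOperations, ParameterIndexOperationsView

ops = ParameterIndexOperations()
ops.add('key1', np.array([4, 5]))   # global indices, as in the model row
ops.add('key2', np.array([7, 8]))
param1 = ParameterIndexOperationsView(ops, offset=2, size=6)
print(dict(param1.items()))         # {'key1': array([2, 3]), 'key2': array([5])} -- local indices
param1.add('key3', np.array([0]))   # added through the view ...
print(ops['key3'])                  # array([2]) ... lands in the global index with +2 offset
```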


@@ -1,139 +0,0 @@
# Copyright (c) 2014, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from collections import defaultdict
import weakref
def intarray_default_factory():
import numpy as np
return np.int_([])
class IntArrayDict(defaultdict):
def __init__(self, default_factory=None):
"""
Default will be self._default, if not set otherwise
"""
defaultdict.__init__(self, intarray_default_factory)
class ArrayList(list):
"""
List to store ndarray-likes in.
It compares elements with 'is' instead of calling __eq__ on each element.
"""
def __contains__(self, other):
for el in self:
if el is other:
return True
return False
def index(self, item):
index = 0
for el in self:
if el is item:
return index
index += 1
raise ValueError("{} is not in list".format(item))
pass
class ObserverList(object):
"""
A list which contains the observers.
It only holds weak references to observers, such that unreferenced
observers don't dangle in memory.
"""
def __init__(self):
self._poc = []
def __getitem__(self, ind):
p,o,c = self._poc[ind]
return p, o(), c
def remove(self, priority, observer, callble):
"""
Remove the observer which was added with this priority and callble.
"""
self.flush()
for i in range(len(self) - 1, -1, -1):
p,o,c = self[i]
if priority==p and observer==o and callble==c:
del self._poc[i]
def __repr__(self):
return self._poc.__repr__()
def add(self, priority, observer, callble):
"""
Add an observer with priority and callble
"""
if observer is not None:
ins = 0
for pr, _, _ in self:
if priority > pr:
break
ins += 1
self._poc.insert(ins, (priority, weakref.ref(observer), callble))
def __str__(self):
from . import ObsAr, Param
from .parameter_core import Parameterizable
ret = []
curr_p = None
def frmt(o):
if isinstance(o, ObsAr):
return 'ObsArr <{}>'.format(hex(id(o)))
elif isinstance(o, (Param,Parameterizable)):
return '{}'.format(o.hierarchy_name())
else:
return repr(o)
for p, o, c in self:
curr = ''
if curr_p != p:
pre = "{!s}: ".format(p)
curr_pre = pre
else: curr_pre = " "*len(pre)
curr_p = p
curr += curr_pre
ret.append(curr + ", ".join([frmt(o), str(c)]))
return '\n'.join(ret)
def flush(self):
"""
Make sure all weak references which point to nothing are flushed (deleted).
"""
self._poc = [(p,o,c) for p,o,c in self._poc if o() is not None]
def __iter__(self):
self.flush()
for p, o, c in self._poc:
yield p, o(), c
def __len__(self):
self.flush()
return self._poc.__len__()
def __deepcopy__(self, memo):
s = ObserverList()
for p,o,c in self:
import copy
s.add(p, copy.deepcopy(o, memo), copy.deepcopy(c, memo))
s.flush()
return s
def __getstate__(self):
self.flush()
from ...util.caching import Cacher
obs = []
for p, o, c in self:
if (getattr(o, c.__name__, None) is not None
and not isinstance(o, Cacher)):
obs.append((p,o,c.__name__))
return obs
def __setstate__(self, state):
self._poc = []
for p, o, c in state:
self.add(p,o,getattr(o, c))
pass


@@ -1,71 +0,0 @@
# Copyright (c) 2014, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)
class Observable(object):
"""
Observable pattern for parameterization.
This object allows observers to register themselves with a (bound!) function
as the callback. Every time the observable changes, it sends a notification with
itself as the only argument to all its observers.
"""
def __init__(self, *args, **kwargs):
super(Observable, self).__init__()
from .lists_and_dicts import ObserverList
self.observers = ObserverList()
self._update_on = True
def set_updates(self, on=True):
self._update_on = on
def add_observer(self, observer, callble, priority=0):
"""
Add an observer `observer` with the callback `callble`
and priority `priority` to this observers list.
"""
self.observers.add(priority, observer, callble)
def remove_observer(self, observer, callble=None):
"""
Either (if callble is None) remove all callables,
which were added alongside observer,
or remove callable `callble` which was added alongside
the observer `observer`.
"""
to_remove = []
for poc in self.observers:
_, obs, clble = poc
if callble is not None:
if (obs is observer) and (callble == clble):
to_remove.append(poc)
else:
if obs is observer:
to_remove.append(poc)
for r in to_remove:
self.observers.remove(*r)
def notify_observers(self, which=None, min_priority=None):
"""
Notify all observers. `which` is the element which kicked off this
notification loop. The first argument will be self, the second `which`.
NOTE: notifies only observers with priority p > min_priority!
^^^^^^^^^^^^^^^^
:param min_priority: only notify observers with priority > min_priority
if min_priority is None, notify all observers in order
"""
if self._update_on:
if which is None:
which = self
if min_priority is None:
[callble(self, which=which) for _, _, callble in self.observers]
else:
for p, _, callble in self.observers:
if p <= min_priority:
break
callble(self, which=which)
def change_priority(self, observer, callble, priority):
self.remove_observer(observer, callble)
self.add_observer(observer, callble, priority)
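A minimal sketch of the pattern; the Observable class ships in paramz after this merge, and the Printer observer is illustrative:

```python
# Hypothetical observer example; not part of this commit.
from paramz.core.observable import Observable

class Printer(object):
    def notified(self, me, which=None):
        print('notified by', which)

obs = Observable()
p = Printer()                    # keep a strong reference: observers are weakly held
obs.add_observer(p, p.notified)  # default priority 0
obs.notify_observers()           # calls p.notified(obs, which=obs)
obs.remove_observer(p)           # callble=None removes every callback added with p
```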


@@ -1,147 +0,0 @@
# Copyright (c) 2014, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .parameter_core import Pickleable
from .observable import Observable
class ObsAr(np.ndarray, Pickleable, Observable):
"""
An ndarray which reports changes to its observers.
The observers can add themselves with a callable, which
will be called every time this array changes. The callable
takes exactly one argument, which is this array itself.
"""
__array_priority__ = -1 # Never give back ObsAr
def __new__(cls, input_array, *a, **kw):
# always make a copy of input parameters, as we need it to be in C order:
if not isinstance(input_array, ObsAr):
obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['W', 'C'])).view(cls)
else: obj = input_array
super(ObsAr, obj).__init__(*a, **kw)
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None: return
self.observers = getattr(obj, 'observers', None)
def __array_wrap__(self, out_arr, context=None):
return out_arr.view(np.ndarray)
def _setup_observers(self):
# do not setup anything, as observable arrays do not have default observers
pass
@property
def values(self):
return self.view(np.ndarray)
def copy(self):
from .lists_and_dicts import ObserverList
memo = {}
memo[id(self)] = self
memo[id(self.observers)] = ObserverList()
return self.__deepcopy__(memo)
def __deepcopy__(self, memo):
s = self.__new__(self.__class__, input_array=self.view(np.ndarray).copy())
memo[id(self)] = s
import copy
Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
return s
def __reduce__(self):
func, args, state = super(ObsAr, self).__reduce__()
return func, args, (state, Pickleable.__getstate__(self))
def __setstate__(self, state):
np.ndarray.__setstate__(self, state[0])
Pickleable.__setstate__(self, state[1])
def __setitem__(self, s, val):
super(ObsAr, self).__setitem__(s, val)
self.notify_observers()
def __getslice__(self, start, stop):
return self.__getitem__(slice(start, stop))
def __setslice__(self, start, stop, val):
return self.__setitem__(slice(start, stop), val)
def __ilshift__(self, *args, **kwargs):
r = np.ndarray.__ilshift__(self, *args, **kwargs)
self.notify_observers()
return r
def __irshift__(self, *args, **kwargs):
r = np.ndarray.__irshift__(self, *args, **kwargs)
self.notify_observers()
return r
def __ixor__(self, *args, **kwargs):
r = np.ndarray.__ixor__(self, *args, **kwargs)
self.notify_observers()
return r
def __ipow__(self, *args, **kwargs):
r = np.ndarray.__ipow__(self, *args, **kwargs)
self.notify_observers()
return r
def __ifloordiv__(self, *args, **kwargs):
r = np.ndarray.__ifloordiv__(self, *args, **kwargs)
self.notify_observers()
return r
def __isub__(self, *args, **kwargs):
r = np.ndarray.__isub__(self, *args, **kwargs)
self.notify_observers()
return r
def __ior__(self, *args, **kwargs):
r = np.ndarray.__ior__(self, *args, **kwargs)
self.notify_observers()
return r
def __itruediv__(self, *args, **kwargs):
r = np.ndarray.__itruediv__(self, *args, **kwargs)
self.notify_observers()
return r
def __idiv__(self, *args, **kwargs):
r = np.ndarray.__idiv__(self, *args, **kwargs)
self.notify_observers()
return r
def __iand__(self, *args, **kwargs):
r = np.ndarray.__iand__(self, *args, **kwargs)
self.notify_observers()
return r
def __imod__(self, *args, **kwargs):
r = np.ndarray.__imod__(self, *args, **kwargs)
self.notify_observers()
return r
def __iadd__(self, *args, **kwargs):
r = np.ndarray.__iadd__(self, *args, **kwargs)
self.notify_observers()
return r
def __imul__(self, *args, **kwargs):
r = np.ndarray.__imul__(self, *args, **kwargs)
self.notify_observers()
return r
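A small sketch of the notification behaviour described above; the Watcher observer is illustrative:

```python
# Hypothetical ObsAr example; not part of this commit.
import numpy as np
from paramz import ObsAr

class Watcher(object):
    def ping(self, arr, which=None):
        print('changed ->', arr.values)

a = ObsAr(np.arange(3.0))
w = Watcher()              # must stay referenced: observers are weakly held
a.add_observer(w, w.ping)
a[1] = 42.0                # __setitem__ notifies the watcher
a += 1.0                   # the in-place operators above notify as well
```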


@@ -1,496 +1,10 @@
# Copyright (c) 2014, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import itertools
import numpy
np = numpy
from .parameter_core import Parameterizable, adjust_name_for_printing, Pickleable
from .observable_array import ObsAr
from functools import reduce
from paramz import Param
from .priorizable import Priorizable
from paramz.transformations import __fixed__
import logging, numpy as np
###### printing
__constraints_name__ = "Constraint"
__index_name__ = "Index"
__tie_name__ = "Tied to"
__priors_name__ = "Prior"
__precision__ = numpy.get_printoptions()['precision'] # numpy printing precision used, subclassing numpy ndarray after all
__print_threshold__ = 5
######
class Param(Parameterizable, ObsAr):
"""
Parameter object for GPy models.
:param str name: name of the parameter to be printed
:param input_array: array which this parameter handles
:type input_array: numpy.ndarray
:param default_constraint: The default constraint for this parameter
:type default_constraint:
You can add/remove constraints by calling constrain on the parameter itself, e.g:
- self[:,1].constrain_positive()
- self[0].tie_to(other)
- self.untie()
- self[:3,:].unconstrain()
- self[1].fix()
Fixing parameters will fix them to the value they are right now. If you change
the fixed value, it will be fixed to the new value!
Important Note:
Multilevel indexing (e.g. self[:2][1:]) is not supported and might lead to unexpected behaviour.
Try to index in one go, using boolean indexing or numpy fancy indexing.
See :py:class:`GPy.core.parameterized.Parameterized` for more details on constraining etc.
"""
__array_priority__ = -1 # Never give back Param
_fixes_ = None
parameters = []
def __new__(cls, name, input_array, default_constraint=None):
obj = numpy.atleast_1d(super(Param, cls).__new__(cls, input_array=input_array))
obj._current_slice_ = (slice(obj.shape[0]),)
obj._realshape_ = obj.shape
obj._realsize_ = obj.size
obj._realndim_ = obj.ndim
obj._original_ = obj
return obj
def __init__(self, name, input_array, default_constraint=None, *a, **kw):
self._in_init_ = True
super(Param, self).__init__(name=name, default_constraint=default_constraint, *a, **kw)
self._in_init_ = False
def build_pydot(self,G):
import pydot
node = pydot.Node(id(self), shape='trapezium', label=self.name)#, fontcolor='white', color='white')
G.add_node(node)
for _, o, _ in self.observers:
label = o.name if hasattr(o, 'name') else str(o)
observed_node = pydot.Node(id(o), label=label)
G.add_node(observed_node)
edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
G.add_edge(edge)
return node
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None: return
super(Param, self).__array_finalize__(obj)
self._parent_ = getattr(obj, '_parent_', None)
self._parent_index_ = getattr(obj, '_parent_index_', None)
self._default_constraint_ = getattr(obj, '_default_constraint_', None)
self._current_slice_ = getattr(obj, '_current_slice_', None)
self._realshape_ = getattr(obj, '_realshape_', None)
self._realsize_ = getattr(obj, '_realsize_', None)
self._realndim_ = getattr(obj, '_realndim_', None)
self._original_ = getattr(obj, '_original_', None)
self._name = getattr(obj, '_name', None)
self._gradient_array_ = getattr(obj, '_gradient_array_', None)
self._update_on = getattr(obj, '_update_on', None)
self.constraints = getattr(obj, 'constraints', None)
self.priors = getattr(obj, 'priors', None)
@property
def param_array(self):
"""
As we are a leaf, this just returns self
"""
return self
@property
def values(self):
"""
Return self as numpy array view
"""
return self.view(np.ndarray)
@property
def gradient(self):
"""
Return a view on the gradient, which is in the same shape as this parameter is.
Note: this is not the real gradient array, it is just a view on it.
To work on the real gradient array use: self.full_gradient
"""
if getattr(self, '_gradient_array_', None) is None:
self._gradient_array_ = numpy.empty(self._realshape_, dtype=numpy.float64)
return self._gradient_array_#[self._current_slice_]
@gradient.setter
def gradient(self, val):
self._gradient_array_[:] = val
#===========================================================================
# Array operations -> done
#===========================================================================
def __getitem__(self, s, *args, **kwargs):
if not isinstance(s, tuple):
s = (s,)
#if not reduce(lambda a, b: a or numpy.any(b is Ellipsis), s, False) and len(s) <= self.ndim:
# s += (Ellipsis,)
new_arr = super(Param, self).__getitem__(s, *args, **kwargs)
try:
new_arr._current_slice_ = s
new_arr._gradient_array_ = self.gradient[s]
new_arr._original_ = self._original_
except AttributeError: pass # returning 0d array or float, double etc
return new_arr
def _raveled_index(self, slice_index=None):
# return an index array on the raveled array, which is formed by the current_slice
# of this object
extended_realshape = numpy.cumprod((1,) + self._realshape_[:0:-1])[::-1]
ind = self._indices(slice_index)
if ind.ndim < 2: ind = ind[:, None]
return numpy.asarray(numpy.apply_along_axis(lambda x: numpy.sum(extended_realshape * x), 1, ind), dtype=int)
def _raveled_index_for(self, obj):
return self._raveled_index()
#===========================================================================
# Constrainable
#===========================================================================
def _ensure_fixes(self):
if not self._has_fixes(): self._fixes_ = numpy.ones(self._realsize_, dtype=bool)
#===========================================================================
# Convenience
#===========================================================================
@property
def is_fixed(self):
from .transformations import __fixed__
return self.constraints[__fixed__].size == self.size
def _get_original(self, param):
return self._original_
#===========================================================================
# Pickling and copying
#===========================================================================
def copy(self):
return Parameterizable.copy(self, which=self)
def __deepcopy__(self, memo):
s = self.__new__(self.__class__, name=self.name, input_array=self.view(numpy.ndarray).copy())
memo[id(self)] = s
import copy
Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
return s
def _setup_observers(self):
"""
Setup the default observers
1: pass through to parent, if present
"""
if self.has_parent():
self.add_observer(self._parent_, self._parent_._pass_through_notify_observers, -np.inf)
#===========================================================================
# Printing -> done
#===========================================================================
@property
def _description_str(self):
if self.size <= 1:
return [str(self.view(numpy.ndarray)[0])]
else: return [str(self.shape)]
def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True):
# this is just overriding the parameterized calls to parameter names, in order to maintain OOP
if adjust_for_printing:
return [adjust_name_for_printing(self.name)]
return [self.name]
@property
def flattened_parameters(self):
return [self]
@property
def parameter_shapes(self):
return [self.shape]
@property
def num_params(self):
return 0
@property
def _constraints_str(self):
#py3 fix
#return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.iteritems()))]
return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.constraints.items()))]
@property
def _priors_str(self):
#py3 fix
#return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.iteritems()))]
return [' '.join(map(lambda c: str(c[0]) if c[1].size == self._realsize_ else "{" + str(c[0]) + "}", self.priors.items()))]
@property
def _ties_str(self):
return ['']
def _ties_for(self, ravi):
return [['N/A']]*ravi.size
def __repr__(self, *args, **kwargs):
name = "\033[1m{x:s}\033[0;0m:\n".format(
x=self.hierarchy_name())
return name + super(Param, self).__repr__(*args, **kwargs)
def _indices(self, slice_index=None):
# get a int-array containing all indices in the first axis.
if slice_index is None:
slice_index = self._current_slice_
try:
indices = np.indices(self._realshape_, dtype=int)
indices = indices[(slice(None),)+slice_index]
indices = np.rollaxis(indices, 0, indices.ndim).reshape(-1,self._realndim_)
#print indices_
#if not np.all(indices==indices__):
# import ipdb; ipdb.set_trace()
except:
indices = np.indices(self._realshape_, dtype=int)
indices = indices[(slice(None),)+slice_index]
indices = np.rollaxis(indices, 0, indices.ndim)
return indices
def _max_len_names(self, gen, header):
gen = map(lambda x: " ".join(map(str, x)), gen)
return reduce(lambda a, b:max(a, len(b)), gen, len(header))
def _max_len_values(self):
return reduce(lambda a, b:max(a, len("{x:=.{0}g}".format(__precision__, x=b))), self.flat, len(self.hierarchy_name()))
def _max_len_index(self, ind):
return reduce(lambda a, b:max(a, len(str(b))), ind, len(__index_name__))
def _short(self):
# short string to print
name = self.hierarchy_name()
if self._realsize_ < 2:
return name
ind = self._indices()
if ind.size > 4: indstr = ','.join(map(str, ind[:2])) + "..." + ','.join(map(str, ind[-2:]))
else: indstr = ','.join(map(str, ind))
return name + '[' + indstr + ']'
def _repr_html_(self, constr_matrix=None, indices=None, prirs=None, ties=None):
"""Representation of the parameter in html for notebook display."""
filter_ = self._current_slice_
vals = self.flat
if indices is None: indices = self._indices(filter_)
ravi = self._raveled_index(filter_)
if constr_matrix is None: constr_matrix = self.constraints.properties_for(ravi)
if prirs is None: prirs = self.priors.properties_for(ravi)
if ties is None: ties = self._ties_for(ravi)
ties = [' '.join(map(lambda x: x, t)) for t in ties]
header_format = """
<tr>
<th><b>{i}</b></th>
<th><b>{x}</b></th>
<th><b>{c}</b></th>
<th><b>{p}</b></th>
<th><b>{t}</b></th>
</tr>"""
header = header_format.format(x=self.hierarchy_name(), c=__constraints_name__, i=__index_name__, t=__tie_name__, p=__priors_name__) # nice header for printing
if not ties: ties = itertools.cycle([''])
return "\n".join(["""<style type="text/css">
.tg {padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""] + ['<table class="tg">'] + [header] + ["<tr><td class=tg-left>{i}</td><td class=tg-right>{x}</td><td class=tg-left>{c}</td><td class=tg-left>{p}</td><td class=tg-left>{t}</td></tr>".format(x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in zip(indices, vals, constr_matrix, ties, prirs)] + ["</table>"])
def __str__(self, constr_matrix=None, indices=None, prirs=None, ties=None, lc=None, lx=None, li=None, lp=None, lt=None, only_name=False):
filter_ = self._current_slice_
vals = self.flat
if indices is None: indices = self._indices(filter_)
ravi = self._raveled_index(filter_)
if constr_matrix is None: constr_matrix = self.constraints.properties_for(ravi)
if prirs is None: prirs = self.priors.properties_for(ravi)
if ties is None: ties = self._ties_for(ravi)
ties = [' '.join(map(lambda x: x, t)) for t in ties]
if lc is None: lc = self._max_len_names(constr_matrix, __constraints_name__)
if lx is None: lx = self._max_len_values()
if li is None: li = self._max_len_index(indices)
if lt is None: lt = self._max_len_names(ties, __tie_name__)
if lp is None: lp = self._max_len_names(prirs, __tie_name__)
sep = '-'
header_format = " {i:{5}^{2}s} | \033[1m{x:{5}^{1}s}\033[0;0m | {c:{5}^{0}s} | {p:{5}^{4}s} | {t:{5}^{3}s}"
if only_name: header = header_format.format(lc, lx, li, lt, lp, ' ', x=self.hierarchy_name(), c=sep*lc, i=sep*li, t=sep*lt, p=sep*lp) # nice header for printing
else: header = header_format.format(lc, lx, li, lt, lp, ' ', x=self.hierarchy_name(), c=__constraints_name__, i=__index_name__, t=__tie_name__, p=__priors_name__) # nice header for printing
if not ties: ties = itertools.cycle([''])
return "\n".join([header] + [" {i!s:^{3}s} | {x: >{1}.{2}g} | {c:^{0}s} | {p:^{5}s} | {t:^{4}s} ".format(lc, lx, __precision__, li, lt, lp, x=x, c=" ".join(map(str, c)), p=" ".join(map(str, p)), t=(t or ''), i=i) for i, x, c, t, p in zip(indices, vals, constr_matrix, ties, prirs)]) # return all the constraints with right indices
# except: return super(Param, self).__str__()
class ParamConcatenation(object):
def __init__(self, params):
"""
Parameter concatenation, for convenient printing of regular-expression-matched arrays.
You can index this concatenation as if it were the flattened concatenation
of all the parameters it contains; the same holds for setting parameters (broadcasting enabled).
See :py:class:`GPy.core.parameter.Param` for more details on constraining.
"""
# self.params = params
from .lists_and_dicts import ArrayList
self.params = ArrayList([])
for p in params:
for p in p.flattened_parameters:
if p not in self.params:
self.params.append(p)
self._param_sizes = [p.size for p in self.params]
startstops = numpy.cumsum([0] + self._param_sizes)
self._param_slices_ = [slice(start, stop) for start,stop in zip(startstops, startstops[1:])]
parents = dict()
for p in self.params:
if p.has_parent():
parent = p._parent_
level = 0
while parent is not None:
if parent in parents:
parents[parent] = max(level, parents[parent])
else:
parents[parent] = level
level += 1
parent = parent._parent_
import operator
#py3 fix
#self.parents = map(lambda x: x[0], sorted(parents.iteritems(), key=operator.itemgetter(1)))
self.parents = map(lambda x: x[0], sorted(parents.items(), key=operator.itemgetter(1)))
#===========================================================================
# Get/set items, enable broadcasting
#===========================================================================
def __getitem__(self, s):
ind = numpy.zeros(sum(self._param_sizes), dtype=bool); ind[s] = True;
params = [p.param_array.flat[ind[ps]] for p,ps in zip(self.params, self._param_slices_) if numpy.any(p.param_array.flat[ind[ps]])]
if len(params)==1: return params[0]
return ParamConcatenation(params)
def __setitem__(self, s, val, update=True):
if isinstance(val, ParamConcatenation):
val = val.values()
ind = numpy.zeros(sum(self._param_sizes), dtype=bool); ind[s] = True;
vals = self.values(); vals[s] = val
for p, ps in zip(self.params, self._param_slices_):
p.flat[ind[ps]] = vals[ps]
if update:
self.update_all_params()
def values(self):
return numpy.hstack([p.param_array.flat for p in self.params])
#===========================================================================
# parameter operations:
#===========================================================================
def update_all_params(self):
for par in self.parents:
par.trigger_update(trigger_parent=False)
def constrain(self, constraint, warning=True):
[param.constrain(constraint, trigger_parent=False) for param in self.params]
self.update_all_params()
constrain.__doc__ = Param.constrain.__doc__
def constrain_positive(self, warning=True):
[param.constrain_positive(warning, trigger_parent=False) for param in self.params]
self.update_all_params()
constrain_positive.__doc__ = Param.constrain_positive.__doc__
def constrain_fixed(self, value=None, warning=True, trigger_parent=True):
[param.constrain_fixed(value, warning, trigger_parent) for param in self.params]
constrain_fixed.__doc__ = Param.constrain_fixed.__doc__
fix = constrain_fixed
def constrain_negative(self, warning=True):
[param.constrain_negative(warning, trigger_parent=False) for param in self.params]
self.update_all_params()
constrain_negative.__doc__ = Param.constrain_negative.__doc__
def constrain_bounded(self, lower, upper, warning=True):
[param.constrain_bounded(lower, upper, warning, trigger_parent=False) for param in self.params]
self.update_all_params()
constrain_bounded.__doc__ = Param.constrain_bounded.__doc__
def unconstrain(self, *constraints):
[param.unconstrain(*constraints) for param in self.params]
unconstrain.__doc__ = Param.unconstrain.__doc__
def unconstrain_negative(self):
[param.unconstrain_negative() for param in self.params]
unconstrain_negative.__doc__ = Param.unconstrain_negative.__doc__
def unconstrain_positive(self):
[param.unconstrain_positive() for param in self.params]
unconstrain_positive.__doc__ = Param.unconstrain_positive.__doc__
def unconstrain_fixed(self):
[param.unconstrain_fixed() for param in self.params]
unconstrain_fixed.__doc__ = Param.unconstrain_fixed.__doc__
unfix = unconstrain_fixed
def unconstrain_bounded(self, lower, upper):
[param.unconstrain_bounded(lower, upper) for param in self.params]
unconstrain_bounded.__doc__ = Param.unconstrain_bounded.__doc__
def untie(self, *ties):
[param.untie(*ties) for param in self.params]
def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
return self.params[0]._highest_parent_._checkgrad(self, verbose, step, tolerance)
#checkgrad.__doc__ = Gradcheckable.checkgrad.__doc__
__lt__ = lambda self, val: self.values() < val
__le__ = lambda self, val: self.values() <= val
__eq__ = lambda self, val: self.values() == val
__ne__ = lambda self, val: self.values() != val
__gt__ = lambda self, val: self.values() > val
__ge__ = lambda self, val: self.values() >= val
def __str__(self, *args, **kwargs):
def f(p):
ind = p._raveled_index()
return p.constraints.properties_for(ind), p._ties_for(ind), p.priors.properties_for(ind)
params = self.params
constr_matrices, ties_matrices, prior_matrices = zip(*map(f, params))
indices = [p._indices() for p in params]
lc = max([p._max_len_names(cm, __constraints_name__) for p, cm in zip(params, constr_matrices)])
lx = max([p._max_len_values() for p in params])
li = max([p._max_len_index(i) for p, i in zip(params, indices)])
lt = max([p._max_len_names(tm, __tie_name__) for p, tm in zip(params, ties_matrices)])
lp = max([p._max_len_names(pm, __constraints_name__) for p, pm in zip(params, prior_matrices)])
strings = []
start = True
for p, cm, i, tm, pm in zip(params,constr_matrices,indices,ties_matrices,prior_matrices):
strings.append(p.__str__(constr_matrix=cm, indices=i, prirs=pm, ties=tm, lc=lc, lx=lx, li=li, lp=lp, lt=lt, only_name=(1-start)))
start = False
return "\n".join(strings)
def __repr__(self):
return "\n".join(map(repr,self.params))
# Every in-place operator works on a copy of the concatenated values,
# writes the result back through __setitem__ (which broadcasts into the
# underlying Params) and returns self -- without the return, augmented
# assignment such as `pc += 1` would rebind the name to None.
def __ilshift__(self, *args, **kwargs):
self[:] = np.ndarray.__ilshift__(self.values(), *args, **kwargs)
return self
def __irshift__(self, *args, **kwargs):
self[:] = np.ndarray.__irshift__(self.values(), *args, **kwargs)
return self
def __ixor__(self, *args, **kwargs):
self[:] = np.ndarray.__ixor__(self.values(), *args, **kwargs)
return self
def __ipow__(self, *args, **kwargs):
self[:] = np.ndarray.__ipow__(self.values(), *args, **kwargs)
return self
def __ifloordiv__(self, *args, **kwargs):
self[:] = np.ndarray.__ifloordiv__(self.values(), *args, **kwargs)
return self
def __isub__(self, *args, **kwargs):
self[:] = np.ndarray.__isub__(self.values(), *args, **kwargs)
return self
def __ior__(self, *args, **kwargs):
self[:] = np.ndarray.__ior__(self.values(), *args, **kwargs)
return self
def __itruediv__(self, *args, **kwargs):
self[:] = np.ndarray.__itruediv__(self.values(), *args, **kwargs)
return self
def __idiv__(self, *args, **kwargs):
self[:] = np.ndarray.__idiv__(self.values(), *args, **kwargs)
return self
def __iand__(self, *args, **kwargs):
self[:] = np.ndarray.__iand__(self.values(), *args, **kwargs)
return self
def __imod__(self, *args, **kwargs):
self[:] = np.ndarray.__imod__(self.values(), *args, **kwargs)
return self
def __iadd__(self, *args, **kwargs):
self[:] = np.ndarray.__iadd__(self.values(), *args, **kwargs)
return self
def __imul__(self, *args, **kwargs):
self[:] = np.ndarray.__imul__(self.values(), *args, **kwargs)
return self
class Param(Param, Priorizable):
pass
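
A minimal usage sketch of the concatenation machinery above (illustrative only, not part of this diff; assumes GPy with this merge is installed and uses a made-up regression model): regexp indexing on a model returns a single Param or a ParamConcatenation, and assignments and constraint calls broadcast back to the underlying parameters.

import numpy as np
import GPy

X = np.random.rand(20, 1)
Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
m = GPy.models.GPRegression(X, Y)

pc = m['.*variance']        # matches rbf.variance and Gaussian_noise.variance
m['.*variance'] = 1.        # broadcast assignment to every matched Param
pc.constrain_positive()     # fans out to each Param, then triggers one update
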

File diff suppressed because it is too large


@ -1,34 +1,13 @@
# Copyright (c) 2014, Max Zwiessele, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import six # For metaclass support in Python 2 and 3 simultaneously
import numpy; np = numpy
import itertools
from re import compile, _pattern_type
from .param import ParamConcatenation
from .parameter_core import HierarchyError, Parameterizable, adjust_name_for_printing
from paramz import Parameterized
from .priorizable import Priorizable
import logging
from .index_operations import ParameterIndexOperationsView
logger = logging.getLogger("parameters changed meta")
class ParametersChangedMeta(type):
def __call__(self, *args, **kw):
self._in_init_ = True
#import ipdb;ipdb.set_trace()
self = super(ParametersChangedMeta, self).__call__(*args, **kw)
logger.debug("finished init")
self._in_init_ = False
logger.debug("connecting parameters")
self._highest_parent_._connect_parameters()
#self._highest_parent_._notify_parent_change()
self._highest_parent_._connect_fixes()
logger.debug("calling parameters changed")
self.parameters_changed()
return self
@six.add_metaclass(ParametersChangedMeta)
class Parameterized(Parameterizable):
class Parameterized(Parameterized, Priorizable):
"""
Parameterized class
@ -69,365 +48,5 @@ class Parameterized(Parameterizable):
If you want to operate on all parameters use m[''] to wildcard select all parameters
and concatenate them. Printing m[''] will result in printing of all parameters in detail.
"""
#===========================================================================
# Metaclass for parameters changed after init.
# This makes sure that parameters_changed() is always called after __init__
# **Never** call parameters_changed() yourself
#This is ignored in Python 3 -- there the metaclass has to be passed in the class definition.
#__metaclass__ = ParametersChangedMeta
#The six module is used to support both Python 2 and 3 simultaneously
#===========================================================================
def __init__(self, name=None, parameters=[], *a, **kw):
super(Parameterized, self).__init__(name=name, *a, **kw)
self.size = sum(p.size for p in self.parameters)
self.add_observer(self, self._parameters_changed_notification, -100)
if not self._has_fixes():
self._fixes_ = None
self._param_slices_ = []
#self._connect_parameters()
self.link_parameters(*parameters)
def build_pydot(self, G=None):
import pydot # @UnresolvedImport
iamroot = False
if G is None:
G = pydot.Dot(graph_type='digraph', bgcolor=None)
iamroot=True
node = pydot.Node(id(self), shape='box', label=self.name)#, color='white')
G.add_node(node)
for child in self.parameters:
child_node = child.build_pydot(G)
G.add_edge(pydot.Edge(node, child_node))#, color='white'))
for _, o, _ in self.observers:
label = o.name if hasattr(o, 'name') else str(o)
observed_node = pydot.Node(id(o), label=label)
G.add_node(observed_node)
edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
G.add_edge(edge)
if iamroot:
return G
return node
#===========================================================================
# Add remove parameters:
#===========================================================================
def link_parameter(self, param, index=None, _ignore_added_names=False):
"""
:param param: the parameter to add
:type param: list of or one :py:class:`GPy.core.param.Param`
:param [index]: index of where to put the parameter
:param bool _ignore_added_names: whether the name of the parameter overrides a possibly existing field
Add the parameter to this parameterized object; you can insert it
at any given index using the :func:`list.insert` syntax.
"""
if param in self.parameters and index is not None:
self.unlink_parameter(param)
self.link_parameter(param, index)
# elif param.has_parent():
# raise HierarchyError, "parameter {} already in another model ({}), create new object (or copy) for adding".format(param._short(), param._highest_parent_._short())
elif param not in self.parameters:
if param.has_parent():
def visit(parent, self):
if parent is self:
raise HierarchyError("You cannot add a parameter twice into the hierarchy")
param.traverse_parents(visit, self)
param._parent_.unlink_parameter(param)
# make sure the size is set
if index is None:
start = sum(p.size for p in self.parameters)
self.constraints.shift_right(start, param.size)
self.priors.shift_right(start, param.size)
self.constraints.update(param.constraints, self.size)
self.priors.update(param.priors, self.size)
param._parent_ = self
param._parent_index_ = len(self.parameters)
self.parameters.append(param)
else:
start = sum(p.size for p in self.parameters[:index])
self.constraints.shift_right(start, param.size)
self.priors.shift_right(start, param.size)
self.constraints.update(param.constraints, start)
self.priors.update(param.priors, start)
param._parent_ = self
param._parent_index_ = index if index>=0 else len(self.parameters[:index])
for p in self.parameters[index:]:
p._parent_index_ += 1
self.parameters.insert(index, param)
param.add_observer(self, self._pass_through_notify_observers, -np.inf)
parent = self
while parent is not None:
parent.size += param.size
parent = parent._parent_
self._notify_parent_change()
if not self._in_init_:
#self._connect_parameters()
#self._notify_parent_change()
self._highest_parent_._connect_parameters(ignore_added_names=_ignore_added_names)
self._highest_parent_._notify_parent_change()
self._highest_parent_._connect_fixes()
else:
raise HierarchyError("""Parameter exists already, try making a copy""")
def link_parameters(self, *parameters):
"""
convenience method for adding several
parameters without gradient specification
"""
[self.link_parameter(p) for p in parameters]
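
A short sketch of the linking API, assuming GPy's public Param/Parameterized classes are used directly (the container and parameter names here are illustrative, not from this diff):

import numpy as np
from GPy.core.parameterization import Param, Parameterized

c = Parameterized('container')
w = Param('weights', np.ones(3))
b = Param('bias', np.zeros(1))
c.link_parameters(w, b)   # link both; they become fields on the container
c.weights[:] = 2.         # linked parameters are exposed by name
print(c)                  # prints the parameter table for the hierarchy
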
def unlink_parameter(self, param):
"""
:param param: param object to remove from being a parameter of this parameterized object.
"""
if not param in self.parameters:
try:
raise RuntimeError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
except AttributeError:
raise RuntimeError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
start = sum([p.size for p in self.parameters[:param._parent_index_]])
self.size -= param.size
del self.parameters[param._parent_index_]
self._remove_parameter_name(param)
param._disconnect_parent()
param.remove_observer(self, self._pass_through_notify_observers)
self.constraints.shift_left(start, param.size)
self._connect_parameters()
self._notify_parent_change()
parent = self._parent_
while parent is not None:
parent.size -= param.size
parent = parent._parent_
self._highest_parent_._connect_parameters()
self._highest_parent_._connect_fixes()
self._highest_parent_._notify_parent_change()
def add_parameter(self, *args, **kwargs):
raise DeprecationWarning("add_parameter was renamed to link_parameter to avoid confusion of setting variables, use link_parameter instead")
def remove_parameter(self, *args, **kwargs):
raise DeprecationWarning("remove_parameter was renamed to unlink_parameter to avoid confusion of setting variables, use unlink_parameter instead")
def _connect_parameters(self, ignore_added_names=False):
# connect parameterlist to this parameterized object
# This just sets up the right connection for the params objects
# to be used as parameters
# it also sets the constraints for each parameter to the constraints
# of their respective parents
if not hasattr(self, "parameters") or len(self.parameters) < 1:
# no parameters for this class
return
if self.param_array.size != self.size:
self._param_array_ = np.empty(self.size, dtype=np.float64)
if self.gradient.size != self.size:
self._gradient_array_ = np.empty(self.size, dtype=np.float64)
old_size = 0
self._param_slices_ = []
for i, p in enumerate(self.parameters):
if not p.param_array.flags['C_CONTIGUOUS']:
raise ValueError("This should not happen! Please write an email to the developers with the code, which reproduces this error. All parameter arrays must be C_CONTIGUOUS")
p._parent_ = self
p._parent_index_ = i
pslice = slice(old_size, old_size + p.size)
# first connect all children
p._propagate_param_grad(self.param_array[pslice], self.gradient_full[pslice])
# then connect children to self
self.param_array[pslice] = p.param_array.flat # , requirements=['C', 'W']).ravel(order='C')
self.gradient_full[pslice] = p.gradient_full.flat # , requirements=['C', 'W']).ravel(order='C')
p.param_array.data = self.param_array[pslice].data
p.gradient_full.data = self.gradient_full[pslice].data
self._param_slices_.append(pslice)
self._add_parameter_name(p, ignore_added_names=ignore_added_names)
old_size += p.size
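
The repointing of `param_array.data` above makes every child array a view into one flat buffer owned by the root, so values and gradients are stored exactly once in the hierarchy. A plain-numpy sketch of the same idea, using an ordinary slice view (illustrative only):

import numpy as np

parent = np.zeros(5)     # the root's flat param_array
child = parent[1:4]      # a view, analogous to the repointed child array
child[0] = 42.
assert parent[1] == 42.  # writes through the child hit the shared buffer
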
#===========================================================================
# Get/set parameters:
#===========================================================================
def grep_param_names(self, regexp):
"""
create a list of parameters, matching regular expression regexp
"""
if not isinstance(regexp, _pattern_type): regexp = compile(regexp)
found_params = []
for n, p in zip(self.parameter_names(False, False, True), self.flattened_parameters):
if regexp.match(n) is not None:
found_params.append(p)
return found_params
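
The selection semantics are those of `re.match` against the full hierarchy names. A standalone sketch with made-up parameter names:

import re

names = ['rbf.variance', 'rbf.lengthscale', 'Gaussian_noise.variance']
pattern = re.compile('.*variance')
print([n for n in names if pattern.match(n) is not None])
# -> ['rbf.variance', 'Gaussian_noise.variance']
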
def __getitem__(self, name, paramlist=None):
if isinstance(name, (int, slice, tuple, np.ndarray)):
return self.param_array[name]
else:
if paramlist is None:
paramlist = self.grep_param_names(name)
if len(paramlist) < 1: raise AttributeError(name)
if len(paramlist) == 1:
if isinstance(paramlist[-1], Parameterized):
paramlist = paramlist[-1].flattened_parameters
if len(paramlist) != 1:
return ParamConcatenation(paramlist)
return paramlist[-1]
return ParamConcatenation(paramlist)
def __setitem__(self, name, value, paramlist=None):
if value is None:
return # nothing to do here
if isinstance(name, (slice, tuple, np.ndarray)):
try:
self.param_array[name] = value
except:
raise ValueError("Setting by slice or index only allowed with array-like")
self.trigger_update()
else:
param = self.__getitem__(name, paramlist)
param[:] = value
def __setattr__(self, name, val):
# override the default behaviour, if setting a param, so broadcasting can by used
if hasattr(self, "parameters"):
try:
pnames = self.parameter_names(False, adjust_for_printing=True, recursive=False)
if name in pnames:
param = self.parameters[pnames.index(name)]
param[:] = val; return
except AttributeError as a:
raise
return object.__setattr__(self, name, val);
#===========================================================================
# Pickling
#===========================================================================
def __setstate__(self, state):
super(Parameterized, self).__setstate__(state)
try:
self._connect_parameters()
self._connect_fixes()
self._notify_parent_change()
self.parameters_changed()
except Exception as e:
print("WARNING: caught exception {!s}, trying to continue".format(e))
def copy(self, memo=None):
if memo is None:
memo = {}
memo[id(self.optimizer_array)] = None # and param_array
memo[id(self.param_array)] = None # and param_array
copy = super(Parameterized, self).copy(memo)
copy._connect_parameters()
copy._connect_fixes()
copy._notify_parent_change()
return copy
#===========================================================================
# Printing:
#===========================================================================
def _short(self):
return self.hierarchy_name()
@property
def flattened_parameters(self):
return [xi for x in self.parameters for xi in x.flattened_parameters]
@property
def _parameter_sizes_(self):
return [x.size for x in self.parameters]
@property
def parameter_shapes(self):
return [xi for x in self.parameters for xi in x.parameter_shapes]
@property
def _constraints_str(self):
return [cs for p in self.parameters for cs in p._constraints_str]
@property
def _priors_str(self):
return [cs for p in self.parameters for cs in p._priors_str]
@property
def _description_str(self):
return [xi for x in self.parameters for xi in x._description_str]
@property
def _ties_str(self):
return [','.join(x._ties_str) for x in self.flattened_parameters]
def _repr_html_(self, header=True):
"""Representation of the parameters in html for notebook display."""
name = adjust_name_for_printing(self.name) + "."
constrs = self._constraints_str;
ts = self._ties_str
prirs = self._priors_str
desc = self._description_str; names = self.parameter_names()
nl = max([len(str(x)) for x in names + [name]])
sl = max([len(str(x)) for x in desc + ["Value"]])
cl = max([len(str(x)) if x else 0 for x in constrs + ["Constraint"]])
tl = max([len(str(x)) if x else 0 for x in ts + ["Tied to"]])
pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]])
format_spec = "<tr><td class=tg-left>{{name:<{0}s}}</td><td class=tg-right>{{desc:>{1}s}}</td><td class=tg-left>{{const:^{2}s}}</td><td class=tg-left>{{pri:^{3}s}}</td><td class=tg-left>{{t:^{4}s}}</td></tr>".format(nl, sl, cl, pl, tl)
to_print = []
for n, d, c, t, p in zip(names, desc, constrs, ts, prirs):
to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))
sep = '-' * (nl + sl + cl + pl + tl + 8 * 2 + 3)
if header:
header = """
<tr>
<th><b>{name}</b></th>
<th><b>Value</b></th>
<th><b>Constraint</b></th>
<th><b>Prior</b></th>
<th><b>Tied to</b></th>
</tr>""".format(name=name)
to_print.insert(0, header)
style = """<style type="text/css">
.tg {font-family:"Courier New", Courier, monospace !important;padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""
return style + '\n' + '<table class="tg">' + '\n'.join(to_print) + '\n</table>'
def __str__(self, header=True, VT100=True):
name = adjust_name_for_printing(self.name) + "."
constrs = self._constraints_str;
ts = self._ties_str
prirs = self._priors_str
desc = self._description_str; names = self.parameter_names()
nl = max([len(str(x)) for x in names + [name]])
sl = max([len(str(x)) for x in desc + ["Value"]])
cl = max([len(str(x)) if x else 0 for x in constrs + ["Constraint"]])
tl = max([len(str(x)) if x else 0 for x in ts + ["Tied to"]])
pl = max([len(str(x)) if x else 0 for x in prirs + ["Prior"]])
if VT100:
format_spec = " \033[1m{{name:<{0}s}}\033[0;0m | {{desc:>{1}s}} | {{const:^{2}s}} | {{pri:^{3}s}} | {{t:^{4}s}}".format(nl, sl, cl, pl, tl)
else:
format_spec = " {{name:<{0}s}} | {{desc:>{1}s}} | {{const:^{2}s}} | {{pri:^{3}s}} | {{t:^{4}s}}".format(nl, sl, cl, pl, tl)
to_print = []
for n, d, c, t, p in zip(names, desc, constrs, ts, prirs):
to_print.append(format_spec.format(name=n, desc=d, const=c, t=t, pri=p))
sep = '-' * (nl + sl + cl + pl + tl + 8 * 2 + 3)
if header:
header = " {{0:<{0}s}} | {{1:^{1}s}} | {{2:^{2}s}} | {{3:^{3}s}} | {{4:^{4}s}}".format(nl, sl, cl, pl, tl).format(name, "Value", "Constraint", "Prior", "Tied to")
to_print.insert(0, header)
return '\n'.join(to_print)
"""
pass


@ -0,0 +1,82 @@
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from paramz.transformations import Transformation, __fixed__
from paramz.core.parameter_core import Parameterizable
from functools import reduce
class Priorizable(Parameterizable):
def __init__(self, name, default_prior=None, *a, **kw):
super(Priorizable, self).__init__(name=name, *a, **kw)
self._default_prior_ = default_prior
from paramz.core.index_operations import ParameterIndexOperations
self.add_index_operation('priors', ParameterIndexOperations())
if self._default_prior_ is not None:
self.set_prior(self._default_prior_)
def __setstate__(self, state):
super(Priorizable, self).__setstate__(state)
self._index_operations['priors'] = self.priors
#===========================================================================
# Prior Operations
#===========================================================================
def set_prior(self, prior, warning=True):
"""
Set the prior for this object to prior.
:param :class:`~GPy.priors.Prior` prior: a prior to set for this parameter
:param bool warning: whether to warn if another prior was set for this parameter
"""
repriorized = self.unset_priors()
self._add_to_index_operations(self.priors, repriorized, prior, warning)
from paramz.domains import _REAL, _POSITIVE, _NEGATIVE
if prior.domain is _POSITIVE:
self.constrain_positive(warning)
elif prior.domain is _NEGATIVE:
self.constrain_negative(warning)
elif prior.domain is _REAL:
rav_i = self._raveled_index()
assert all(all(False if c is __fixed__ else c.domain is _REAL for c in con) for con in self.constraints.properties_for(rav_i)), 'Domain of prior and constraint have to match, please unconstrain if you REALLY wish to use this prior'
def unset_priors(self, *priors):
"""
Un-set all priors given in *priors from this parameter handle. If no priors are given, all priors of this handle are removed.
"""
return self._remove_from_index_operations(self.priors, priors)
def log_prior(self):
"""evaluate the prior"""
if self.priors.size == 0:
return 0.
x = self.param_array
#evaluate the prior log densities
log_p = reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.items()), 0)
#account for the transformation by evaluating the log Jacobian (where things are transformed)
log_j = 0.
priored_indexes = np.hstack([i for p, i in self.priors.items()])
for c,j in self.constraints.items():
if not isinstance(c, Transformation):continue
for jj in j:
if jj in priored_indexes:
log_j += c.log_jacobian(x[jj])
return log_p + log_j
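
The log-Jacobian term corrects for priors being placed on the constrained values while optimization runs in the transformed space: log p_x(x) = log p_theta(f(x)) + log|f'(x)|. A numeric sanity check of that term for the Logexp transform (assumes paramz, the package this PR delegates to, is installed):

import numpy as np
from paramz.transformations import Logexp

t = Logexp()
y = np.array([0.3, 1.5, 4.0])   # constrained values, y = f(x)
x = t.finv(y)
eps = 1e-6
num_jac = (t.f(x + eps) - t.f(x - eps)) / (2 * eps)  # numerical f'(x)
assert np.allclose(np.log(num_jac), t.log_jacobian(y), atol=1e-5)
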
def _log_prior_gradients(self):
"""evaluate the gradients of the priors"""
if self.priors.size == 0:
return 0.
x = self.param_array
ret = np.zeros(x.size)
#compute derivative of the prior density
[np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.items()]
#add in jacobian derivatives if transformed
priored_indexes = np.hstack([i for p, i in self.priors.items()])
for c,j in self.constraints.items():
if not isinstance(c, Transformation):continue
for jj in j:
if jj in priored_indexes:
ret[jj] += c.log_jacobian_grad(x[jj])
return ret
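
End-to-end, setting a prior whose domain is positive silently constrains the parameter to match, and log_prior/_log_prior_gradients then include it. A hypothetical example (model and data are made up, not from this diff):

import numpy as np
import GPy

X = np.random.rand(10, 1)
Y = np.random.randn(10, 1)
m = GPy.models.GPRegression(X, Y)
m.kern.lengthscale.set_prior(GPy.priors.Gamma(1., 1.))  # +ve domain
print(m.log_prior())                   # prior log density (+ Jacobian terms)
print(m._log_prior_gradients().shape)  # one entry per parameter
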


@ -5,7 +5,7 @@
import numpy as np
from scipy.special import gammaln, digamma
from ...util.linalg import pdinv
from .domains import _REAL, _POSITIVE
from paramz.domains import _REAL, _POSITIVE
import warnings
import weakref
@ -725,8 +725,9 @@ class DGPLVM(Prior):
# ******************************************
from .. import Parameterized
from .. import Param
from . import Parameterized
from . import Param
class DGPLVM_Lamda(Prior, Parameterized):
"""
Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.


@ -1,225 +0,0 @@
# Copyright (c) 2014, James Hensman, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .parameterized import Parameterized
from .param import Param
class Remapping(Parameterized):
def mapping(self):
"""
The return value of this function gives the values which the re-mapped
parameters should take. Implement in sub-classes.
"""
raise NotImplementedError
def callback(self):
raise NotImplementedError
def __str__(self):
return self.name
def parameters_changed(self):
#ensure all out parameters have the correct value, as specified by our mapping
index = self._highest_parent_.constraints[self]
self._highest_parent_.param_array[index] = self.mapping()
[p.notify_observers(which=self) for p in self.tied_parameters]
class Fix(Remapping):
pass
class Tie(Parameterized):
"""
The new parameter tie framework. (under development)
All the parameters tied together get a new parameter inside the *Tie* object.
Its value should always be equal to all the tied parameters, and its gradient
is the sum of all the tied parameters.
=====Implementation Details=====
The *Tie* object should only exist on the top of param tree (the highest parent).
self.label_buf:
It uses a label buffer that has the same length as all the parameters (self._highest_parent_.param_array).
The buffer keeps track of all the tied parameters. All the tied parameters have a label (an integer) higher
than 0, and the parameters that have the same label are tied together.
self.buf_index:
An auxiliary index list for the global index of the tie parameter inside the *Tie* object.
================================
TODO:
* EVERYTHING
"""
def __init__(self, name='tie'):
super(Tie, self).__init__(name)
self.tied_param = None
# The buffer keeps track of tie status
self.label_buf = None
# The global indices of the 'tied' param
self.buf_idx = None
# A boolean array indicating non-tied parameters
self._tie_ = None
def getTieFlag(self, p=None):
if self.tied_param is None:
if self._tie_ is None or self._tie_.size != self._highest_parent_.param_array.size:
self._tie_ = np.ones((self._highest_parent_.param_array.size,),dtype=np.bool)
if p is not None:
return self._tie_[p._highest_parent_._raveled_index_for(p)]
return self._tie_
def _init_labelBuf(self):
if self.label_buf is None:
self.label_buf = np.zeros(self._highest_parent_.param_array.shape, dtype=np.int)
if self._tie_ is None or self._tie_.size != self._highest_parent_.param_array.size:
self._tie_ = np.ones((self._highest_parent_.param_array.size,),dtype=np.bool)
def _updateTieFlag(self):
if self._tie_.size != self.label_buf.size:
self._tie_ = np.ones((self._highest_parent_.param_array.size,),dtype=np.bool)
self._tie_[self.label_buf>0] = False
self._tie_[self.buf_idx] = True
def add_tied_parameter(self, p, p2=None):
"""
Tie the list of parameters p together (p2==None) or
Tie the list of parameters p with the list of parameters p2 (p2!=None)
"""
self._init_labelBuf()
if p2 is None:
idx = self._highest_parent_._raveled_index_for(p)
val = self._sync_val_group(idx)
if np.all(self.label_buf[idx]==0):
# None of p has been tied before.
tie_idx = self._expandTieParam(1)
print(tie_idx)
tie_id = self.label_buf.max()+1
self.label_buf[tie_idx] = tie_id
else:
b = self.label_buf[idx]
ids = np.unique(b[b>0])
tie_id, tie_idx = self._merge_tie_param(ids)
self._highest_parent_.param_array[tie_idx] = val
idx = self._highest_parent_._raveled_index_for(p)
self.label_buf[idx] = tie_id
else:
pass
self._updateTieFlag()
def _merge_tie_param(self, ids):
"""Merge the tie parameters with ids in the list."""
if len(ids)==1:
id_final_idx = self.buf_idx[self.label_buf[self.buf_idx]==ids[0]][0]
return ids[0],id_final_idx
id_final = ids[0]
ids_rm = ids[1:]
label_buf_param = self.label_buf[self.buf_idx]
idx_param = [np.where(label_buf_param==i)[0][0] for i in ids_rm]
self._removeTieParam(idx_param)
[np.put(self.label_buf, np.where(self.label_buf==i), id_final) for i in ids_rm]
id_final_idx = self.buf_idx[self.label_buf[self.buf_idx]==id_final][0]
return id_final, id_final_idx
def _sync_val_group(self, idx):
self._highest_parent_.param_array[idx] = self._highest_parent_.param_array[idx].mean()
return self._highest_parent_.param_array[idx][0]
def _expandTieParam(self, num):
"""Expand the tie param with the number of *num* parameters"""
if self.tied_param is None:
new_buf = np.empty((num,))
else:
new_buf = np.empty((self.tied_param.size+num,))
new_buf[:self.tied_param.size] = self.tied_param.param_array.copy()
self.remove_parameter(self.tied_param)
self.tied_param = Param('tied',new_buf)
self.add_parameter(self.tied_param)
buf_idx_new = self._highest_parent_._raveled_index_for(self.tied_param)
self._expand_label_buf(self.buf_idx, buf_idx_new)
self.buf_idx = buf_idx_new
return self.buf_idx[-num:]
def _removeTieParam(self, idx):
"""idx within tied_param"""
new_buf = np.empty((self.tied_param.size-len(idx),))
bool_list = np.ones((self.tied_param.size,),dtype=np.bool)
bool_list[idx] = False
new_buf[:] = self.tied_param.param_array[bool_list]
self.remove_parameter(self.tied_param)
self.tied_param = Param('tied',new_buf)
self.add_parameter(self.tied_param)
buf_idx_new = self._highest_parent_._raveled_index_for(self.tied_param)
self._shrink_label_buf(self.buf_idx, buf_idx_new, bool_list)
self.buf_idx = buf_idx_new
def _expand_label_buf(self, idx_old, idx_new):
"""Expand label buffer accordingly"""
if idx_old is None:
self.label_buf = np.zeros(self._highest_parent_.param_array.shape, dtype=np.int)
else:
bool_old = np.zeros((self.label_buf.size,),dtype=np.bool)
bool_old[idx_old] = True
bool_new = np.zeros((self._highest_parent_.param_array.size,),dtype=np.bool)
bool_new[idx_new] = True
label_buf_new = np.zeros(self._highest_parent_.param_array.shape, dtype=np.int)
label_buf_new[np.logical_not(bool_new)] = self.label_buf[np.logical_not(bool_old)]
label_buf_new[idx_new[:len(idx_old)]] = self.label_buf[idx_old]
self.label_buf = label_buf_new
def _shrink_label_buf(self, idx_old, idx_new, bool_list):
bool_old = np.zeros((self.label_buf.size,),dtype=np.bool)
bool_old[idx_old] = True
bool_new = np.zeros((self._highest_parent_.param_array.size,),dtype=np.bool)
bool_new[idx_new] = True
label_buf_new = np.empty(self._highest_parent_.param_array.shape, dtype=np.int)
label_buf_new[np.logical_not(bool_new)] = self.label_buf[np.logical_not(bool_old)]
label_buf_new[idx_new] = self.label_buf[idx_old[bool_list]]
self.label_buf = label_buf_new
def _check_change(self):
changed = False
if self.tied_param is not None:
for i in range(self.tied_param.size):
b0 = self.label_buf==self.label_buf[self.buf_idx[i]]
b = self._highest_parent_.param_array[b0]!=self.tied_param[i]
if b.sum()==0:
print('XXX')
continue
elif b.sum()==1:
print('!!!')
val = self._highest_parent_.param_array[b0][b][0]
self._highest_parent_.param_array[b0] = val
else:
print('@@@')
self._highest_parent_.param_array[b0] = self.tied_param[i]
changed = True
return changed
def parameters_changed(self):
#ensure all out parameters have the correct value, as specified by our mapping
changed = self._check_change()
if changed:
self._highest_parent_._trigger_params_changed()
self.collate_gradient()
def collate_gradient(self):
if self.tied_param is not None:
self.tied_param.gradient = 0.
[np.put(self.tied_param.gradient, i, self._highest_parent_.gradient[self.label_buf==self.label_buf[self.buf_idx[i]]].sum())
for i in range(self.tied_param.size)]
def propagate_val(self):
if self.tied_param is not None:
for i in range(self.tied_param.size):
self._highest_parent_.param_array[self.label_buf==self.label_buf[self.buf_idx[i]]] = self.tied_param[i]
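
A toy illustration of the label-buffer bookkeeping described in the docstring above (all values made up; the class itself is still marked as under development):

import numpy as np

param_array = np.array([0.5, 2.0, 0.1, 3.0, 2.5, 1.0])
label_buf = np.array([0, 1, 0, 0, 1, 0])  # parameters 1 and 4 share label 1
tie_id = 1
group = label_buf == tie_id
param_array[group] = param_array[group].mean()  # propagate_val: one shared value
gradient = np.array([.1, .2, .3, .4, .5, .6])
tied_grad = gradient[group].sum()               # collate_gradient: sum over the group
print(param_array, tied_grad)
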


@ -1,518 +1,4 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Copyright (c) 2014, Max Zwiessele, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .domains import _POSITIVE,_NEGATIVE, _BOUNDED
import weakref
import sys
_exp_lim_val = np.finfo(np.float64).max
_lim_val = 36.0
epsilon = np.finfo(np.float64).resolution
#===============================================================================
# Fixing constants
__fixed__ = "fixed"
FIXED = False
UNFIXED = True
#===============================================================================
class Transformation(object):
domain = None
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance or cls._instance.__class__ is not cls:
cls._instance = super(Transformation, cls).__new__(cls, *args, **kwargs)
return cls._instance
def f(self, opt_param):
raise NotImplementedError
def finv(self, model_param):
raise NotImplementedError
def log_jacobian(self, model_param):
"""
compute the log of the jacobian of f, evaluated at f(x)= model_param
"""
raise NotImplementedError
def log_jacobian_grad(self, model_param):
"""
compute the derivative of the log of the jacobian of f, evaluated at f(x)= model_param
"""
raise NotImplementedError
def gradfactor(self, model_param, dL_dmodel_param):
""" df(opt_param)_dopt_param evaluated at self.f(opt_param)=model_param, times the gradient dL_dmodel_param,
i.e.:
.. math::
\frac{\partial L}{\partial f}\,\left.\frac{\partial f(x)}{\partial x}\right|_{x=f^{-1}(\mathrm{model\_param})}
"""
raise NotImplementedError
def gradfactor_non_natural(self, model_param, dL_dmodel_param):
return self.gradfactor(model_param, dL_dmodel_param)
def initialize(self, f):
""" produce a sensible initial value for f(x)"""
raise NotImplementedError
def plot(self, xlabel=r'transformed $\theta$', ylabel=r'$\theta$', axes=None, *args,**kw):
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
import matplotlib.pyplot as plt
from ...plotting.matplot_dep import base_plots
x = np.linspace(-8,8)
base_plots.meanplot(x, self.f(x), *args, ax=axes, **kw)
axes = plt.gca()
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
def __str__(self):
raise NotImplementedError
def __repr__(self):
return self.__class__.__name__
class Logexp(Transformation):
domain = _POSITIVE
def f(self, x):
return np.where(x>_lim_val, x, np.log1p(np.exp(np.clip(x, -_lim_val, _lim_val)))) + epsilon
#raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
def finv(self, f):
return np.where(f>_lim_val, f, np.log(np.exp(f+1e-20) - 1.))
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, np.where(f>_lim_val, 1., 1. - np.exp(-f)))
def initialize(self, f):
if np.any(f < 0.):
print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def log_jacobian(self, model_param):
return np.where(model_param>_lim_val, model_param, np.log(np.exp(model_param+1e-20) - 1.)) - model_param
def log_jacobian_grad(self, model_param):
return 1./(np.exp(model_param)-1.)
def __str__(self):
return '+ve'
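
A quick check of the Logexp contract (assumes paramz is installed, where this transform now lives): f maps any real number to a positive value, finv inverts it, and gradfactor applies the chain-rule factor 1 - exp(-f):

import numpy as np
from paramz.transformations import Logexp

t = Logexp()
x = np.linspace(-3., 3., 7)
y = t.f(x)
assert np.all(y > 0)                         # positivity constraint holds
assert np.allclose(t.finv(y), x, atol=1e-6)  # f and finv invert each other
assert np.allclose(t.gradfactor(y, np.ones_like(y)), 1. - np.exp(-y))
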
class Exponent(Transformation):
domain = _POSITIVE
def f(self, x):
return np.where(x<_lim_val, np.where(x>-_lim_val, np.exp(x), np.exp(-_lim_val)), np.exp(_lim_val))
def finv(self, x):
return np.log(x)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, f)
def initialize(self, f):
if np.any(f < 0.):
print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def log_jacobian(self, model_param):
return np.log(model_param)
def log_jacobian_grad(self, model_param):
return 1./model_param
def __str__(self):
return '+ve'
class NormalTheta(Transformation):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def f(self, theta):
# In here abs is only a trick to make sure the numerics are ok.
# The variance will never go below zero, but at initialization we need to make sure
# that the values are ok
# Before:
theta[self.var_indices] = np.abs(-.5/theta[self.var_indices])
#theta[self.var_indices] = np.exp(-.5/theta[self.var_indices])
theta[self.mu_indices] *= theta[self.var_indices]
return theta # which is now {mu, var}
def finv(self, muvar):
# before:
varp = muvar[self.var_indices]
muvar[self.mu_indices] /= varp
muvar[self.var_indices] = -.5/varp
#muvar[self.var_indices] = -.5/np.log(varp)
return muvar # which is now {theta1, theta2}
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# theta gradients
# This works and the gradient checks!
dmuvar[self.mu_indices] *= var
dmuvar[self.var_indices] *= 2*(var)**2
dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
#=======================================================================
return dmuvar # which is now the gradient multiplicator for {theta1, theta2}
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
def __str__(self):
return "theta"
def __getstate__(self):
return [self.mu_indices, self.var_indices]
def __setstate__(self, state):
self.mu_indices = state[0]
self.var_indices = state[1]
class NormalNaturalAntti(NormalTheta):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# theta gradients
# This works and the gradient checks!
dmuvar[self.mu_indices] *= var
dmuvar[self.var_indices] *= 2*var**2#np.einsum('i,i,i,i->i', dmuvar[self.var_indices], [2], var, var)
#=======================================================================
return dmuvar # which is now the gradient multiplicator
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
def __str__(self):
return "natantti"
class NormalEta(Transformation):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def f(self, theta):
theta[self.var_indices] = np.abs(theta[self.var_indices] - theta[self.mu_indices]**2)
return theta # which is now {mu, var}
def finv(self, muvar):
muvar[self.var_indices] += muvar[self.mu_indices]**2
return muvar # which is now {eta1, eta2}
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
#=======================================================================
# Lets try natural gradients instead: Not working with bfgs... try stochastic!
dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
#=======================================================================
return dmuvar # which is now the gradient multiplicator
def initialize(self, f):
if np.any(f[self.var_indices] < 0.):
print("Warning: changing parameters to satisfy constraints")
f[self.var_indices] = np.abs(f[self.var_indices])
return f
def __str__(self):
return "eta"
class NormalNaturalThroughTheta(NormalTheta):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# This is just eta direction:
dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
#=======================================================================
#=======================================================================
# This is by going through theta fully and then going into eta direction:
#dmu = dmuvar[self.mu_indices]
#dmuvar[self.var_indices] += dmu*mu*(var + 4/var)
#=======================================================================
return dmuvar # which is now the gradient multiplicator
def gradfactor_non_natural(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# theta gradients
# This works and the gradient checks!
dmuvar[self.mu_indices] *= var
dmuvar[self.var_indices] *= 2*(var)**2
dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
#=======================================================================
return dmuvar # which is now the gradient multiplicator for {theta1, theta2}
def __str__(self):
return "natgrad"
class NormalNaturalWhooot(NormalTheta):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def gradfactor(self, muvar, dmuvar):
#mu = muvar[self.mu_indices]
#var = muvar[self.var_indices]
#=======================================================================
# This is just eta direction:
#dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
#=======================================================================
#=======================================================================
# This is by going through theta fully and then going into eta direction:
#dmu = dmuvar[self.mu_indices]
#dmuvar[self.var_indices] += dmu*mu*(var + 4/var)
#=======================================================================
return dmuvar # which is now the gradient multiplicator
def __str__(self):
return "natgrad"
class NormalNaturalThroughEta(NormalEta):
"Do not use, not officially supported!"
_instances = []
def __new__(cls, mu_indices=None, var_indices=None):
"Do not use, not officially supported!"
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
return instance()
o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu_indices, var_indices):
self.mu_indices = mu_indices
self.var_indices = var_indices
def gradfactor(self, muvar, dmuvar):
mu = muvar[self.mu_indices]
var = muvar[self.var_indices]
#=======================================================================
# theta gradients
# This works and the gradient checks!
dmuvar[self.mu_indices] *= var
dmuvar[self.var_indices] *= 2*(var)**2
dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
#=======================================================================
return dmuvar
def __str__(self):
return "natgrad"
class LogexpNeg(Transformation):
domain = _POSITIVE
def f(self, x):
return np.where(x>_lim_val, -x, -np.log(1. + np.exp(np.clip(x, -np.inf, _lim_val))))
#raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
def finv(self, f):
return np.where(f>_lim_val, 0, np.log(np.exp(-f) - 1.))
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, np.where(f>_lim_val, -1, -1 + np.exp(-f)))
def initialize(self, f):
if np.any(f < 0.):
print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def __str__(self):
return '+ve'
class NegativeLogexp(Transformation):
domain = _NEGATIVE
logexp = Logexp()
def f(self, x):
return -self.logexp.f(x) # np.log(1. + np.exp(x))
def finv(self, f):
return self.logexp.finv(-f) # np.log(np.exp(-f) - 1.)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, -self.logexp.gradfactor(-f))
def initialize(self, f):
return -self.logexp.initialize(f) # np.abs(f)
def __str__(self):
return '-ve'
class LogexpClipped(Logexp):
max_bound = 1e100
min_bound = 1e-10
log_max_bound = np.log(max_bound)
log_min_bound = np.log(min_bound)
domain = _POSITIVE
_instances = []
def __new__(cls, lower=1e-6, *args, **kwargs):
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().lower == lower:
return instance()
o = super(Transformation, cls).__new__(cls, lower, *args, **kwargs)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, lower=1e-6):
self.lower = lower
def f(self, x):
exp = np.exp(np.clip(x, self.log_min_bound, self.log_max_bound))
f = np.log(1. + exp)
# if np.isnan(f).any():
# import ipdb;ipdb.set_trace()
return np.clip(f, self.min_bound, self.max_bound)
def finv(self, f):
return np.log(np.exp(f) - 1.) # invert f = log(1 + e^x); the misplaced parenthesis made this return f - 1
def gradfactor(self, f, df):
ef = np.exp(f) # np.clip(f, self.min_bound, self.max_bound))
gf = (ef - 1.) / ef
return np.einsum('i,i->i', df, gf) # np.where(f < self.lower, 0, gf)
def initialize(self, f):
if np.any(f < 0.):
print("Warning: changing parameters to satisfy constraints")
return np.abs(f)
def __str__(self):
return '+ve_c'
class NegativeExponent(Exponent):
domain = _NEGATIVE
def f(self, x):
return -Exponent.f(self, x) # unbound call needs self passed explicitly
def finv(self, f):
return Exponent.finv(self, -f)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, f)
def initialize(self, f):
return -Exponent.initialize(self, f) #np.abs(f)
def __str__(self):
return '-ve'
class Square(Transformation):
domain = _POSITIVE
def f(self, x):
return x ** 2
def finv(self, x):
return np.sqrt(x)
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, 2 * np.sqrt(f))
def initialize(self, f):
return np.abs(f)
def __str__(self):
return '+sq'
class Logistic(Transformation):
domain = _BOUNDED
_instances = []
def __new__(cls, lower=1e-6, upper=1e-6, *args, **kwargs):
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().lower == lower and instance().upper == upper:
return instance()
newfunc = super(Transformation, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, lower, upper, *args, **kwargs)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, lower, upper):
assert lower < upper
self.lower, self.upper = float(lower), float(upper)
self.difference = self.upper - self.lower
def f(self, x):
if (x<-300.).any():
x = x.copy()
x[x<-300.] = -300.
return self.lower + self.difference / (1. + np.exp(-x))
def finv(self, f):
return np.log(np.clip(f - self.lower, 1e-10, np.inf) / np.clip(self.upper - f, 1e-10, np.inf))
def gradfactor(self, f, df):
return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference)
def initialize(self, f):
if np.any(np.logical_or(f < self.lower, f > self.upper)):
print("Warning: changing parameters to satisfy constraints")
#return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
#FIXME: Max, zeros_like right?
return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)
def __str__(self):
return '{},{}'.format(self.lower, self.upper)
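
Logistic is the transform behind bounded constraints: any real optimizer value maps strictly inside (lower, upper). A short sketch (paramz provides the same class after this merge):

import numpy as np
from paramz.transformations import Logistic

t = Logistic(0., 10.)
x = np.array([-5., 0., 5.])
y = t.f(x)
assert np.all((y > 0.) & (y < 10.))  # always inside the bounds
assert np.allclose(t.finv(y), x)
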
from paramz.transformations import *


@ -1,54 +0,0 @@
'''
Created on 11 Nov 2014
@author: maxz
'''
from .observable import Observable
class Updateable(Observable):
"""
A model can be updated or not.
Make sure updates can be switched on and off.
"""
def __init__(self, *args, **kwargs):
super(Updateable, self).__init__(*args, **kwargs)
def update_model(self, updates=None):
"""
Get or set whether automatic updates are performed. When updates are
off, the model might be in a non-working state. To make the model work
turn updates on again.
:param bool|None updates:
bool: whether to do updates
None: get the current update state
"""
if updates is None:
return self._update_on
assert isinstance(updates, bool), "updates are either on (True) or off (False)"
p = getattr(self, '_highest_parent_', None)
def turn_updates(s):
s._update_on = updates
p.traverse(turn_updates)
self.trigger_update()
def toggle_update(self):
print("deprecated: toggle_update was renamed to update_toggle for easier access")
self.update_toggle()
def update_toggle(self):
self.update_model(not self.update_model())
def trigger_update(self, trigger_parent=True):
"""
Update the model from the current state.
Make sure that updates are on, otherwise this
method will do nothing
:param bool trigger_parent: Whether to trigger the parent, after self has updated
"""
if not self.update_model() or (hasattr(self, "_in_init_") and self._in_init_):
#print "Warning: updates are off, updating the model will do nothing"
return
self._trigger_params_changed(trigger_parent)
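
A sketch of the intended usage, with a hypothetical model (data made up): switch updates off while making several changes, then switch them back on to recompute once.

import numpy as np
import GPy

X = np.random.rand(10, 1)
Y = np.random.randn(10, 1)
m = GPy.models.GPRegression(X, Y)
m.update_model(False)   # edits below do not trigger recomputation
m.kern.variance = 2.
m.kern.lengthscale = 0.5
m.update_model(True)    # one recomputation instead of two
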


@ -7,9 +7,7 @@ Created on 6 Nov 2013
import numpy as np
from .parameterized import Parameterized
from .param import Param
from .transformations import Logexp, Logistic,__fixed__
from GPy.util.misc import param_to_array
from GPy.util.caching import Cache_this
from paramz.transformations import Logexp, Logistic,__fixed__
class VariationalPrior(Parameterized):
def __init__(self, name='latent space', **kw):


@ -6,11 +6,9 @@ from .gp import GP
from .parameterization.param import Param
from ..inference.latent_function_inference import var_dtc
from .. import likelihoods
from .parameterization.variational import VariationalPosterior, NormalPosterior
from ..util.linalg import mdot
from GPy.core.parameterization.variational import VariationalPosterior
import logging
import itertools
logger = logging.getLogger("sparse gp")
class SparseGP(GP):


@ -38,7 +38,7 @@ class SVGP(SparseGP):
#create the SVI inference method
inf_method = svgp_inf()
SparseGP.__init__(self, X_batch, Y_batch, Z, kernel, likelihood, mean_function=mean_function, inference_method=inf_method,
super(SVGP, self).__init__(X_batch, Y_batch, Z, kernel, likelihood, mean_function=mean_function, inference_method=inf_method,
name=name, Y_metadata=Y_metadata, normalizer=False)
#assume the number of latent functions is one per col of Y unless specified


@ -1,185 +0,0 @@
# Copyright (c) 2012-2014, Max Zwiessele.
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
import numpy as np
import sys
import time
import datetime
def exponents(fnow, current_grad):
exps = [np.abs(np.float(fnow)), 1 if current_grad is np.nan else current_grad]
return np.sign(exps) * np.log10(exps).astype(int)
class VerboseOptimization(object):
def __init__(self, model, opt, maxiters, verbose=False, current_iteration=0, ipython_notebook=True, clear_after_finish=False):
self.verbose = verbose
if self.verbose:
self.model = model
self.iteration = current_iteration
self.p_iter = self.iteration
self.maxiters = maxiters
self.len_maxiters = len(str(maxiters))
self.opt_name = opt.opt_name
self.model.add_observer(self, self.print_status)
self.status = 'running'
self.clear = clear_after_finish
self.update()
try:
from IPython.display import display
from ipywidgets.widgets import IntProgress, HTML, Box, VBox, FlexBox
self.text = HTML(width='100%')
self.progress = IntProgress(min=0, max=maxiters)
#self.progresstext = Text(width='100%', disabled=True, value='0/{}'.format(maxiters))
self.model_show = HTML()
self.ipython_notebook = ipython_notebook
except:
# Not in Jupyter notebook
self.ipython_notebook = False
if self.ipython_notebook:
left_col = VBox(children=[self.progress, self.text], padding=2, width='40%')
right_col = Box(children=[self.model_show], padding=2, width='60%')
self.hor_align = FlexBox(children = [left_col, right_col], width='100%', orientation='horizontal')
display(self.hor_align)
try:
self.text.set_css('width', '100%')
left_col.set_css({
'padding': '2px',
'width': "100%",
})
right_col.set_css({
'padding': '2px',
})
self.hor_align.set_css({
'width': "100%",
})
self.hor_align.remove_class('vbox')
self.hor_align.add_class('hbox')
left_col.add_class("box-flex1")
right_col.add_class('box-flex0')
except:
pass
#self.text.add_class('box-flex2')
#self.progress.add_class('box-flex1')
else:
self.exps = exponents(self.fnow, self.current_gradient)
print('Running {} Code:'.format(self.opt_name))
print(' {3:7s} {0:{mi}s} {1:11s} {2:11s}'.format("i", "f", "|g|", "runtime", mi=self.len_maxiters))
def __enter__(self):
self.start = time.time()
self._time = self.start
return self
def print_out(self, seconds):
if seconds<60:
ms = (seconds%1)*100
self.timestring = "{s:0>2d}s{ms:0>2d}".format(s=int(seconds), ms=int(ms))
else:
m, s = divmod(seconds, 60)
if m>59:
h, m = divmod(m, 60)
if h>23:
d, h = divmod(h, 24)
self.timestring = '{d:0>2d}d{h:0>2d}h{m:0>2d}'.format(m=int(m), h=int(h), d=int(d))
else:
self.timestring = '{h:0>2d}h{m:0>2d}m{s:0>2d}'.format(m=int(m), s=int(s), h=int(h))
else:
ms = (seconds%1)*100
self.timestring = '{m:0>2d}m{s:0>2d}s{ms:0>2d}'.format(m=int(m), s=int(s), ms=int(ms))
if self.ipython_notebook:
names_vals = [['optimizer', "{:s}".format(self.opt_name)],
['runtime', "{:>s}".format(self.timestring)],
['evaluation', "{:>0{l}}".format(self.iteration, l=self.len_maxiters)],
['objective', "{: > 12.3E}".format(self.fnow)],
['||gradient||', "{: >+12.3E}".format(float(self.current_gradient))],
['status', "{:s}".format(self.status)],
]
#message = "Lik:{:5.3E} Grad:{:5.3E} Lik:{:5.3E} Len:{!s}".format(float(m.log_likelihood()), np.einsum('i,i->', grads, grads), float(m.likelihood.variance), " ".join(["{:3.2E}".format(l) for l in m.kern.lengthscale.values]))
html_begin = """<style type="text/css">
.tg-opt {font-family:"Courier New", Courier, monospace !important;padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg-opt td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg-opt th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg-opt .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg-opt .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>
<table class="tg-opt">"""
html_end = "</table>"
html_body = ""
for name, val in names_vals:
html_body += "<tr>"
html_body += "<td class='tg-left'>{}</td>".format(name)
html_body += "<td class='tg-right'>{}</td>".format(val)
html_body += "</tr>"
self.text.value = html_begin + html_body + html_end
self.progress.value = (self.iteration+1)
#self.progresstext.value = '0/{}'.format((self.iteration+1))
self.model_show.value = self.model._repr_html_()
else:
n_exps = exponents(self.fnow, self.current_gradient)
if self.iteration - self.p_iter >= 20 * np.random.rand():
a = self.iteration >= self.p_iter * 2.78
b = np.any(n_exps < self.exps)
if a or b:
self.p_iter = self.iteration
print('')
if b:
self.exps = n_exps
print('\r', end=' ')
print('{3:} {0:>0{mi}g} {1:> 12e} {2:> 12e}'.format(self.iteration, float(self.fnow), float(self.current_gradient), "{:>8s}".format(self.timestring), mi=self.len_maxiters), end=' ') # print 'Iteration:', iteration, ' Objective:', fnow, ' Scale:', beta, '\r',
sys.stdout.flush()
def print_status(self, me, which=None):
self.update()
t = time.time()
seconds = t-self.start
#sys.stdout.write(" "*len(self.message))
if t-self._time > .3 or seconds < .3:
self.print_out(seconds)
self._time = t
self.iteration += 1
def update(self):
self.fnow = self.model.objective_function()
if self.model.obj_grads is not None:
grad = self.model.obj_grads
self.current_gradient = np.dot(grad, grad)
else:
self.current_gradient = np.nan
def finish(self, opt):
self.status = opt.status
if self.verbose and self.ipython_notebook:
if 'conv' in self.status.lower():
self.progress.bar_style = 'success'
elif self.iteration >= self.maxiters:
self.progress.bar_style = 'warning'
else:
self.progress.bar_style = 'danger'
def __exit__(self, type, value, traceback):
if self.verbose:
self.stop = time.time()
self.model.remove_observer(self)
self.print_out(self.stop - self.start)
if not self.ipython_notebook:
print()
print('Runtime: {}'.format("{:>9s}".format(self.timestring)))
print('Optimization status: {0}'.format(self.status))
print()
elif self.clear:
self.hor_align.close()
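
This class is driven by Model.optimize rather than used directly; with messages on it renders either the notebook widget table or the plain-text progress line implemented above. A minimal way to trigger it (model and data made up):

import numpy as np
import GPy

X = np.random.rand(20, 1)
Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
m = GPy.models.GPRegression(X, Y)
m.optimize(messages=True, max_iters=50)  # wraps the run in VerboseOptimization
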

View file

@ -2,7 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ...util.linalg import jitchol, DSYR, dtrtrs, dtrtri
from ...core.parameterization.observable_array import ObsAr
from paramz import ObsAr
from . import ExactGaussianInference, VarDTC
from ...util import diag
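
paramz.ObsAr replaces GPy's own observable array here: it is a numpy.ndarray subclass that notifies registered observers when its values change, and otherwise behaves like a plain array. A minimal usage sketch (the array contents are illustrative):

import numpy as np
from paramz import ObsAr

X = ObsAr(np.random.randn(5, 2))  # behaves like a normal ndarray
print(X.shape, X.mean(0))         # numpy operations work unchanged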

View file

@ -3,9 +3,8 @@
import numpy as np
from ...core import Model
from ...core.parameterization import variational
from GPy.core.parameterization import variational
from ...util.linalg import tdot
from GPy.core.parameterization.variational import VariationalPosterior
def infer_newX(model, Y_new, optimize=True, init='L2'):
"""
@ -62,14 +61,12 @@ class InferenceX(Model):
# self.kern.GPU(True)
from copy import deepcopy
self.posterior = deepcopy(model.posterior)
from ...core.parameterization.variational import VariationalPosterior
if isinstance(model.X, VariationalPosterior):
if isinstance(model.X, variational.VariationalPosterior):
self.uncertain_input = True
from ...models.ss_gplvm import IBPPrior
from ...models.ss_mrd import IBPPrior_SSMRD
if isinstance(model.variational_prior, IBPPrior) or isinstance(model.variational_prior, IBPPrior_SSMRD):
from ...core.parameterization.variational import SpikeAndSlabPrior
self.variational_prior = SpikeAndSlabPrior(pi=0.5, learnPi=False, group_spike=False)
self.variational_prior = variational.SpikeAndSlabPrior(pi=0.5, learnPi=False, group_spike=False)
else:
self.variational_prior = model.variational_prior.copy()
else:
@ -105,17 +102,16 @@ class InferenceX(Model):
idx = dist.argmin(axis=1)
from ...models import SSGPLVM
from ...util.misc import param_to_array
if isinstance(model, SSGPLVM):
X = variational.SpikeAndSlabPosterior(param_to_array(model.X.mean[idx]), param_to_array(model.X.variance[idx]), param_to_array(model.X.gamma[idx]))
X = variational.SpikeAndSlabPosterior((model.X.mean[idx].values), (model.X.variance[idx].values), (model.X.gamma[idx].values))
if model.group_spike:
X.gamma.fix()
else:
if self.uncertain_input and self.sparse_gp:
X = variational.NormalPosterior(param_to_array(model.X.mean[idx]), param_to_array(model.X.variance[idx]))
X = variational.NormalPosterior((model.X.mean[idx].values), (model.X.variance[idx].values))
else:
from ...core import Param
X = Param('latent mean',param_to_array(model.X[idx]).copy())
X = Param('latent mean',(model.X[idx].values).copy())
return X
@ -160,8 +156,7 @@ class InferenceX(Model):
self.X.gradient = X_grad
if self.uncertain_input:
from ...core.parameterization.variational import SpikeAndSlabPrior
if isinstance(self.variational_prior, SpikeAndSlabPrior):
if isinstance(self.variational_prior, variational.SpikeAndSlabPrior):
# Update Log-likelihood
KL_div = self.variational_prior.KL_divergence(self.X)
# update for the KL divergence

View file

@ -4,7 +4,7 @@
from .posterior import Posterior
from ...util.linalg import mdot, jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri, dpotri, dpotrs, symmetrify
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
from GPy.core.parameterization.variational import VariationalPosterior
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)
@ -23,8 +23,7 @@ class VarDTC(LatentFunctionInference):
"""
const_jitter = 1e-8
def __init__(self, limit=1):
#self._YYTfactor_cache = caching.cache()
from ...util.caching import Cacher
from paramz.caching import Cacher
self.limit = limit
self.get_trYYT = Cacher(self._get_trYYT, limit)
self.get_YYTfactor = Cacher(self._get_YYTfactor, limit)
@ -45,7 +44,7 @@ class VarDTC(LatentFunctionInference):
def __setstate__(self, state):
# has to be overridden, as Cacher objects cannot be pickled.
self.limit = state
from ...util.caching import Cacher
from paramz.caching import Cacher
self.get_trYYT = Cacher(self._get_trYYT, self.limit)
self.get_YYTfactor = Cacher(self._get_YYTfactor, self.limit)
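
Cacher wraps a bound method and therefore cannot be pickled, which is why __setstate__ above rebuilds the caches by hand. A condensed sketch of the same pattern (the class is illustrative; _get_trYYT mirrors the method cached above):

import numpy as np
from paramz.caching import Cacher

class CachedInference(object):
    def __init__(self, limit=1):
        self.limit = limit
        self._setup_caches()

    def _setup_caches(self):
        # Cacher memoizes the wrapped bound method for up to `limit` inputs
        self.get_trYYT = Cacher(self._get_trYYT, self.limit)

    def _get_trYYT(self, Y):
        return np.trace(np.dot(Y, Y.T))

    def __getstate__(self):
        return self.limit  # drop the unpicklable Cacher objects

    def __setstate__(self, state):
        self.limit = state
        self._setup_caches()  # rebuild the caches after unpickling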

View file

@ -4,7 +4,7 @@
from .posterior import Posterior
from ...util.linalg import jitchol, backsub_both_sides, tdot, dtrtrs, dtrtri,pdinv
from ...util import diag
from ...core.parameterization.variational import VariationalPosterior
from GPy.core.parameterization.variational import VariationalPosterior
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)

View file

@ -4,7 +4,7 @@
from .kern import Kern
from .independent_outputs import index_to_slices
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
import numpy as np
class ODE_UY(Kern):
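
This and the following hunks make the same mechanical change: Logexp (and the other transformations) now come from paramz.transformations instead of GPy.core.parameterization.transformations, with usage unchanged. A minimal sketch of constraining a parameter positive (name and value are illustrative):

from paramz import Param
from paramz.transformations import Logexp

# Logexp maps the optimizer's unconstrained space to (0, inf)
lengthscale = Param('lengthscale', 1.0, Logexp())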

View file

@ -3,7 +3,7 @@
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
import numpy as np
from .independent_outputs import index_to_slices

View file

@ -2,7 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
import numpy as np
from .independent_outputs import index_to_slices

View file

@ -1,6 +1,6 @@
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
import numpy as np
from .independent_outputs import index_to_slices

View file

@ -3,7 +3,7 @@
import numpy as np
import itertools
from ...util.caching import Cache_this
from paramz.caching import Cache_this
from .kern import CombinationKernel, Kern
from functools import reduce

View file

@ -3,8 +3,8 @@
import numpy as np
from .kern import Kern
from ...core.parameterization.param import Param
from ...core.parameterization.transformations import Logexp
from ...util.caching import Cache_this
from paramz.transformations import Logexp
from paramz.caching import Cache_this
from ...util.linalg import tdot, mdot
class BasisFuncKernel(Kern):

View file

@ -3,7 +3,7 @@
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
import numpy as np
class Brownian(Kern):

View file

@ -4,7 +4,7 @@
from .kern import Kern
import numpy as np
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
from ...util.config import config # for assessing whether to use cython
try:
from . import coregionalize_cython

View file

@ -5,8 +5,8 @@ import numpy as np
from scipy.special import wofz
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from ...util.caching import Cache_this
from paramz.transformations import Logexp
from paramz.caching import Cache_this
class EQ_ODE2(Kern):
"""

View file

@ -3,8 +3,8 @@
import sys
import numpy as np
from ...core.parameterization.parameterized import Parameterized
from ...core.parameterization.observable_array import ObsAr
from ...util.caching import Cache_this
from paramz.core.observable_array import ObsAr
from paramz.caching import Cache_this
from .kernel_slice_operations import KernCallsViaSlicerMeta
from functools import reduce
import six
@ -30,18 +30,16 @@ class Kern(Parameterized):
tight dimensionality of inputs.
You most likely want this to be the integer telling the number of
input dimensions of the kernel.
If this is not an integer (!) we will work on the whole input matrix X,
and not check whether dimensions match or not (!).
_all_dims_active:
active_dims:
the active dimensions of the inputs X that the kernel works on.
All kernels will get sliced Xes as inputs, if _all_dims_active is not None
Only positive integers are allowed in _all_dims_active!
if _all_dims_active is None, slicing is switched off and all X will be passed through as given.
Only positive integers are allowed in active_dims!
if active_dims is None, slicing is switched off and all X will be passed through as given.
:param int input_dim: the number of input dimensions to the function
:param array-like|None _all_dims_active: list of indices on which dimensions this kernel works on, or none if no slicing
:param array-like|None active_dims: list of indices on which dimensions this kernel works on, or none if no slicing
Do not instantiate.
"""

View file

@ -7,9 +7,9 @@ This module provides a meta class for the kernels. The meta class is for
slicing the inputs (X, X2) for the kernels, before K (or any other method involving X)
gets called. The `_all_dims_active` of a kernel decides which dimensions the kernel works on.
'''
from ...core.parameterization.parameterized import ParametersChangedMeta
import numpy as np
from functools import wraps
from paramz.parameterized import ParametersChangedMeta
def put_clean(dct, name, func):
if name in dct:

View file

@ -6,8 +6,8 @@ import numpy as np
from .kern import Kern
from ...util.linalg import tdot
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from ...util.caching import Cache_this
from paramz.transformations import Logexp
from paramz.caching import Cache_this
from .psi_comp import PSICOMP_Linear
class Linear(Kern):

View file

@ -3,9 +3,9 @@
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
import numpy as np
from ...util.caching import Cache_this
from paramz.caching import Cache_this
four_over_tau = 2./np.pi
class MLP(Kern):

View file

@ -7,7 +7,7 @@ from .kern import Kern
from ...util.linalg import mdot
from ...util.decorators import silence_errors
from ...core.parameterization.param import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
class Periodic(Kern):
def __init__(self, input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name):

View file

@ -4,7 +4,8 @@
import numpy as np
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
class Poly(Kern):
"""
Polynomial kernel

View file

@ -3,7 +3,7 @@
import numpy as np
from .kern import CombinationKernel
from ...util.caching import Cache_this
from paramz.caching import Cache_this
import itertools
from functools import reduce

View file

@ -1,9 +1,9 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from ....core.parameterization.parameter_core import Pickleable
from ....util.caching import Cache_this
from ....core.parameterization import variational
from paramz.core.pickleable import Pickleable
from paramz.caching import Cache_this
from GPy.core.parameterization import variational
#from linear_psi_comp import LINEAr
class PSICOMP(Pickleable):

View file

@ -8,7 +8,7 @@ An approximated psi-statistics implementation based on Gauss-Hermite Quadrature
import numpy as np
from ....core.parameterization import Param
from ....util.caching import Cache_this
from paramz.caching import Cache_this
from ....util.linalg import tdot
from . import PSICOMP
@ -30,7 +30,7 @@ class PSICOMP_GH(PSICOMP):
@Cache_this(limit=10, ignore_args=(0,))
def comp_K(self, Z, qX):
if self.Xs is None or self.Xs.shape != qX.mean.shape:
from ....core.parameterization import ObsAr
from paramz import ObsAr
self.Xs = ObsAr(np.empty((self.degree,)+qX.mean.shape))
mu, S = qX.mean.values, qX.variance.values
S_sq = np.sqrt(S)
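
Cache_this is the decorator form of Cacher: limit=10 keeps up to ten argument combinations, and ignore_args=(0,) excludes self from the cache key. A minimal sketch, assuming Cache_this can decorate methods of a plain object as it does for the psi-statistics classes here (the method body is illustrative; caching takes effect for observable inputs such as ObsAr):

import numpy as np
from paramz import ObsAr
from paramz.caching import Cache_this

class PsiStatistics(object):
    @Cache_this(limit=10, ignore_args=(0,))
    def comp_K(self, Z):
        # recomputed only when Z changes; otherwise served from the cache
        return Z.dot(Z.T)

stats = PsiStatistics()
Z = ObsAr(np.random.randn(4, 2))
K1 = stats.comp_K(Z)  # computed
K2 = stats.comp_K(Z)  # served from the cache while Z is unchanged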

View file

@ -3,7 +3,7 @@ The module for psi-statistics for RBF kernel
"""
import numpy as np
from GPy.util.caching import Cacher
from paramz.caching import Cacher
def psicomputations(variance, lengthscale, Z, variational_posterior, return_psi2_n=False):
# here are the "statistics" for psi0, psi1 and psi2

View file

@ -3,7 +3,7 @@ The module for psi-statistics for RBF kernel
"""
import numpy as np
from ....util.caching import Cache_this
from paramz.caching import Cache_this
from . import PSICOMP_RBF
from ....util import gpu_init

View file

@ -4,7 +4,7 @@ The module for psi-statistics for RBF kernel for Spike-and-Slab GPLVM
"""
import numpy as np
from ....util.caching import Cache_this
from paramz.caching import Cache_this
from . import PSICOMP_RBF

View file

@ -6,7 +6,7 @@ import numpy as np
from .stationary import Stationary
from .psi_comp import PSICOMP_RBF, PSICOMP_RBF_GPU
from ...core import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
class RBF(Stationary):
"""

View file

@ -4,7 +4,7 @@
import numpy as np
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
class Spline(Kern):
"""

View file

@ -15,7 +15,7 @@ Neural Networks and Machine Learning, pages 133-165. Springer, 1998.
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
import numpy as np

View file

@ -5,7 +5,7 @@
from .kern import Kern
import numpy as np
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
class Static(Kern):
def __init__(self, input_dim, variance, active_dims, name):

View file

@ -2,15 +2,15 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from ...util.linalg import tdot
from ... import util
import numpy as np
from scipy import integrate
from .kern import Kern
from ...core.parameterization import Param
from ...util.linalg import tdot
from ... import util
from ...util.config import config # for assessing whether to use cython
from ...util.caching import Cache_this
from paramz.caching import Cache_this
from paramz.transformations import Logexp
try:
from . import stationary_cython

View file

@ -5,8 +5,8 @@
import numpy as np
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from ...util.caching import Cache_this
from paramz.transformations import Logexp
from paramz.caching import Cache_this
class TruncLinear(Kern):
"""

View file

@ -16,7 +16,7 @@ from scipy import stats, special
from . import link_functions
from .likelihood import Likelihood
from ..core.parameterization import Param
from ..core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
from scipy import stats
class Gaussian(Likelihood):

View file

@ -7,7 +7,7 @@ from . import link_functions
from .likelihood import Likelihood
from .gaussian import Gaussian
from ..core.parameterization import Param
from ..core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
from ..core.parameterization import Parameterized
import itertools

View file

@ -9,7 +9,7 @@ from scipy import stats, integrate
from scipy.special import gammaln, gamma
from .likelihood import Likelihood
from ..core.parameterization import Param
from ..core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
from scipy.special import psi as digamma
class StudentT(Likelihood):

View file

@ -5,7 +5,7 @@ import numpy as np
from .. import kern
from ..core.sparse_gp_mpi import SparseGP_MPI
from ..likelihoods import Gaussian
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from GPy.core.parameterization.variational import NormalPosterior, NormalPrior
from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
import logging

View file

@ -2,14 +2,12 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import logging
from .. import kern
from ..likelihoods import Gaussian
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..inference.latent_function_inference.var_dtc_parallel import VarDTC_minibatch
import logging
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
from GPy.core.parameterization.param import Param
from GPy.core.parameterization.observable_array import ObsAr
from GPy.core.parameterization.variational import NormalPosterior, NormalPrior
from .sparse_gp_minibatch import SparseGPMiniBatch
from ..core.parameterization.param import Param
class BayesianGPLVMMiniBatch(SparseGPMiniBatch):
"""

View file

@ -1,10 +1,7 @@
# Copyright (c) 2015 the GPy Authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .. import kern
from .bayesian_gplvm import BayesianGPLVM
from ..core.parameterization.variational import NormalPosterior, NormalPrior
class DPBayesianGPLVM(BayesianGPLVM):
"""

View file

@ -2,8 +2,8 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core.model import Model
from ..core.parameterization import ObsAr
from ..core import Model
from paramz import ObsAr
from .. import likelihoods
class GPKroneckerGaussianRegression(Model):

View file

@ -3,8 +3,6 @@
import numpy as np
from ..core import GP
from ..core.parameterization import ObsAr
from .. import kern
from ..core.parameterization.param import Param
from ..inference.latent_function_inference import VarGauss

View file

@ -1,11 +1,11 @@
# ## Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from ..core.model import Model
import itertools
import numpy
from ..core.parameterization import Param
np = numpy
from ..core.parameterization import Param
from GPy.core.model import Model
from ..util.block_matrices import get_blocks, get_block_shapes, unblock, get_blocks_3d, get_block_shapes_3d
def get_shape(x):
@ -62,7 +62,7 @@ class GradientChecker(Model):
grad.randomize()
grad.checkgrad(verbose=1)
"""
Model.__init__(self, 'GradientChecker')
super(GradientChecker, self).__init__(name='GradientChecker')
if isinstance(x0, (list, tuple)) and names is None:
self.shapes = [get_shape(xi) for xi in x0]
self.names = ['X{i}'.format(i=i) for i in range(len(x0))]
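
GradientChecker now initializes through super(), but its use is unchanged: supply an objective f, its claimed gradient df, and a starting point, then call checkgrad. A short sketch following the class's own docstring example:

import numpy as np
import GPy

f = lambda x: np.sum(np.sin(x))
df = lambda x: np.cos(x)
grad = GPy.models.GradientChecker(f, df, x0=np.random.randn(3), names='x')
grad.randomize()
grad.checkgrad(verbose=1)  # compares df against a numerical gradient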

View file

@ -5,18 +5,14 @@ import numpy as np
import itertools, logging
from ..kern import Kern
from ..core.parameterization.variational import NormalPosterior, NormalPrior
from ..core.parameterization import Param, Parameterized
from ..core.parameterization.observable_array import ObsAr
from GPy.core.parameterization.variational import NormalPrior
from ..core.parameterization import Param
from paramz import ObsAr
from ..inference.latent_function_inference.var_dtc import VarDTC
from ..inference.latent_function_inference import InferenceMethodList
from ..likelihoods import Gaussian
from ..util.initialization import initialize_latent
from ..core.sparse_gp import SparseGP, GP
from GPy.core.parameterization.variational import VariationalPosterior
from GPy.models.bayesian_gplvm_minibatch import BayesianGPLVMMiniBatch
from GPy.models.bayesian_gplvm import BayesianGPLVM
from GPy.models.sparse_gp_minibatch import SparseGPMiniBatch
class MRD(BayesianGPLVMMiniBatch):
"""

View file

@ -1,7 +1,6 @@
# Copyright (c) 2013, the GPy Authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from ..core import GP
from . import SparseGPClassification
from .. import likelihoods
from .. import kern

View file

@ -62,7 +62,7 @@ class SparseGPClassificationUncertainInput(SparseGP):
.. Note:: Multiple independent outputs are allowed using columns of Y
"""
def __init__(self, X, X_variance, Y, kernel=None, Z=None, num_inducing=10, Y_metadata=None, normalizer=None):
from ..core.parameterization.variational import NormalPosterior
from GPy.core.parameterization.variational import NormalPosterior
if kernel is None:
kernel = kern.RBF(X.shape[1])

View file

@ -4,7 +4,6 @@
import numpy as np
from ..core import SparseGP
from ..inference.latent_function_inference import VarDTC
from .. import likelihoods
from .. import kern
from .. import util

View file

@ -4,18 +4,15 @@
from __future__ import print_function
import numpy as np
from ..core.parameterization.param import Param
from GPy.core.parameterization.variational import VariationalPosterior
from ..core.sparse_gp import SparseGP
from ..core.gp import GP
from ..inference.latent_function_inference import var_dtc
from .. import likelihoods
from ..core.parameterization.variational import VariationalPosterior
import logging
from GPy.inference.latent_function_inference.posterior import Posterior
from GPy.inference.optimization.stochastics import SparseGPStochastics,\
SparseGPMissing
#no stochastics.py file added! from GPy.inference.optimization.stochastics import SparseGPStochastics,\
#SparseGPMissing
from ..inference.latent_function_inference.posterior import Posterior
from ..inference.optimization.stochastics import SparseGPStochastics, SparseGPMissing
logger = logging.getLogger("sparse gp")
class SparseGPMiniBatch(SparseGP):

View file

@ -3,12 +3,11 @@
import numpy as np
from ..core import SparseGP
from ..core.sparse_gp_mpi import SparseGP_MPI
from .. import likelihoods
from .. import kern
from ..inference.latent_function_inference import VarDTC
from ..core.parameterization.variational import NormalPosterior
from GPy.core.parameterization.variational import NormalPosterior
class SparseGPRegression(SparseGP_MPI):
"""

View file

@ -2,9 +2,8 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import sys
from GPy.models.sparse_gp_regression import SparseGPRegression
from .sparse_gp_regression import SparseGPRegression
class SparseGPLVM(SparseGPRegression):
"""

View file

@ -7,7 +7,7 @@ from ..core.sparse_gp_mpi import SparseGP_MPI
from .. import kern
from ..core.parameterization import Param
from ..likelihoods import Gaussian
from ..core.parameterization.variational import SpikeAndSlabPrior, SpikeAndSlabPosterior,VariationalPrior
from GPy.core.parameterization.variational import SpikeAndSlabPrior, SpikeAndSlabPosterior,VariationalPrior
from ..inference.latent_function_inference.var_dtc_parallel import update_gradients, VarDTC_minibatch
from ..kern.src.psi_comp.ssrbf_psi_gpucomp import PSICOMP_SSRBF_GPU
@ -19,7 +19,7 @@ class IBPPosterior(SpikeAndSlabPosterior):
"""
binary_prob : the probability of the distribution on the slab part.
"""
from ..core.parameterization.transformations import Logexp
from paramz.transformations import Logexp
super(IBPPosterior, self).__init__(means, variances, binary_prob, group_spike=True, name=name)
self.sharedX = sharedX
if sharedX:
@ -60,7 +60,7 @@ class IBPPosterior(SpikeAndSlabPosterior):
class IBPPrior(VariationalPrior):
def __init__(self, input_dim, alpha =2., name='IBPPrior', **kw):
super(IBPPrior, self).__init__(name=name, **kw)
from ..core.parameterization.transformations import Logexp, __fixed__
from paramz.transformations import Logexp, __fixed__
self.input_dim = input_dim
self.variance = 1.
self.alpha = Param('alpha', alpha, __fixed__)
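
__fixed__ acts as a transformation-like marker that keeps a parameter out of the optimizer, as with alpha above. A two-line sketch (names and values are illustrative):

from paramz import Param
from paramz.transformations import Logexp, __fixed__

alpha = Param('alpha', 2.0, __fixed__)  # held constant during optimization
tau = Param('tau', 1.0, Logexp())       # optimized on a positive scale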

View file

@ -5,7 +5,7 @@ The Manifold Relevance Determination model with the spike-and-slab prior
import numpy as np
from ..core import Model
from .ss_gplvm import SSGPLVM
from ..core.parameterization.variational import SpikeAndSlabPrior,NormalPosterior,VariationalPrior
from GPy.core.parameterization.variational import SpikeAndSlabPrior,NormalPosterior,VariationalPrior
from ..util.misc import param_to_array
from ..kern import RBF
from ..core import Param
@ -214,7 +214,7 @@ class SpikeAndSlabPrior_SSMRD(SpikeAndSlabPrior):
class IBPPrior_SSMRD(VariationalPrior):
def __init__(self, nModels, input_dim, alpha =2., tau=None, name='IBPPrior', **kw):
super(IBPPrior_SSMRD, self).__init__(name=name, **kw)
from ..core.parameterization.transformations import Logexp, __fixed__
from paramz.transformations import Logexp, __fixed__
self.nModels = nModels
self._b_prob_all = 0.5
self.input_dim = input_dim

View file

@ -337,7 +337,7 @@ def x_frame1D(X,plot_limits=None,resolution=None):
"""
assert X.shape[1] ==1, "x_frame1D is defined for one-dimensional inputs"
if plot_limits is None:
from ...core.parameterization.variational import VariationalPosterior
from GPy.core.parameterization.variational import VariationalPosterior
if isinstance(X, VariationalPosterior):
xmin,xmax = X.mean.min(0),X.mean.max(0)
else:
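
x_frame1D derives default plot limits from the inputs themselves, taking the variational mean when X is a VariationalPosterior. A condensed sketch of the limit computation (the padding factor is illustrative, not necessarily GPy's exact value):

import numpy as np

def x_frame1D_sketch(X, resolution=200, pad=0.25):
    # pad the observed range on both sides, then grid it for plotting
    xmin, xmax = float(X.min()), float(X.max())
    span = xmax - xmin
    return np.linspace(xmin - pad * span, xmax + pad * span, resolution)[:, None]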

View file

@ -1,6 +1,7 @@
import numpy as np
import time
from ...core.parameterization.variational import VariationalPosterior
try:
import matplotlib.pyplot as plt
import matplotlib as mpl

View file

[Two dozen binary image files (1.2 KiB to 60 KiB) also changed in this commit. Each before/after preview pair has an identical size, and one new 1.2 KiB image was added; GitHub's image-diff viewer widgets are omitted here. Some files were not shown because too many files changed in this diff.]