This commit is contained in:
James Hensman 2014-02-28 12:07:19 +00:00
commit ba1d6697e5
14 changed files with 356 additions and 122 deletions

View file

@ -60,20 +60,6 @@ class Model(Parameterized):
self.priors = state.pop() self.priors = state.pop()
Parameterized._setstate(self, state) Parameterized._setstate(self, state)
    def randomize(self):
        """
        Randomize the model.

        Make this draw from the prior if one exists, else draw from N(0,1)
        """
        # first take care of all parameters (from N(0,1))
        # x = self._get_params_transformed()
        x = np.random.randn(self.size_transformed)
        # map the unconstrained N(0,1) draw back into the constrained model space
        x = self._untransform_params(x)
        # now draw from prior where possible
        # NOTE(review): assumes self.priors maps prior -> raveled index array — verify against the index-operations container
        [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
        self._set_params(x)
        # self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs): def optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):
""" """
Perform random restarts of the model, and set the model to the best Perform random restarts of the model, and set the model to the best

View file

@ -62,10 +62,11 @@ class ObservableArray(np.ndarray, Observable):
def __setitem__(self, s, val): def __setitem__(self, s, val):
if self._s_not_empty(s): if self._s_not_empty(s):
super(ObservableArray, self).__setitem__(s, val) super(ObservableArray, self).__setitem__(s, val)
self._notify_observers() self._notify_observers(self[s])
def __getslice__(self, start, stop): def __getslice__(self, start, stop):
return self.__getitem__(slice(start, stop)) return self.__getitem__(slice(start, stop))
def __setslice__(self, start, stop, val): def __setslice__(self, start, stop, val):
return self.__setitem__(slice(start, stop), val) return self.__setitem__(slice(start, stop), val)

View file

@ -0,0 +1,18 @@
'''
Created on 27 Feb 2014
@author: maxz
'''
class ParamList(list):
    """List for storing ndarray-like objects.

    Membership testing (``in``) compares by identity (``is``) instead of
    delegating to each element's ``__eq__``, which would be ambiguous for
    array-likes (element-wise comparison returns an array, not a bool).
    """
    def __contains__(self, other):
        # any() short-circuits on the first identical element.
        return any(el is other for el in self)

View file

@ -3,8 +3,8 @@
import itertools import itertools
import numpy import numpy
from parameter_core import Constrainable, Gradcheckable, Indexable, Parentable, adjust_name_for_printing from parameter_core import OptimizationHandlable, Gradcheckable, adjust_name_for_printing
from array_core import ObservableArray, ParamList from array_core import ObservableArray
###### printing ###### printing
__constraints_name__ = "Constraint" __constraints_name__ = "Constraint"
@ -15,7 +15,7 @@ __precision__ = numpy.get_printoptions()['precision'] # numpy printing precision
__print_threshold__ = 5 __print_threshold__ = 5
###### ######
class Param(Constrainable, ObservableArray, Gradcheckable): class Param(OptimizationHandlable, ObservableArray, Gradcheckable):
""" """
Parameter object for GPy models. Parameter object for GPy models.
@ -148,8 +148,11 @@ class Param(Constrainable, ObservableArray, Gradcheckable):
#=========================================================================== #===========================================================================
# get/set parameters # get/set parameters
#=========================================================================== #===========================================================================
def _set_params(self, param, update=True): def _set_params(self, param, trigger_parent=True):
self.flat = param self.flat = param
if trigger_parent: min_priority = None
else: min_priority = -numpy.inf
self._notify_observers(None, min_priority)
def _get_params(self): def _get_params(self):
return self.flat return self.flat
@ -172,11 +175,9 @@ class Param(Constrainable, ObservableArray, Gradcheckable):
try: new_arr._current_slice_ = s; new_arr._original_ = self.base is new_arr.base try: new_arr._current_slice_ = s; new_arr._original_ = self.base is new_arr.base
except AttributeError: pass # returning 0d array or float, double etc except AttributeError: pass # returning 0d array or float, double etc
return new_arr return new_arr
def __setitem__(self, s, val): def __setitem__(self, s, val):
super(Param, self).__setitem__(s, val) super(Param, self).__setitem__(s, val)
if self.has_parent():
self._direct_parent_._notify_parameters_changed()
#self._notify_observers()
#=========================================================================== #===========================================================================
# Index Operations: # Index Operations:
@ -204,6 +205,7 @@ class Param(Constrainable, ObservableArray, Gradcheckable):
ind = self._indices(slice_index) ind = self._indices(slice_index)
if ind.ndim < 2: ind = ind[:, None] if ind.ndim < 2: ind = ind[:, None]
return numpy.asarray(numpy.apply_along_axis(lambda x: numpy.sum(extended_realshape * x), 1, ind), dtype=int) return numpy.asarray(numpy.apply_along_axis(lambda x: numpy.sum(extended_realshape * x), 1, ind), dtype=int)
def _expand_index(self, slice_index=None): def _expand_index(self, slice_index=None):
# this calculates the full indexing arrays from the slicing objects given by get_item for _real..._ attributes # this calculates the full indexing arrays from the slicing objects given by get_item for _real..._ attributes
# it basically translates slices to their respective index arrays and turns negative indices around # it basically translates slices to their respective index arrays and turns negative indices around
@ -345,6 +347,7 @@ class ParamConcatenation(object):
See :py:class:`GPy.core.parameter.Param` for more details on constraining. See :py:class:`GPy.core.parameter.Param` for more details on constraining.
""" """
# self.params = params # self.params = params
from lists_and_dicts import ParamList
self.params = ParamList([]) self.params = ParamList([])
for p in params: for p in params:
for p in p.flattened_parameters: for p in p.flattened_parameters:

View file

@ -2,6 +2,7 @@
# Licensed under the BSD 3-clause license (see LICENSE.txt) # Licensed under the BSD 3-clause license (see LICENSE.txt)
from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED from transformations import Transformation, Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED
import heapq
__updated__ = '2013-12-16' __updated__ = '2013-12-16'
@ -11,25 +12,43 @@ def adjust_name_for_printing(name):
return '' return ''
class Observable(object): class Observable(object):
_updated = True
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
from collections import defaultdict self._observer_callables_ = []
self._observer_callables_ = defaultdict(list)
def add_observer(self, observer, callble):
self._observer_callables_[observer].append(callble)
def add_observer(self, observer, callble, priority=0):
heapq.heappush(self._observer_callables_, (priority, observer, callble))
def remove_observer(self, observer, callble=None): def remove_observer(self, observer, callble=None):
if observer in self._observer_callables_: to_remove = []
if callble is None: for p, obs, clble in self._observer_callables_:
del self._observer_callables_[observer] if callble is not None:
elif callble in self._observer_callables_[observer]: if (obs == observer) and (callble == clble):
self._observer_callables_[observer].remove(callble) to_remove.append((p, obs, clble))
if len(self._observer_callables_[observer]) == 0: else:
self.remove_observer(observer) if obs is observer:
to_remove.append((p, obs, clble))
def _notify_observers(self): for r in to_remove:
[[callble(self) for callble in callables] self._observer_callables_.remove(r)
for callables in self._observer_callables_.itervalues()]
    def _notify_observers(self, which=None, min_priority=None):
        """
        Notifies all observers. Which is the element, which kicked off this
        notification loop.

        NOTE: notifies only observers with priority p > min_priority!

        :param which: object, which started this notification loop
        :param min_priority: only notify observers with priority > min_priority
            if min_priority is None, notify all observers in order
        """
        if which is None:
            which = self
        # observers are stored as (priority, observer, callable) heap entries;
        # nlargest over the full list yields them in descending priority order
        if min_priority is None:
            [callble(which) for _, _, callble in heapq.nlargest(len(self._observer_callables_), self._observer_callables_)]
        else:
            # strict '>' comparison: observers registered at exactly min_priority are skipped
            [callble(which) for p, _, callble in heapq.nlargest(len(self._observer_callables_), self._observer_callables_) if p > min_priority]
class Pickleable(object): class Pickleable(object):
def _getstate(self): def _getstate(self):
@ -205,9 +224,9 @@ class Constrainable(Nameable, Indexable):
#=========================================================================== #===========================================================================
# Prior Operations # Prior Operations
#=========================================================================== #===========================================================================
def set_prior(self, prior, warning=True, update=True): def set_prior(self, prior, warning=True):
repriorized = self.unset_priors() repriorized = self.unset_priors()
self._add_to_index_operations(self.priors, repriorized, prior, warning, update) self._add_to_index_operations(self.priors, repriorized, prior, warning)
def unset_priors(self, *priors): def unset_priors(self, *priors):
return self._remove_from_index_operations(self.priors, priors) return self._remove_from_index_operations(self.priors, priors)
@ -233,7 +252,7 @@ class Constrainable(Nameable, Indexable):
# Constrain operations -> done # Constrain operations -> done
#=========================================================================== #===========================================================================
def constrain(self, transform, warning=True, update=True): def constrain(self, transform, warning=True):
""" """
:param transform: the :py:class:`GPy.core.transformations.Transformation` :param transform: the :py:class:`GPy.core.transformations.Transformation`
to constrain the this parameter to. to constrain the this parameter to.
@ -243,9 +262,9 @@ class Constrainable(Nameable, Indexable):
:py:class:`GPy.core.transformations.Transformation`. :py:class:`GPy.core.transformations.Transformation`.
""" """
if isinstance(transform, Transformation): if isinstance(transform, Transformation):
self._set_params(transform.initialize(self._get_params()), update=False) self._set_params(transform.initialize(self._get_params()), trigger_parent=True)
reconstrained = self.unconstrain() reconstrained = self.unconstrain()
self._add_to_index_operations(self.constraints, reconstrained, transform, warning, update) self._add_to_index_operations(self.constraints, reconstrained, transform, warning)
def unconstrain(self, *transforms): def unconstrain(self, *transforms):
""" """
@ -256,30 +275,30 @@ class Constrainable(Nameable, Indexable):
""" """
return self._remove_from_index_operations(self.constraints, transforms) return self._remove_from_index_operations(self.constraints, transforms)
def constrain_positive(self, warning=True, update=True): def constrain_positive(self, warning=True):
""" """
:param warning: print a warning if re-constraining parameters. :param warning: print a warning if re-constraining parameters.
Constrain this parameter to the default positive constraint. Constrain this parameter to the default positive constraint.
""" """
self.constrain(Logexp(), warning=warning, update=update) self.constrain(Logexp(), warning=warning)
def constrain_negative(self, warning=True, update=True): def constrain_negative(self, warning=True):
""" """
:param warning: print a warning if re-constraining parameters. :param warning: print a warning if re-constraining parameters.
Constrain this parameter to the default negative constraint. Constrain this parameter to the default negative constraint.
""" """
self.constrain(NegativeLogexp(), warning=warning, update=update) self.constrain(NegativeLogexp(), warning=warning)
def constrain_bounded(self, lower, upper, warning=True, update=True): def constrain_bounded(self, lower, upper, warning=True):
""" """
:param lower, upper: the limits to bound this parameter to :param lower, upper: the limits to bound this parameter to
:param warning: print a warning if re-constraining parameters. :param warning: print a warning if re-constraining parameters.
Constrain this parameter to lie within the given range. Constrain this parameter to lie within the given range.
""" """
self.constrain(Logistic(lower, upper), warning=warning, update=update) self.constrain(Logistic(lower, upper), warning=warning)
def unconstrain_positive(self): def unconstrain_positive(self):
""" """
@ -309,12 +328,10 @@ class Constrainable(Nameable, Indexable):
for p in self._parameters_: for p in self._parameters_:
p._parent_changed(parent) p._parent_changed(parent)
def _add_to_index_operations(self, which, reconstrained, transform, warning, update): def _add_to_index_operations(self, which, reconstrained, transform, warning):
if warning and reconstrained.size > 0: if warning and reconstrained.size > 0:
print "WARNING: reconstraining parameters {}".format(self.parameter_names() or self.name) print "WARNING: reconstraining parameters {}".format(self.parameter_names() or self.name)
which.add(transform, self._raveled_index()) which.add(transform, self._raveled_index())
if update:
self._notify_observers()
def _remove_from_index_operations(self, which, transforms): def _remove_from_index_operations(self, which, transforms):
if len(transforms) == 0: if len(transforms) == 0:
@ -329,11 +346,72 @@ class Constrainable(Nameable, Indexable):
return removed return removed
class OptimizationHandlable(Constrainable, Observable):
    def _get_params_transformed(self):
        # transformed parameters (apply transformation rules)
        p = self._get_params()
        # undo each constraint's forward map (finv) on its index set;
        # fixed parameters (__fixed__) carry no transformation
        [np.put(p, ind, c.finv(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
        if self._has_fixes():
            # drop the entries belonging to fixed parameters
            return p[self._fixes_]
        return p
    def _set_params_transformed(self, p):
        # inverse apply transformations for parameters and set the resulting parameters
        self._set_params(self._untransform_params(p))
    def _untransform_params(self, p):
        # map optimizer-space parameters back into model space; p is not modified in place
        p = p.copy()
        # re-insert the values of fixed parameters before applying any transformation
        if self._has_fixes(): tmp = self._get_params(); tmp[self._fixes_] = p; p = tmp; del tmp
        # apply each constraint's transformation (f) on its index set
        [np.put(p, ind, c.f(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
        return p
    def _get_params(self):
        # flat concatenation of all child parameter values
        # don't overwrite this anymore!
        if not self.size:
            return np.empty(shape=(0,), dtype=np.float64)
        return np.hstack([x._get_params() for x in self._parameters_ if x.size > 0])
class Parameterizable(Constrainable, Observable): def _set_params(self, params, trigger_parent=True):
# don't overwrite this anymore!
raise NotImplementedError, "This needs to be implemented seperately"
#===========================================================================
# Optimization handles:
#===========================================================================
    def _get_param_names(self):
        # one name per scalar parameter, e.g. "<hierarchy>[<index>]"
        # NOTE(review): 'hirarchy_name' looks like a typo for 'hierarchy_name',
        # but it must match the method name defined on Param elsewhere in the project
        n = np.array([p.hirarchy_name() + '[' + str(i) + ']' for p in self.flattened_parameters for i in p._indices()])
        return n
    def _get_param_names_transformed(self):
        # names of the optimizer-visible parameters (fixed entries removed)
        n = self._get_param_names()
        if self._has_fixes():
            return n[self._fixes_]
        return n
#===========================================================================
# Randomizeable
#===========================================================================
    def randomize(self):
        """
        Randomize the model.

        Make this draw from the prior if one exists, else draw from N(0,1)
        """
        import numpy as np
        # first take care of all parameters (from N(0,1))
        # x = self._get_params_transformed()
        x = np.random.randn(self.size_transformed)
        # map the unconstrained N(0,1) draw back into the constrained model space
        x = self._untransform_params(x)
        # now draw from prior where possible
        [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
        self._set_params(x)
        # self._set_params_transformed(self._get_params_transformed()) # makes sure all of the tied parameters get the same init (since there's only one prior object...)
import numpy as np
class Parameterizable(OptimizationHandlable):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(Parameterizable, self).__init__(*args, **kwargs) super(Parameterizable, self).__init__(*args, **kwargs)
from GPy.core.parameterization.array_core import ParamList from GPy.core.parameterization.lists_and_dicts import ParamList
_parameters_ = ParamList() _parameters_ = ParamList()
self._added_names_ = set() self._added_names_ = set()
@ -377,28 +455,26 @@ class Parameterizable(Constrainable, Observable):
import itertools import itertools
[p._collect_gradient(target[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)] [p._collect_gradient(target[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
    def _set_params(self, params, trigger_parent=True):
        import itertools
        # distribute the flat parameter vector to the children; each child is
        # told trigger_parent=False so only one aggregated notification (below)
        # is emitted instead of one per child
        [p._set_params(params[s], trigger_parent=False) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
        if trigger_parent: min_priority = None  # notify every observer
        else: min_priority = -np.inf  # strict '>' excludes observers registered at priority -inf (the parent pass-through)
        self._notify_observers(None, min_priority)
def _set_gradient(self, g): def _set_gradient(self, g):
import itertools import itertools
[p._set_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)] [p._set_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
def _get_params(self):
import numpy as np #===========================================================================
# don't overwrite this anymore! # TODO: not working yet
if not self.size: #===========================================================================
return np.empty(shape=(0,), dtype=np.float64)
return np.hstack([x._get_params() for x in self._parameters_ if x.size > 0])
def _set_params(self, params, update=True):
# don't overwrite this anymore!
import itertools
[p._set_params(params[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_)]
self._notify_parameters_changed()
def copy(self): def copy(self):
"""Returns a (deep) copy of the current model""" """Returns a (deep) copy of the current model"""
import copy import copy
from .index_operations import ParameterIndexOperations, ParameterIndexOperationsView from .index_operations import ParameterIndexOperations, ParameterIndexOperationsView
from .array_core import ParamList from .lists_and_dicts import ParamList
dc = dict() dc = dict()
for k, v in self.__dict__.iteritems(): for k, v in self.__dict__.iteritems():
@ -424,12 +500,6 @@ class Parameterizable(Constrainable, Observable):
s.add_parameter(p) s.add_parameter(p)
return s return s
def _notify_parameters_changed(self):
self.parameters_changed()
self._notify_observers()
if self.has_parent():
self._direct_parent_._notify_parameters_changed()
def parameters_changed(self): def parameters_changed(self):
""" """

View file

@ -58,6 +58,7 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable):
self._in_init_ = True self._in_init_ = True
self._parameters_ = ParamList() self._parameters_ = ParamList()
self.size = sum(p.size for p in self._parameters_) self.size = sum(p.size for p in self._parameters_)
self.add_observer(self, self._parameters_changed_notification, -100)
if not self._has_fixes(): if not self._has_fixes():
self._fixes_ = None self._fixes_ = None
self._param_slices_ = [] self._param_slices_ = []
@ -65,7 +66,7 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable):
del self._in_init_ del self._in_init_
def build_pydot(self, G=None): def build_pydot(self, G=None):
import pydot import pydot # @UnresolvedImport
iamroot = False iamroot = False
if G is None: if G is None:
G = pydot.Dot(graph_type='digraph') G = pydot.Dot(graph_type='digraph')
@ -116,6 +117,7 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable):
self.constraints.update(param.constraints, start) self.constraints.update(param.constraints, start)
self.priors.update(param.priors, start) self.priors.update(param.priors, start)
self._parameters_.insert(index, param) self._parameters_.insert(index, param)
param.add_observer(self, self._pass_through_notify_observers, -np.inf)
self.size += param.size self.size += param.size
else: else:
raise RuntimeError, """Parameter exists already added and no copy made""" raise RuntimeError, """Parameter exists already added and no copy made"""
@ -170,6 +172,13 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable):
self._add_parameter_name(p) self._add_parameter_name(p)
#=========================================================================== #===========================================================================
# notification system
#===========================================================================
    def _parameters_changed_notification(self, which):
        # observer callback: re-run this object's update logic whenever a parameter changes
        self.parameters_changed()
    def _pass_through_notify_observers(self, which):
        # observer callback: forward a child's change notification to this object's own observers
        self._notify_observers(which)
#===========================================================================
# Pickling operations # Pickling operations
#=========================================================================== #===========================================================================
def pickle(self, f, protocol=-1): def pickle(self, f, protocol=-1):
@ -237,32 +246,7 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable):
g[self._offset_for(p) + numpy.array(list(i))] += g[self._raveled_index_for(t)] g[self._offset_for(p) + numpy.array(list(i))] += g[self._raveled_index_for(t)]
if self._has_fixes(): return g[self._fixes_] if self._has_fixes(): return g[self._fixes_]
return g return g
#===========================================================================
# Optimization handles:
#===========================================================================
def _get_param_names(self):
n = numpy.array([p.hirarchy_name() + '[' + str(i) + ']' for p in self.flattened_parameters for i in p._indices()])
return n
def _get_param_names_transformed(self):
n = self._get_param_names()
if self._has_fixes():
return n[self._fixes_]
return n
def _get_params_transformed(self):
# transformed parameters (apply transformation rules)
p = self._get_params()
[numpy.put(p, ind, c.finv(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
if self._has_fixes():
return p[self._fixes_]
return p
def _set_params_transformed(self, p):
# inverse apply transformations for parameters and set the resulting parameters
self._set_params(self._untransform_params(p))
def _untransform_params(self, p):
p = p.copy()
if self._has_fixes(): tmp = self._get_params(); tmp[self._fixes_] = p; p = tmp; del tmp
[numpy.put(p, ind, c.f(p[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
return p
#=========================================================================== #===========================================================================
# Indexable Handling # Indexable Handling
#=========================================================================== #===========================================================================
@ -297,6 +281,7 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable):
this is not in the global view of things! this is not in the global view of things!
""" """
return numpy.r_[:self.size] return numpy.r_[:self.size]
#=========================================================================== #===========================================================================
# Fixing parameters: # Fixing parameters:
#=========================================================================== #===========================================================================
@ -304,6 +289,7 @@ class Parameterized(Parameterizable, Pickleable, Gradcheckable):
if self._has_fixes(): if self._has_fixes():
return self._fixes_[self._raveled_index_for(param)] return self._fixes_[self._raveled_index_for(param)]
return numpy.ones(self.size, dtype=bool)[self._raveled_index_for(param)] return numpy.ones(self.size, dtype=bool)[self._raveled_index_for(param)]
#=========================================================================== #===========================================================================
# Convenience for fixed, tied checking of param: # Convenience for fixed, tied checking of param:
#=========================================================================== #===========================================================================

View file

@ -64,6 +64,36 @@ class Gaussian(Prior):
return np.random.randn(n) * self.sigma + self.mu return np.random.randn(n) * self.sigma + self.mu
class Uniform(Prior):
    """Uniform prior over [lower, upper], coupled with random variables."""
    domain = _REAL
    # weak references to existing instances, used for the singleton pattern below
    _instances = []
    def __new__(cls, lower, upper):  # Singleton:
        # reuse one shared instance per (lower, upper) pair; dead weakrefs are purged first
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if instance().lower == lower and instance().upper == upper:
                    return instance()
        o = super(Prior, cls).__new__(cls, lower, upper)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, lower, upper):
        self.lower = float(lower)
        self.upper = float(upper)
    def __str__(self):
        # NOTE(review): np.round with no decimals rounds to integers, so the
        # printed bounds lose any fractional part — confirm this is intended
        return "[" + str(np.round(self.lower)) + ', ' + str(np.round(self.upper)) + ']'
    def lnpdf(self, x):
        # NOTE(review): returns a 1/0 indicator of lower <= x <= upper, not the
        # actual log-density (-log(upper-lower) inside, -inf outside) — confirm
        # this convention matches the other Prior subclasses
        region = (x>=self.lower) * (x<=self.upper)
        return region
    def lnpdf_grad(self, x):
        # the (flat) log-density has zero gradient wherever it is defined
        return np.zeros(x.shape)
    def rvs(self, n):
        # draw n samples uniformly from [lower, upper]
        return np.random.uniform(self.lower, self.upper, size=n)
class LogGaussian(Prior): class LogGaussian(Prior):
""" """
Implementation of the univariate *log*-Gaussian probability function, coupled with random variables. Implementation of the univariate *log*-Gaussian probability function, coupled with random variables.

View file

@ -6,8 +6,11 @@ import numpy as np
from domains import _POSITIVE,_NEGATIVE, _BOUNDED from domains import _POSITIVE,_NEGATIVE, _BOUNDED
import weakref import weakref
import sys
#_lim_val = -np.log(sys.float_info.epsilon)
_exp_lim_val = np.finfo(np.float64).max _exp_lim_val = np.finfo(np.float64).max
_lim_val = np.log(_exp_lim_val)#-np.log(sys.float_info.epsilon) _lim_val = np.log(_exp_lim_val)#
#=============================================================================== #===============================================================================
# Fixing constants # Fixing constants
@ -35,7 +38,6 @@ class Transformation(object):
""" produce a sensible initial value for f(x)""" """ produce a sensible initial value for f(x)"""
raise NotImplementedError raise NotImplementedError
def plot(self, xlabel=r'transformed $\theta$', ylabel=r'$\theta$', axes=None, *args,**kw): def plot(self, xlabel=r'transformed $\theta$', ylabel=r'$\theta$', axes=None, *args,**kw):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported." assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from ...plotting.matplot_dep import base_plots from ...plotting.matplot_dep import base_plots
@ -52,7 +54,7 @@ class Transformation(object):
class Logexp(Transformation): class Logexp(Transformation):
domain = _POSITIVE domain = _POSITIVE
def f(self, x): def f(self, x):
return np.where(x>_lim_val, x, np.log(1. + np.exp(np.clip(x, -np.inf, _lim_val)))) return np.where(x>_lim_val, x, np.log(1. + np.exp(np.clip(x, -_lim_val, _lim_val))))
#raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x))) #raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
def finv(self, f): def finv(self, f):
return np.where(f>_lim_val, f, np.log(np.exp(f) - 1.)) return np.where(f>_lim_val, f, np.log(np.exp(f) - 1.))

View file

@ -60,7 +60,7 @@ class VarDTC(object):
_, output_dim = Y.shape _, output_dim = Y.shape
#see whether we've got a different noise variance for each datum #see whether we've got a different noise variance for each datum
beta = 1./np.squeeze(likelihood.variance) beta = 1./max(1e-6, np.squeeze(likelihood.variance))
# VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency! # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
#self.YYTfactor = self.get_YYTfactor(Y) #self.YYTfactor = self.get_YYTfactor(Y)
@ -214,7 +214,7 @@ class VarDTCMissingData(object):
psi2_all = None psi2_all = None
Ys, traces = self._Y(Y) Ys, traces = self._Y(Y)
beta_all = 1./likelihood.variance beta_all = 1./max(1e-6, likelihood.variance)
het_noise = beta_all.size != 1 het_noise = beta_all.size != 1
import itertools import itertools

View file

@ -20,7 +20,7 @@ class RBF(Stationary):
""" """
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='RBF'): def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, name='rbf'):
super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, name) super(RBF, self).__init__(input_dim, variance, lengthscale, ARD, name)
self.weave_options = {} self.weave_options = {}
@ -159,7 +159,7 @@ class RBF(Stationary):
grad_mu = np.sum(dL_dpsi1[:, :, None] * tmp * dist, 1) grad_mu = np.sum(dL_dpsi1[:, :, None] * tmp * dist, 1)
grad_S = np.sum(dL_dpsi1[:, :, None] * 0.5 * tmp * (dist_sq - 1), 1) grad_S = np.sum(dL_dpsi1[:, :, None] * 0.5 * tmp * (dist_sq - 1), 1)
#psi2 #psi2
denom, Zdist, Zdist_sq, mudist, mudist_sq, psi2 = self._psi2computations(Z, variational_posterior) denom, _, _, mudist, mudist_sq, psi2 = self._psi2computations(Z, variational_posterior)
tmp = psi2[:, :, :, None] / l2 / denom tmp = psi2[:, :, :, None] / l2 / denom
grad_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * mudist).sum(1).sum(1) grad_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * mudist).sum(1).sum(1)
grad_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*mudist_sq - 1)).sum(1).sum(1) grad_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*mudist_sq - 1)).sum(1).sum(1)
@ -237,7 +237,7 @@ class RBF(Stationary):
return denom, dist, dist_sq, psi1 return denom, dist, dist_sq, psi1
#@cache_this(ignore_args=(1,)) @Cache_this(limit=1, ignore_args=(0,))
def _Z_distances(self, Z): def _Z_distances(self, Z):
Zhat = 0.5 * (Z[:, None, :] + Z[None, :, :]) # M,M,Q Zhat = 0.5 * (Z[:, None, :] + Z[None, :, :]) # M,M,Q
Zdist = 0.5 * (Z[:, None, :] - Z[None, :, :]) # M,M,Q Zdist = 0.5 * (Z[:, None, :] - Z[None, :, :]) # M,M,Q
@ -257,7 +257,6 @@ class RBF(Stationary):
#allocate memory for the things we want to compute #allocate memory for the things we want to compute
mudist = np.empty((N, M, M, Q)) mudist = np.empty((N, M, M, Q))
mudist_sq = np.empty((N, M, M, Q)) mudist_sq = np.empty((N, M, M, Q))
exponent = np.zeros((N,M,M))
psi2 = np.empty((N, M, M)) psi2 = np.empty((N, M, M))
l2 = self.lengthscale **2 l2 = self.lengthscale **2

View file

@ -69,12 +69,12 @@ class Stationary(Kern):
def dK_dr(self, r): def dK_dr(self, r):
raise NotImplementedError, "implement derivative of the covariance function wrt r to use this class" raise NotImplementedError, "implement derivative of the covariance function wrt r to use this class"
#@Cache_this(limit=5, ignore_args=()) @Cache_this(limit=5, ignore_args=())
def K(self, X, X2=None): def K(self, X, X2=None):
r = self._scaled_dist(X, X2) r = self._scaled_dist(X, X2)
return self.K_of_r(r) return self.K_of_r(r)
#@Cache_this(limit=5, ignore_args=(0,)) @Cache_this(limit=5, ignore_args=(0,))
def _unscaled_dist(self, X, X2=None): def _unscaled_dist(self, X, X2=None):
""" """
Compute the Euclidean distance between each row of X and X2, or between Compute the Euclidean distance between each row of X and X2, or between
@ -88,7 +88,7 @@ class Stationary(Kern):
X2sq = np.sum(np.square(X2),1) X2sq = np.sum(np.square(X2),1)
return np.sqrt(-2.*np.dot(X, X2.T) + (X1sq[:,None] + X2sq[None,:])) return np.sqrt(-2.*np.dot(X, X2.T) + (X1sq[:,None] + X2sq[None,:]))
#@Cache_this(limit=5, ignore_args=()) @Cache_this(limit=5, ignore_args=())
def _scaled_dist(self, X, X2=None): def _scaled_dist(self, X, X2=None):
""" """
Efficiently compute the scaled distance, r. Efficiently compute the scaled distance, r.
@ -141,7 +141,7 @@ class Stationary(Kern):
diagonal, where we return zero (the distance on the diagonal is zero). diagonal, where we return zero (the distance on the diagonal is zero).
This term appears in derviatives. This term appears in derviatives.
""" """
dist = self._scaled_dist(X, X2) dist = self._scaled_dist(X, X2).copy()
if X2 is None: if X2 is None:
nondiag = util.diag.offdiag_view(dist) nondiag = util.diag.offdiag_view(dist)
nondiag[:] = 1./nondiag nondiag[:] = 1./nondiag

View file

@ -56,10 +56,13 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
if ax is None: if ax is None:
fig = pb.figure(num=fignum) fig = pb.figure(num=fignum)
ax = fig.add_subplot(111) ax = fig.add_subplot(111)
X, Y = param_to_array(model.X, model.Y) if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs(): X_variance = model.X_variance X = model.X.mean
X_variance = param_to_array(model.X.variance)
else:
X = model.X
X, Y = param_to_array(X, model.Y)
if hasattr(model, 'Z'): Z = param_to_array(model.Z) if hasattr(model, 'Z'): Z = param_to_array(model.Z)
#work out what the inputs are for plotting (1D or 2D) #work out what the inputs are for plotting (1D or 2D)
@ -98,10 +101,10 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
#add error bars for uncertain (if input uncertainty is being modelled) #add error bars for uncertain (if input uncertainty is being modelled)
#if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs(): if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs():
# ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(), ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(),
# xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()), xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()),
# ecolor='k', fmt=None, elinewidth=.5, alpha=.5) ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
#set the limits of the plot to some sensible values #set the limits of the plot to some sensible values

View file

@ -0,0 +1,133 @@
'''
Created on 27 Feb 2014
@author: maxz
'''
import unittest
from GPy.core.parameterization.parameterized import Parameterized
from GPy.core.parameterization.param import Param
import numpy
class ParamTestParent(Parameterized):
    """Test double for a parameterized container: counts how often a change
    in any child parameter is propagated up to this parent."""
    # class-level default; incremented per notification (shadowed on the
    # instance after the first += )
    parent_changed_count = 0
    def parameters_changed(self):
        # Called by the Parameterized framework whenever a descendant
        # parameter changes; we only record that it happened.
        self.parent_changed_count += 1
class ParameterizedTest(Parameterized):
    """Test double that counts parameters_changed calls and forwards
    _set_params unchanged to the Parameterized base class."""
    # class-level default; incremented per notification (shadowed on the
    # instance after the first += )
    params_changed_count = 0
    def parameters_changed(self):
        # Record every change notification delivered to this node.
        self.params_changed_count += 1
    def _set_params(self, params, trigger_parent=True):
        # Pure pass-through; exists so tests exercise the base-class
        # _set_params path explicitly (including the trigger_parent flag).
        Parameterized._set_params(self, params, trigger_parent=trigger_parent)
class Test(unittest.TestCase):
    """Tests for the observer/notification machinery of Parameterized:
    observer add/remove, change propagation to the parent, _set_params
    triggering, and priority ordering of observer callbacks."""

    def setUp(self):
        # Build a small tree: parent -> (par, par2), with par holding a
        # 10x3 Param plus two scalars, and par2 holding two scalars.
        self.parent = ParamTestParent('test parent')
        self.par = ParameterizedTest('test model')
        self.par2 = ParameterizedTest('test model 2')
        self.p = Param('test parameter', numpy.random.normal(1,2,(10,3)))
        self.par.add_parameter(self.p)
        self.par.add_parameter(Param('test1', numpy.random.normal(0,1,(1,))))
        self.par.add_parameter(Param('test2', numpy.random.normal(0,1,(1,))))
        self.par2.add_parameter(Param('par2 test1', numpy.random.normal(0,1,(1,))))
        self.par2.add_parameter(Param('par2 test2', numpy.random.normal(0,1,(1,))))
        self.parent.add_parameter(self.par)
        self.parent.add_parameter(self.par2)
        # Bookkeeping for the observer callbacks below:
        # _observer_triggered: last value passed to _trigger
        # _trigger_count: how many times _trigger fired
        # _first/_second: which callback fired first/second (ordering check)
        self._observer_triggered = None
        self._trigger_count = 0
        self._first = None
        self._second = None

    def _trigger(self, which):
        # Observer callback: record the payload and firing order.
        self._observer_triggered = float(which)
        self._trigger_count += 1
        # If some callback already fired, we are second; else we are first.
        if self._first is not None:
            self._second = self._trigger
        else:
            self._first = self._trigger

    def _trigger_priority(self, which):
        # Second observer callback: only records firing order.
        if self._first is not None:
            self._second = self._trigger_priority
        else:
            self._first = self._trigger_priority

    def test_observable(self):
        # Observer registered with priority -1 fires after the internal
        # parameters_changed machinery.
        self.par.add_observer(self, self._trigger, -1)
        self.assertEqual(self.par.params_changed_count, 0, 'no params changed yet')
        self.assertEqual(self.par.params_changed_count, self.parent.parent_changed_count, 'parent should be triggered as often as param')
        self.p[0,1] = 3 # trigger observers
        # NOTE(review): the observer receives the changed slice; here a
        # single element, so float(which) == 3 — confirm against
        # ObservableArray.__setitem__ passing self[s].
        self.assertEqual(self._observer_triggered, 3, 'observer should have triggered')
        self.assertEqual(self._trigger_count, 1, 'observer should have triggered once')
        self.assertEqual(self.par.params_changed_count, 1, 'params changed once')
        self.assertEqual(self.par.params_changed_count, self.parent.parent_changed_count, 'parent should be triggered as often as param')
        # Removing the observer (by observer object) stops callbacks, but
        # the params_changed/parent counters still advance.
        self.par.remove_observer(self)
        self.p[2,1] = 4
        self.assertEqual(self._observer_triggered, 3, 'observer should not have triggered')
        self.assertEqual(self._trigger_count, 1, 'observer should have triggered once')
        self.assertEqual(self.par.params_changed_count, 2, 'params changed second')
        self.assertEqual(self.par.params_changed_count, self.parent.parent_changed_count, 'parent should be triggered as often as param')
        # Re-adding the observer resumes callbacks.
        self.par.add_observer(self, self._trigger, -1)
        self.p[2,1] = 4
        self.assertEqual(self._observer_triggered, 4, 'observer should have triggered')
        self.assertEqual(self._trigger_count, 2, 'observer should have triggered once')
        self.assertEqual(self.par.params_changed_count, 3, 'params changed second')
        self.assertEqual(self.par.params_changed_count, self.parent.parent_changed_count, 'parent should be triggered as often as param')
        # Removal by (observer, callable) pair also works.
        self.par.remove_observer(self, self._trigger)
        self.p[0,1] = 3
        self.assertEqual(self._observer_triggered, 4, 'observer should not have triggered')
        self.assertEqual(self._trigger_count, 2, 'observer should have triggered once')
        self.assertEqual(self.par.params_changed_count, 4, 'params changed second')
        self.assertEqual(self.par.params_changed_count, self.parent.parent_changed_count, 'parent should be triggered as often as param')

    def test_set_params(self):
        # _set_params on a node notifies that node and its parent; calling
        # it on the parent also re-notifies the children.
        self.assertEqual(self.par.params_changed_count, 0, 'no params changed yet')
        self.par._set_params(numpy.ones(self.par.size))
        self.assertEqual(self.par.params_changed_count, 1, 'now params changed')
        self.assertEqual(self.parent.parent_changed_count, self.par.params_changed_count)
        self.parent._set_params(numpy.ones(self.parent.size) * 2)
        self.assertEqual(self.par.params_changed_count, 2, 'now params changed')
        self.assertEqual(self.parent.parent_changed_count, self.par.params_changed_count)

    def test_priority_notify(self):
        # _notify_observers(val, None): full notification, parent included.
        self.assertEqual(self.par.params_changed_count, 0)
        self.par._notify_observers(0, None)
        self.assertEqual(self.par.params_changed_count, 1)
        self.assertEqual(self.parent.parent_changed_count, self.par.params_changed_count)
        # With min-priority -inf the parent is excluded: its counter stays
        # at 1 while the node's own counter advances.
        self.par._notify_observers(0, -numpy.inf)
        self.assertEqual(self.par.params_changed_count, 2)
        self.assertEqual(self.parent.parent_changed_count, 1)

    def test_priority(self):
        # Higher priority value (0 > -1) fires first.
        self.par.add_observer(self, self._trigger, -1)
        self.par.add_observer(self, self._trigger_priority, 0)
        self.par._notify_observers(0)
        self.assertEqual(self._first, self._trigger_priority, 'priority should be first')
        self.assertEqual(self._second, self._trigger, 'priority should be first')
        self.par.remove_observer(self)
        self._first = self._second = None
        # Swap the ordering (1 > 0): _trigger now fires first.
        self.par.add_observer(self, self._trigger, 1)
        self.par.add_observer(self, self._trigger_priority, 0)
        self.par._notify_observers(0)
        self.assertEqual(self._first, self._trigger, 'priority should be second')
        self.assertEqual(self._second, self._trigger_priority, 'priority should be second')
# Allow this test module to be executed directly with the standard
# unittest runner.
if __name__ == "__main__":
    unittest.main()

View file

@ -38,6 +38,9 @@ class Cacher(object):
if not all([isinstance(arg, Observable) for arg in observable_args]): if not all([isinstance(arg, Observable) for arg in observable_args]):
return self.operation(*args) return self.operation(*args)
# TODO: WARNING !!! Cache OFFSWITCH !!! WARNING
# return self.operation(*args)
#if the result is cached, return the cached computation #if the result is cached, return the cached computation
state = [all(a is b for a, b in zip(args, cached_i)) for cached_i in self.cached_inputs] state = [all(a is b for a, b in zip(args, cached_i)) for cached_i in self.cached_inputs]
if any(state): if any(state):