Mirror of https://github.com/SheffieldML/GPy.git
Commit dab35dcbb0: merged params here

13 changed files with 220 additions and 412 deletions
@@ -14,21 +14,19 @@ class ObservableArray(np.ndarray, Observable):
    takes exactly one argument, which is this array itself.
    """
    __array_priority__ = -1 # Never give back ObservableArray
    def __new__(cls, input_array):
    def __new__(cls, input_array, *a, **kw):
        if not isinstance(input_array, ObservableArray):
            obj = np.atleast_1d(np.require(input_array, dtype=np.float64, requirements=['W', 'C'])).view(cls)
        else: obj = input_array
        cls.__name__ = "ObservableArray\n "
        super(ObservableArray, obj).__init__(*a, **kw)
        return obj

    def __init__(self, *a, **kw):
        super(ObservableArray, self).__init__(*a, **kw)

    def __array_finalize__(self, obj):
        # see InfoArray.__array_finalize__ for comments
        if obj is None: return
        self._observer_callables_ = getattr(obj, '_observer_callables_', None)

    def __array_wrap__(self, out_arr, context=None):
        return out_arr.view(np.ndarray)
@@ -50,10 +48,10 @@ class ObservableArray(np.ndarray, Observable):
        if self._s_not_empty(s):
            super(ObservableArray, self).__setitem__(s, val)
            self.notify_observers(self[s])

    def __getslice__(self, start, stop):
        return self.__getitem__(slice(start, stop))

    def __setslice__(self, start, stop, val):
        return self.__setitem__(slice(start, stop), val)
@@ -85,7 +83,7 @@ class ObservableArray(np.ndarray, Observable):
        self.notify_observers()
        return r

    def __ifloordiv__(self, *args, **kwargs):
        r = np.ndarray.__ifloordiv__(self, *args, **kwargs)
        self.notify_observers()
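Taken together, the ObservableArray hunks above implement a numpy subclass that view-casts its input in __new__, carries the observer list across views in __array_finalize__, and calls notify_observers after every mutating operation. The following minimal sketch illustrates that pattern; MiniObservableArray and its reduced method set are illustrative only, not the class changed in this commit.

import numpy as np

class MiniObservableArray(np.ndarray):
    """Toy observable array: observers are called after in-place changes."""
    __array_priority__ = -1  # results of mixed operations prefer plain ndarrays

    def __new__(cls, input_array, *a, **kw):
        # writable, C-contiguous float64 view of the input, cast to this class
        obj = np.atleast_1d(np.require(input_array, dtype=np.float64,
                                       requirements=['W', 'C'])).view(cls)
        obj._observer_callables_ = []
        return obj

    def __array_finalize__(self, obj):
        if obj is None:
            return
        # slices and views inherit the observer list from their parent array
        self._observer_callables_ = getattr(obj, '_observer_callables_', [])

    def notify_observers(self, which=None):
        for callble in self._observer_callables_:
            callble(self if which is None else which)

    def __setitem__(self, s, val):
        np.ndarray.__setitem__(self, s, val)
        self.notify_observers(self[s])

    def __iadd__(self, other):
        r = np.ndarray.__iadd__(self, other)
        self.notify_observers()
        return r

seen = []
a = MiniObservableArray([1., 2., 3.])
a._observer_callables_.append(lambda arr: seen.append(np.array(arr)))
a[0] = 5.0   # notifies with the changed element
a += 1.0     # notifies with the whole array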
@@ -3,7 +3,7 @@
import itertools
import numpy
from parameter_core import OptimizationHandlable, Gradcheckable, adjust_name_for_printing
from parameter_core import OptimizationHandlable, adjust_name_for_printing
from array_core import ObservableArray

###### printing
@@ -43,13 +43,12 @@ class Param(OptimizationHandlable, ObservableArray):
    _fixes_ = None
    _parameters_ = []
    def __new__(cls, name, input_array, default_constraint=None):
        obj = numpy.atleast_1d(super(Param, cls).__new__(cls, input_array=input_array))
        obj = numpy.atleast_1d(super(Param, cls).__new__(cls, input_array=input_array, name=name, default_constraint=default_constraint))
        cls.__name__ = "Param"
        obj._current_slice_ = (slice(obj.shape[0]),)
        obj._realshape_ = obj.shape
        obj._realsize_ = obj.size
        obj._realndim_ = obj.ndim
        obj._updated_ = False
        from lists_and_dicts import SetDict
        obj._tied_to_me_ = SetDict()
        obj._tied_to_ = []
@@ -86,7 +85,6 @@ class Param(OptimizationHandlable, ObservableArray):
        self._realshape_ = getattr(obj, '_realshape_', None)
        self._realsize_ = getattr(obj, '_realsize_', None)
        self._realndim_ = getattr(obj, '_realndim_', None)
        self._updated_ = getattr(obj, '_updated_', None)
        self._original_ = getattr(obj, '_original_', None)
        self._name = getattr(obj, 'name', None)
        self._gradient_array_ = getattr(obj, '_gradient_array_', None)
@@ -121,14 +119,12 @@ class Param(OptimizationHandlable, ObservableArray):
                self._realndim_,
                self._tied_to_me_,
                self._tied_to_,
                self._updated_,
                )
            )

    def __setstate__(self, state):
        super(Param, self).__setstate__(state[0])
        state = list(state[1])
        self._updated_ = state.pop()
        self._tied_to_ = state.pop()
        self._tied_to_me_ = state.pop()
        self._realndim_ = state.pop()
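The __getstate__/__setstate__ hunk shows the pickling convention used here: the state is a pair of (parent state, list of this class's attributes), and __setstate__ restores the list by popping in the reverse of the order it was built. A standalone sketch of that convention with hypothetical Base/Child classes and made-up attribute names:

import pickle

class Base(object):
    def __getstate__(self):
        return {'base_attr': getattr(self, 'base_attr', None)}

    def __setstate__(self, state):
        self.base_attr = state['base_attr']

class Child(Base):
    def __getstate__(self):
        # parent state first, then this class's attributes as a flat list
        return [super(Child, self).__getstate__(),
                (self.shape, self.tied_to, self.updated)]

    def __setstate__(self, state):
        super(Child, self).__setstate__(state[0])
        state = list(state[1])
        # pop in the reverse of the order used in __getstate__
        self.updated = state.pop()
        self.tied_to = state.pop()
        self.shape = state.pop()

c = Child()
c.base_attr, c.shape, c.tied_to, c.updated = 1, (3,), [], False
c2 = pickle.loads(pickle.dumps(c))
assert (c2.base_attr, c2.shape, c2.tied_to, c2.updated) == (1, (3,), [], False)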
@@ -41,10 +41,8 @@ class Observable(object):
    """
    _updated = True
    def __init__(self, *args, **kwargs):
        super(Observable, self).__init__(*args, **kwargs)
        self._observer_callables_ = []

    def __del__(self, *args, **kwargs):
        del self._observer_callables_

    def add_observer(self, observer, callble, priority=0):
        self._insert_sorted(priority, observer, callble)
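add_observer stores (priority, observer, callable) entries via _insert_sorted, so higher-priority observers are notified first. A rough self-contained sketch of that bookkeeping; only the names add_observer/_insert_sorted/notify_observers come from the hunk, the rest (MiniObservable, the bisect-based insert) is assumed for illustration:

import bisect

class MiniObservable(object):
    """Observers are kept sorted by priority; highest priority is notified first."""
    def __init__(self):
        # each entry is (priority, observer, callable)
        self._observer_callables_ = []

    def _insert_sorted(self, priority, observer, callble):
        # negate priorities so bisect keeps the highest priority at the front
        keys = [-p for p, _, _ in self._observer_callables_]
        idx = bisect.bisect_right(keys, -priority)
        self._observer_callables_.insert(idx, (priority, observer, callble))

    def add_observer(self, observer, callble, priority=0):
        self._insert_sorted(priority, observer, callble)

    def notify_observers(self, which=None):
        for _, observer, callble in list(self._observer_callables_):
            callble(self if which is None else which)

obs = MiniObservable()
order = []
obs.add_observer('low', lambda o: order.append('low'), priority=0)
obs.add_observer('high', lambda o: order.append('high'), priority=10)
obs.notify_observers()
assert order == ['high', 'low']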
@@ -161,7 +159,9 @@ class Parentable(object):
    """
    _parent_ = None
    _parent_index_ = None

    def __init__(self, *args, **kwargs):
        super(Parentable, self).__init__(*args, **kwargs)

    def has_parent(self):
        """
        Return whether this parentable object currently has a parent.
@@ -205,6 +205,7 @@ class Gradcheckable(Parentable):
    """
    def __init__(self, *a, **kw):
        super(Gradcheckable, self).__init__(*a, **kw)

    def checkgrad(self, verbose=0, step=1e-6, tolerance=1e-3):
        """
        Check the gradient of this parameter with respect to the highest parent's
@@ -272,6 +273,9 @@ class Indexable(object):
    Enable enraveled indexes and offsets for this object.
    The raveled index of an object is the index for its parameters in a flattened int array.
    """
    def __init__(self, *a, **kw):
        super(Indexable, self).__init__(*a, **kw)

    def _raveled_index(self):
        """
        Flattened array of ints, specifying the index of this object.
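In plain numpy terms, the "raveled index" mentioned in this docstring is simply an element's position in the flattened view of an array; a small illustration (generic numpy, not GPy code):

import numpy as np

x = np.arange(12).reshape(3, 4)
ij = (1, 2)                                  # multi-dimensional index
flat = np.ravel_multi_index(ij, x.shape)     # raveled (flattened) index -> 6
assert x.flat[flat] == x[ij]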
@@ -534,8 +538,11 @@ class OptimizationHandlable(Constrainable, Observable):
    """
    This enables optimization handles on an Object as done in GPy 0.4.

    transformed: make sure the transformations and constraints etc are handled
    `..._transformed`: make sure the transformations and constraints etc are handled
    """
    def __init__(self, name, default_constraint=None, *a, **kw):
        super(OptimizationHandlable, self).__init__(name, default_constraint=default_constraint, *a, **kw)

    def transform(self):
        [np.put(self._param_array_, ind, c.finv(self._param_array_[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
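transform() walks the constraint-to-index mapping and applies each constraint's inverse mapping (finv) to the covered entries of the flat parameter array; the matching forward pass presumably maps them back. A self-contained sketch of that index-wise transform; MiniLogexp is only an example mapping and is simpler than GPy's actual Logexp:

import numpy as np

class MiniLogexp(object):
    """Example constraint: optimize on the whole real line, model sees positives."""
    def f(self, x):     # optimizer space -> model space
        return np.log1p(np.exp(x))
    def finv(self, y):  # model space -> optimizer space
        return np.log(np.expm1(y))

param_array = np.array([0.5, 2.0, -1.0, 3.0])
constraints = {MiniLogexp(): np.array([0, 1, 3])}   # indices under this constraint

# transform: replace constrained entries by their unconstrained representation
for c, ind in constraints.items():
    np.put(param_array, ind, c.finv(param_array[ind]))

# untransform: map them back into the constrained (model) space
for c, ind in constraints.items():
    np.put(param_array, ind, c.f(param_array[ind]))

np.testing.assert_allclose(param_array, [0.5, 2.0, -1.0, 3.0], rtol=1e-10)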
@@ -551,8 +558,8 @@ class OptimizationHandlable(Constrainable, Observable):
        return p

    def _set_params_transformed(self, p):
        #if p is self._param_array_:
        p = p.copy()
        if p is self._param_array_:
            p = p.copy()
        if self._has_fixes(): self._param_array_[self._fixes_] = p
        else: self._param_array_[:] = p
        self.untransform()
@@ -625,6 +632,24 @@ class OptimizationHandlable(Constrainable, Observable):
        [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
        self._set_params_transformed(x) # makes sure all of the tied parameters get the same init (since there's only one prior object...)

    #===========================================================================
    # For shared memory arrays. This does nothing in Param, but sets the memory
    # for all parameterized objects
    #===========================================================================
    def _propagate_param_grad(self, parray, garray):
        pi_old_size = 0
        for pi in self._parameters_:
            pislice = slice(pi_old_size, pi_old_size+pi.size)

            self._param_array_[pislice] = pi._param_array_.ravel()#, requirements=['C', 'W']).flat
            self._gradient_array_[pislice] = pi._gradient_array_.ravel()#, requirements=['C', 'W']).flat

            pi._param_array_.data = parray[pislice].data
            pi._gradient_array_.data = garray[pislice].data

            pi._propagate_param_grad(parray[pislice], garray[pislice])
            pi_old_size += pi.size

class Parameterizable(OptimizationHandlable):
    def __init__(self, *args, **kwargs):
        super(Parameterizable, self).__init__(*args, **kwargs)
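_propagate_param_grad first copies each child's current values into the parent's flat arrays and then rebinds the child's buffer (the .data attribute) to the corresponding slice, so parent and children share one block of memory afterwards. The effect can be demonstrated with plain numpy views; this sketch uses slicing instead of .data reassignment but produces the same shared-memory relationship:

import numpy as np

# parent-level flat parameter array
parent = np.zeros(5)

# child arrays start out with their own memory
child_a = np.array([1., 2.])
child_b = np.array([3., 4., 5.])

# 1) copy the children's current values into the parent's slices
parent[0:2] = child_a
parent[2:5] = child_b

# 2) after propagation the children are views into the parent's buffer
#    (the commit does this by reassigning each child's .data buffer in place;
#    here we take slices, which gives the same shared-memory behaviour)
child_a = parent[0:2]
child_b = parent[2:5]

parent[1] = 42.0
assert child_a[1] == 42.0      # children observe writes through the parent
child_b[0] = -1.0
assert parent[2] == -1.0       # and the parent observes writes through a child
assert np.shares_memory(parent, child_a) and np.shares_memory(parent, child_b)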
@@ -811,22 +836,21 @@ class Parameterizable(OptimizationHandlable):
            p._parent_index_ = i

            pslice = slice(old_size, old_size+p.size)
            pi_old_size = old_size
            for pi in p.flattened_parameters:
                pislice = slice(pi_old_size, pi_old_size+pi.size)

                self._param_array_[pislice] = pi._param_array_.flat
                self._gradient_array_[pislice] = pi._gradient_array_.flat

                pi._param_array_.data = self._param_array_[pislice].data
                pi._gradient_array_.data = self._gradient_array_[pislice].data

                pi_old_size += pi.size

            # first connect all children
            p._propagate_param_grad(self._param_array_[pslice], self._gradient_array_[pslice])

            # then connect children to self
            self._param_array_[pslice] = p._param_array_.ravel()#, requirements=['C', 'W']).ravel(order='C')
            self._gradient_array_[pslice] = p._gradient_array_.ravel()#, requirements=['C', 'W']).ravel(order='C')

            if not p._param_array_.flags['C_CONTIGUOUS']:
                import ipdb;ipdb.set_trace()
            p._param_array_.data = self._param_array_[pslice].data
            p._gradient_array_.data = self._gradient_array_[pslice].data

            self._param_slices_.append(pslice)

            self._add_parameter_name(p, ignore_added_names=ignore_added_names)
            old_size += p.size
@@ -65,8 +65,8 @@ class Parameterized(Parameterizable, Pickleable):
    # **Never** call parameters_changed() yourself
    __metaclass__ = ParametersChangedMeta
    #===========================================================================
    def __init__(self, name=None, *a, **kw):
        super(Parameterized, self).__init__(name=name, parent=None, parent_index=None, *a, **kw)
    def __init__(self, name=None, parameters=[], *a, **kw):
        super(Parameterized, self).__init__(name=name, *a, **kw)
        self._in_init_ = True
        self._parameters_ = ArrayList()
        self.size = sum(p.size for p in self._parameters_)
@@ -76,6 +76,7 @@ class Parameterized(Parameterizable, Pickleable):
        self._param_slices_ = []
        self._connect_parameters()
        del self._in_init_
        self.add_parameters(*parameters)

    def build_pydot(self, G=None):
        import pydot # @UnresolvedImport
@@ -205,25 +206,29 @@ class Parameterized(Parameterizable, Pickleable):
        return found_params

    def __getitem__(self, name, paramlist=None):
        if paramlist is None:
            paramlist = self.grep_param_names(name)
        if len(paramlist) < 1: raise AttributeError, name
        if len(paramlist) == 1:
            if isinstance(paramlist[-1], Parameterized):
                paramlist = paramlist[-1].flattened_parameters
                if len(paramlist) != 1:
                    return ParamConcatenation(paramlist)
            return paramlist[-1]
        return ParamConcatenation(paramlist)
        if isinstance(name, (int, slice, tuple, np.ndarray)):
            return self._param_array_[name]
        else:
            if paramlist is None:
                paramlist = self.grep_param_names(name)
            if len(paramlist) < 1: raise AttributeError, name
            if len(paramlist) == 1:
                if isinstance(paramlist[-1], Parameterized):
                    paramlist = paramlist[-1].flattened_parameters
                    if len(paramlist) != 1:
                        return ParamConcatenation(paramlist)
                return paramlist[-1]
            return ParamConcatenation(paramlist)

    def __setitem__(self, name, value, paramlist=None):
        if isinstance(name, (slice, tuple, np.ndarray)):
            self._param_array_[name] = value
            self.notify_observers()
        else:
            try: param = self.__getitem__(name, paramlist)
            except AttributeError as a: raise a
            param[:] = value

    def __setattr__(self, name, val):
        # override the default behaviour, if setting a param, so broadcasting can by used
        if hasattr(self, '_parameters_'):
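The reworked __getitem__/__setitem__ dispatch on the index type: ints, slices, tuples and arrays address the flat parameter array directly, while anything else is treated as a (regular-expression) parameter name lookup. A simplified standalone sketch of that dispatch; MiniParameterized and its grep_param_names are heavily reduced stand-ins, not the real class:

import re
import numpy as np

class MiniParameterized(object):
    """Index with ints/slices/arrays for raw values, with strings for names."""
    def __init__(self, names, values):
        self._param_array_ = np.asarray(values, dtype=float)
        self._names = list(names)

    def grep_param_names(self, regexp):
        # indices of all parameters whose name matches the pattern
        return [i for i, n in enumerate(self._names) if re.search(regexp, n)]

    def __getitem__(self, name):
        if isinstance(name, (int, slice, tuple, np.ndarray)):
            return self._param_array_[name]
        matches = self.grep_param_names(name)
        if not matches:
            raise AttributeError(name)
        return self._param_array_[matches]

    def __setitem__(self, name, value):
        if isinstance(name, (int, slice, tuple, np.ndarray)):
            self._param_array_[name] = value
        else:
            self._param_array_[self.grep_param_names(name)] = value

m = MiniParameterized(['rbf.variance', 'rbf.lengthscale', 'noise.variance'],
                      [1.0, 2.0, 0.1])
assert m[0] == 1.0                        # numeric indexing hits the flat array
assert list(m['variance']) == [1.0, 0.1]  # string indexing greps by name
m['noise'] = 0.5
assert m[2] == 0.5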
@@ -63,14 +63,15 @@ class SpikeAndSlabPrior(VariationalPrior):


class VariationalPosterior(Parameterized):
    def __init__(self, means=None, variances=None, name=None, **kw):
        super(VariationalPosterior, self).__init__(name=name, **kw)
    def __init__(self, means=None, variances=None, name=None, *a, **kw):
        super(VariationalPosterior, self).__init__(name=name, *a, **kw)
        self.mean = Param("mean", means)
        self.variance = Param("variance", variances, Logexp())
        self.add_parameters(self.mean, self.variance)
        self.ndim = self.mean.ndim
        self.shape = self.mean.shape
        self.num_data, self.input_dim = self.mean.shape
        self.add_parameters(self.mean, self.variance)
        self.num_data, self.input_dim = self.mean.shape
        if self.has_uncertain_inputs():
            assert self.variance.shape == self.mean.shape, "need one variance per sample and dimenion"
@@ -78,17 +79,23 @@ class VariationalPosterior(Parameterized):
        return not self.variance is None

    def __getitem__(self, s):
        import copy
        n = self.__new__(self.__class__)
        dc = copy.copy(self.__dict__)
        dc['mean'] = dc['mean'][s]
        dc['variance'] = dc['variance'][s]
        dc['shape'] = dc['mean'].shape
        dc['ndim'] = dc['ndim']
        dc['num_data'], dc['input_dim'] = self.mean.shape[0], self.mean.shape[1] if dc['ndim'] > 1 else 1
        n.__dict__ = dc
        return n

        if isinstance(s, (int, slice, tuple, list, np.ndarray)):
            import copy
            n = self.__new__(self.__class__, self.name)
            dc = self.__dict__.copy()
            dc['mean'] = self.mean[s]
            dc['variance'] = self.variance[s]
            dc['_parameters_'] = copy.copy(self._parameters_)
            n.__dict__.update(dc)
            n._parameters_[dc['mean']._parent_index_] = dc['mean']
            n._parameters_[dc['variance']._parent_index_] = dc['variance']
            n.ndim = n.mean.ndim
            n.shape = n.mean.shape
            n.num_data = n.mean.shape[0]
            n.input_dim = n.mean.shape[1] if n.ndim != 1 else 1
            return n
        else:
            return super(VariationalPrior, self).__getitem__(s)

class NormalPosterior(VariationalPosterior):
    '''
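The new VariationalPosterior.__getitem__ builds a sliced posterior by shallow-copying __dict__, swapping in the sliced mean and variance, and recomputing the shape bookkeeping. A toy version of the same idea, using plain numpy arrays in place of Param objects (MiniPosterior is hypothetical):

import numpy as np

class MiniPosterior(object):
    """Toy variational posterior with a slicing __getitem__."""
    def __init__(self, means, variances):
        self.mean = np.atleast_2d(np.asarray(means, dtype=float))
        self.variance = np.atleast_2d(np.asarray(variances, dtype=float))
        self.num_data, self.input_dim = self.mean.shape

    def __getitem__(self, s):
        if isinstance(s, (int, slice, tuple, list, np.ndarray)):
            # make a shallow copy and swap in the sliced arrays
            n = self.__class__.__new__(self.__class__)
            dc = self.__dict__.copy()
            dc['mean'] = np.atleast_2d(self.mean[s])
            dc['variance'] = np.atleast_2d(self.variance[s])
            n.__dict__.update(dc)
            n.num_data, n.input_dim = n.mean.shape
            return n
        raise TypeError("unsupported index: %r" % (s,))

q = MiniPosterior(np.random.randn(10, 3), np.ones((10, 3)))
sub = q[2:5]
assert sub.mean.shape == (3, 3) and sub.num_data == 3
assert np.shares_memory(sub.mean, q.mean)   # slices are views, not copies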