From ef05f49b8b648b6c96e3cbc1b1899fb5742fab2b Mon Sep 17 00:00:00 2001
From: Max Zwiessele
Date: Mon, 22 Sep 2014 09:26:01 +0100
Subject: [PATCH] [updates] rework the model update machinery

Param._original_ now holds a reference to the original parameter
instead of a boolean flag, trigger_update gains a trigger_parent
switch, randomize restores the caller's previous update setting
instead of unconditionally re-enabling updates, and the
dimensionality reduction examples draw their simulated signals from
a Matern32 GP through the new _simulate_matern helper.
---
 GPy/core/parameterization/param.py          | 16 ++--
 GPy/core/parameterization/parameter_core.py | 29 ++++---
 GPy/examples/dimensionality_reduction.py    | 90 +++++++++++++++------
 GPy/kern/_src/rbf.py                        |  2 +-
 GPy/testing/parameterized_tests.py          | 15 ++--
 5 files changed, 99 insertions(+), 53 deletions(-)

diff --git a/GPy/core/parameterization/param.py b/GPy/core/parameterization/param.py
index fb8adf4c..cb9b0cfe 100644
--- a/GPy/core/parameterization/param.py
+++ b/GPy/core/parameterization/param.py
@@ -49,7 +49,7 @@ class Param(Parameterizable, ObsAr):
         obj._realshape_ = obj.shape
         obj._realsize_ = obj.size
         obj._realndim_ = obj.ndim
-        obj._original_ = True
+        obj._original_ = obj
         return obj
 
     def __init__(self, name, input_array, default_constraint=None, *a, **kw):
@@ -124,10 +124,10 @@ class Param(Parameterizable, ObsAr):
         #if not reduce(lambda a, b: a or numpy.any(b is Ellipsis), s, False) and len(s) <= self.ndim:
         #    s += (Ellipsis,)
         new_arr = super(Param, self).__getitem__(s, *args, **kwargs)
-        try: 
+        try:
             new_arr._current_slice_ = s
             new_arr._gradient_array_ = self.gradient[s]
-            new_arr._original_ = self.base is new_arr.base
+            new_arr._original_ = self._original_
         except AttributeError: pass  # returning 0d array or float, double etc
         return new_arr
@@ -157,29 +157,29 @@ class Param(Parameterizable, ObsAr):
         return self.constraints[__fixed__].size == self.size
 
     def _get_original(self, param):
-        return self
+        return self._original_
 
     #===========================================================================
     # Pickling and copying
     #===========================================================================
     def copy(self):
         return Parameterizable.copy(self, which=self)
-    
+
     def __deepcopy__(self, memo):
         s = self.__new__(self.__class__, name=self.name, input_array=self.view(numpy.ndarray).copy())
-        memo[id(self)] = s 
+        memo[id(self)] = s
         import copy
         Pickleable.__setstate__(s, copy.deepcopy(self.__getstate__(), memo))
         return s
 
     def _setup_observers(self):
         """
         Setup the default observers
-        
+
         1: pass through to parent, if present
         """
         if self.has_parent():
             self.add_observer(self._parent_, self._parent_._pass_through_notify_observers, -np.inf)
-        
+
     #===========================================================================
     # Printing -> done
     #===========================================================================
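With this change a view of a Param no longer records a boolean telling whether it shares memory with its source; it carries the source itself, and _get_original simply returns it. A minimal sketch of the intended semantics (illustrative only; the names and shape are made up, the import path is the one this patch touches):

    import numpy as np
    from GPy.core.parameterization import Param

    p = Param('param', np.random.uniform(0, 1, (10, 5)))
    view = p[2:5]
    # Before this patch: view._original_ was a bool (self.base is new_arr.base).
    # After this patch: every view, however deeply sliced, points back to p.
    assert view._original_ is p
    assert view[0:1]._original_ is p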
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index cae999d9..f453257c 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -18,7 +18,7 @@
 import numpy as np
 import re
 import logging
 
-__updated__ = '2014-05-21'
+__updated__ = '2014-09-22'
 
 class HierarchyError(Exception):
     """
@@ -63,7 +63,7 @@ class Observable(object):
     @updates.setter
     def updates(self, ups):
         raise DeprecationWarning("updates is now a function, see update(True|False|None)")
-    
+
     def update_model(self, updates=None):
         """
         Get or set whether automatic updates are performed. When updates are
@@ -87,21 +87,23 @@ class Observable(object):
         else:
             self._updates = updates
             self.trigger_update()
-    
+
     def toggle_update(self):
         self.update_model(not self.update())
 
-    def trigger_update(self):
+    def trigger_update(self, trigger_parent=True):
         """
         Update the model from the current state.
         Make sure that updates are on; otherwise this method will do nothing.
+
+        :param bool trigger_parent: whether to trigger the parent after self has updated
         """
         if not self.update_model():
             #print "Warning: updates are off, updating the model will do nothing"
             return
-        self._trigger_params_changed()
-    
+        self._trigger_params_changed(trigger_parent)
+
     def add_observer(self, observer, callble, priority=0):
         """
         Add an observer `observer` with the callback `callble`
@@ -539,18 +541,18 @@ class Indexable(Nameable, Observable):
             [np.put(ret, ind, p.lnpdf_grad(x[ind])) for p, ind in self.priors.iteritems()]
             return ret
         return 0.
-    
+
     #===========================================================================
     # Tie parameters together
     #===========================================================================
-    
+
     def _has_ties(self):
         if self._highest_parent_.tie.tied_param is None:
             return False
         if self.has_parent():
             return self._highest_parent_.tie.label_buf[self._highest_parent_._raveled_index_for(self)].sum()>0
         return True
-    
+
     def tie_together(self):
         self._highest_parent_.tie.add_tied_parameter(self)
         self._highest_parent_._set_fixed(self,self._raveled_index())
@@ -740,7 +742,7 @@ class OptimizationHandlable(Indexable):
             self.param_array.flat[f] = p
             [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))
              for c, ind in self.constraints.iteritems() if c != __fixed__]
-        self._highest_parent_.tie.propagate_val()
+        #self._highest_parent_.tie.propagate_val()
         self._optimizer_copy_transformed = False
         self._trigger_params_changed()
@@ -826,6 +828,7 @@ class OptimizationHandlable(Indexable):
         """
         # first take care of all parameters (from N(0,1))
         x = rand_gen(size=self._size_transformed(), *args, **kwargs)
+        updates = self.update_model()
         self.update_model(False) # Switch off the updates
         self.optimizer_array = x  # makes sure all of the tied parameters get the same init (since there's only one prior object...)
         # now draw from prior where possible
@@ -833,8 +836,8 @@ class OptimizationHandlable(Indexable):
         [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
         unfixlist = np.ones((self.size,),dtype=np.bool)
         unfixlist[self.constraints[__fixed__]] = False
-        self.param_array[unfixlist] = x[unfixlist]
-        self.update_model(True)
+        self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
+        self.update_model(updates)
 
     #===========================================================================
     # For shared memory arrays. This does nothing in Param, but sets the memory
@@ -928,7 +931,7 @@ class Parameterizable(OptimizationHandlable):
         """
         if self.__dict__.get('_param_array_', None) is None:
             self._param_array_ = np.empty(self.size, dtype=np.float64)
-        
+
         if self.constraints[__fixed__].size !=0:
             fixes = np.ones(self.size).astype(bool)
             fixes[self.constraints[__fixed__]] = FIXED
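Note the behavioral fix in randomize above: the old code ended with update_model(True), silently re-enabling updates even for callers that had switched them off, while the new code records the current setting first and restores it afterwards. The same query/set pattern works from user code; a minimal sketch, assuming m is any GPy model (e.g. a GPRegression instance):

    updates = m.update_model()    # called without arguments: query the setting
    m.update_model(False)         # switch automatic recomputation off
    m.randomize()                 # no likelihood recomputation happens here
    m.update_model(updates)       # restore whatever was set before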
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index 842d0bf8..a640e360 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -210,7 +210,62 @@ def ssgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40
         plt.close(fig)
     return m
 
-def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False):
+def _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim=False):
+    Q_signal = 4
+    import GPy
+    import numpy as np
+    np.random.seed(0)
+
+    k = GPy.kern.Matern32(Q_signal, 1., lengthscale=np.random.uniform(1,6,Q_signal), ARD=1)
+    t = np.c_[[np.linspace(-1,5,N) for _ in range(Q_signal)]].T
+    K = k.K(t)
+    s1, s2, s3, sS = np.random.multivariate_normal(np.zeros(K.shape[0]), K, size=(4))[:,:,None]
+
+    Y1, Y2, Y3, S1, S2, S3 = _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS)
+
+    slist = [sS, s1, s2, s3]
+    slist_names = ["sS", "s1", "s2", "s3"]
+    Ylist = [Y1, Y2, Y3]
+
+    if plot_sim:
+        import pylab
+        import matplotlib.cm as cm
+        import itertools
+        fig = pylab.figure("MRD Simulation Data", figsize=(8, 6))
+        fig.clf()
+        ax = fig.add_subplot(2, 1, 1)
+        labls = slist_names
+        for S, lab in itertools.izip(slist, labls):
+            ax.plot(S, label=lab)
+        ax.legend()
+        for i, Y in enumerate(Ylist):
+            ax = fig.add_subplot(2, len(Ylist), len(Ylist) + 1 + i)
+            ax.imshow(Y, aspect='auto', cmap=cm.gray) # @UndefinedVariable
+            ax.set_title("Y{}".format(i + 1))
+        pylab.draw()
+        pylab.tight_layout()
+
+    return slist, [S1, S2, S3], Ylist
+
+def _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS):
+    S1 = _np.hstack([s1, sS])
+    S2 = _np.hstack([s2, s3, sS])
+    S3 = _np.hstack([s3, sS])
+    Y1 = S1.dot(_np.random.randn(S1.shape[1], D1))
+    Y2 = S2.dot(_np.random.randn(S2.shape[1], D2))
+    Y3 = S3.dot(_np.random.randn(S3.shape[1], D3))
+    Y1 += .3 * _np.random.randn(*Y1.shape)
+    Y2 += .2 * _np.random.randn(*Y2.shape)
+    Y3 += .25 * _np.random.randn(*Y3.shape)
+    Y1 -= Y1.mean(0)
+    Y2 -= Y2.mean(0)
+    Y3 -= Y3.mean(0)
+    Y1 /= Y1.std(0)
+    Y2 /= Y2.std(0)
+    Y3 /= Y3.std(0)
+    return Y1, Y2, Y3, S1, S2, S3
+
+def _simulate_sincos(D1, D2, D3, N, num_inducing, plot_sim=False):
     _np.random.seed(1234)
 
     x = _np.linspace(0, 4 * _np.pi, N)[:, None]
@@ -229,24 +284,7 @@ def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False):
     s3 -= s3.mean(); s3 /= s3.std(0)
     sS -= sS.mean(); sS /= sS.std(0)
 
-    S1 = _np.hstack([s1, sS])
-    S2 = _np.hstack([s2, s3, sS])
-    S3 = _np.hstack([s3, sS])
-
-    Y1 = S1.dot(_np.random.randn(S1.shape[1], D1))
-    Y2 = S2.dot(_np.random.randn(S2.shape[1], D2))
-    Y3 = S3.dot(_np.random.randn(S3.shape[1], D3))
-
-    Y1 += .3 * _np.random.randn(*Y1.shape)
-    Y2 += .2 * _np.random.randn(*Y2.shape)
-    Y3 += .25 * _np.random.randn(*Y3.shape)
-
-    Y1 -= Y1.mean(0)
-    Y2 -= Y2.mean(0)
-    Y3 -= Y3.mean(0)
-    Y1 /= Y1.std(0)
-    Y2 /= Y2.std(0)
-    Y3 /= Y3.std(0)
+    Y1, Y2, Y3, S1, S2, S3 = _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS)
 
     slist = [sS, s1, s2, s3]
     slist_names = ["sS", "s1", "s2", "s3"]
@@ -298,7 +336,7 @@ def bgplvm_simulation(optimize=True, verbose=1,
     from GPy.models import BayesianGPLVM
 
     D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9
-    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
+    _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
     Y = Ylist[0]
     k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
     #k = kern.RBF(Q, ARD=True, lengthscale=10.)
@@ -323,7 +361,7 @@ def ssgplvm_simulation(optimize=True, verbose=1,
     from GPy.models import SSGPLVM
 
     D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9
-    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
+    _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
     Y = Ylist[0]
     k = kern.Linear(Q, ARD=True, useGPU=useGPU)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
     #k = kern.RBF(Q, ARD=True, lengthscale=10.)
@@ -349,7 +387,7 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1,
     from GPy.inference.latent_function_inference.var_dtc import VarDTCMissingData
 
     D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 400, 3, 4
-    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
+    _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
     Y = Ylist[0]
 
     k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
@@ -380,7 +418,7 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
     from GPy.models import MRD
 
     D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
-    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
+    _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
     #Ylist = [Ylist[0]]
 
     k = kern.Linear(Q, ARD=True)
@@ -390,7 +428,7 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
 
     if optimize:
         print "Optimizing Model:"
-        m.optimize(messages=verbose, max_iters=8e3, gtol=.1)
+        m.optimize(messages=verbose, max_iters=8e3)
     if plot:
         m.X.plot("MRD Latent Space 1D")
         m.plot_scales("MRD Scales")
@@ -402,7 +440,7 @@ def mrd_simulation_missing_data(optimize=True, verbose=True, plot=True, plot_sim
     from GPy.inference.latent_function_inference.var_dtc import VarDTCMissingData
 
     D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
-    _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)
+    _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
     #Ylist = [Ylist[0]]
 
     k = kern.Linear(Q, ARD=True)
@@ -542,7 +580,7 @@ def bcgplvm_stick(kernel=None, optimize=True, verbose=True, plot=True):
         y = m.likelihood.Y[0, :]
         data_show = GPy.plotting.matplot_dep.visualize.stick_show(y[None, :], connect=data['connect'])
         GPy.plotting.matplot_dep.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
-        raw_input('Press enter to finish')
+        #raw_input('Press enter to finish')
 
     return m
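The example module now builds all three views through the shared _generate_high_dimensional_output helper, and _simulate_matern replaces the fixed sin/cos latent curves with draws from a Matern32 GP with random ARD lengthscales. A sketch of how the simulators are called (the dimensions are the ones mrd_simulation uses; both helpers are module-private):

    from GPy.examples.dimensionality_reduction import _simulate_matern

    D1, D2, D3, N, num_inducing = 60, 20, 36, 60, 6
    slist, Slist, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing)
    # slist: the raw latent signals [sS, s1, s2, s3]
    # Slist: the per-view latent matrices [S1, S2, S3]
    # Ylist: three zero-mean, unit-variance views that share the signal sS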
diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py
index 3711738a..f5b67b89 100644
--- a/GPy/kern/_src/rbf.py
+++ b/GPy/kern/_src/rbf.py
@@ -39,7 +39,7 @@ class RBF(Stationary):
         if self.useGPU:
             dc['psicomp'] = PSICOMP_RBF()
         return dc
-    
+
     def __setstate__(self, state):
         return super(RBF, self).__setstate__(state)
diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py
index a51d9e09..943c993f 100644
--- a/GPy/testing/parameterized_tests.py
+++ b/GPy/testing/parameterized_tests.py
@@ -32,7 +32,7 @@ class ParameterizedTest(unittest.TestCase):
         self.white = GPy.kern.White(1)
         from GPy.core.parameterization import Param
         from GPy.core.parameterization.transformations import Logistic
-        self.param = Param('param', np.random.uniform(0,1,(25,2)), Logistic(0, 1))
+        self.param = Param('param', np.random.uniform(0,1,(10,5)), Logistic(0, 1))
 
         self.test1 = GPy.core.Parameterized("test model")
         self.test1.param = self.param
@@ -153,16 +153,21 @@ class ParameterizedTest(unittest.TestCase):
         self.assertEqual(val, self.rbf.variance)
 
     def test_updates(self):
-        self.test1.update_model(False)
-        val = float(self.rbf.variance)
-        self.test1.kern.randomize()
-        self.assertEqual(val, self.rbf.variance)
+        val = float(self.testmodel.log_likelihood())
+        self.testmodel.update_model(False)
+        self.testmodel.kern.randomize()
+        self.testmodel.likelihood.randomize()
+        self.assertEqual(val, self.testmodel.log_likelihood())
+        self.testmodel.update_model(True)
+        self.assertNotEqual(val, self.testmodel.log_likelihood())
 
     def test_fixing_optimize(self):
         self.testmodel.kern.lengthscale.fix()
         val = float(self.testmodel.kern.lengthscale)
+        val2 = float(self.testmodel.kern.variance)
         self.testmodel.randomize()
         self.assertEqual(val, self.testmodel.kern.lengthscale)
+        self.assertNotEqual(val2, self.testmodel.kern.variance)
 
     def test_add_parameter_in_hierarchy(self):
         from GPy.core import Param
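The extended test_fixing_optimize pins down the contract between fixing and randomizing: randomize must leave fixed parameters untouched while still redrawing everything else, which is what the masked param_array.flat[unfixlist] write in the new randomize provides. A minimal sketch of that contract against the public API, assuming a small regression model with an RBF kernel:

    import numpy as np
    import GPy

    X, Y = np.random.randn(20, 1), np.random.randn(20, 1)
    m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))
    m.kern.lengthscale.fix()           # masked out of the randomize draw
    ls, var = float(m.kern.lengthscale), float(m.kern.variance)
    m.randomize()                      # only unfixed parameters are redrawn
    assert float(m.kern.lengthscale) == ls
    assert float(m.kern.variance) != var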