[updates] updated update structure immensely

This commit is contained in:
Max Zwiessele 2014-09-22 09:26:01 +01:00
parent 4543fc3480
commit ef05f49b8b
5 changed files with 99 additions and 53 deletions

View file

@@ -49,7 +49,7 @@ class Param(Parameterizable, ObsAr):
obj._realshape_ = obj.shape obj._realshape_ = obj.shape
obj._realsize_ = obj.size obj._realsize_ = obj.size
obj._realndim_ = obj.ndim obj._realndim_ = obj.ndim
obj._original_ = True obj._original_ = obj
return obj return obj
def __init__(self, name, input_array, default_constraint=None, *a, **kw): def __init__(self, name, input_array, default_constraint=None, *a, **kw):
@@ -127,7 +127,7 @@ class Param(Parameterizable, ObsAr):
try: try:
new_arr._current_slice_ = s new_arr._current_slice_ = s
new_arr._gradient_array_ = self.gradient[s] new_arr._gradient_array_ = self.gradient[s]
new_arr._original_ = self.base is new_arr.base new_arr._original_ = self._original_
except AttributeError: pass # returning 0d array or float, double etc except AttributeError: pass # returning 0d array or float, double etc
return new_arr return new_arr
@@ -157,7 +157,7 @@ class Param(Parameterizable, ObsAr):
return self.constraints[__fixed__].size == self.size return self.constraints[__fixed__].size == self.size
def _get_original(self, param): def _get_original(self, param):
return self return self._original_
#=========================================================================== #===========================================================================
# Pickling and copying # Pickling and copying

View file

@@ -18,7 +18,7 @@ import numpy as np
import re import re
import logging import logging
__updated__ = '2014-05-21' __updated__ = '2014-09-22'
class HierarchyError(Exception): class HierarchyError(Exception):
""" """
@@ -91,16 +91,18 @@ class Observable(object):
def toggle_update(self): def toggle_update(self):
self.update_model(not self.update()) self.update_model(not self.update())
def trigger_update(self): def trigger_update(self, trigger_parent=True):
""" """
Update the model from the current state. Update the model from the current state.
Make sure that updates are on, otherwise this Make sure that updates are on, otherwise this
method will do nothing method will do nothing
:param bool trigger_parent: Whether to trigger the parent, after self has updated
""" """
if not self.update_model(): if not self.update_model():
#print "Warning: updates are off, updating the model will do nothing" #print "Warning: updates are off, updating the model will do nothing"
return return
self._trigger_params_changed() self._trigger_params_changed(trigger_parent)
def add_observer(self, observer, callble, priority=0): def add_observer(self, observer, callble, priority=0):
""" """
@@ -740,7 +742,7 @@ class OptimizationHandlable(Indexable):
self.param_array.flat[f] = p self.param_array.flat[f] = p
[np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]])) [np.put(self.param_array, ind[f[ind]], c.f(self.param_array.flat[ind[f[ind]]]))
for c, ind in self.constraints.iteritems() if c != __fixed__] for c, ind in self.constraints.iteritems() if c != __fixed__]
self._highest_parent_.tie.propagate_val() #self._highest_parent_.tie.propagate_val()
self._optimizer_copy_transformed = False self._optimizer_copy_transformed = False
self._trigger_params_changed() self._trigger_params_changed()
@@ -826,6 +828,7 @@ class OptimizationHandlable(Indexable):
""" """
# first take care of all parameters (from N(0,1)) # first take care of all parameters (from N(0,1))
x = rand_gen(size=self._size_transformed(), *args, **kwargs) x = rand_gen(size=self._size_transformed(), *args, **kwargs)
updates = self.update_model()
self.update_model(False) # Switch off the updates self.update_model(False) # Switch off the updates
self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...) self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)
# now draw from prior where possible # now draw from prior where possible
@@ -833,8 +836,8 @@ class OptimizationHandlable(Indexable):
[np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None] [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]
unfixlist = np.ones((self.size,),dtype=np.bool) unfixlist = np.ones((self.size,),dtype=np.bool)
unfixlist[self.constraints[__fixed__]] = False unfixlist[self.constraints[__fixed__]] = False
self.param_array[unfixlist] = x[unfixlist] self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
self.update_model(True) self.update_model(updates)
#=========================================================================== #===========================================================================
# For shared memory arrays. This does nothing in Param, but sets the memory # For shared memory arrays. This does nothing in Param, but sets the memory

View file

@@ -210,7 +210,62 @@ def ssgplvm_oil(optimize=True, verbose=1, plot=True, N=200, Q=7, num_inducing=40
plt.close(fig) plt.close(fig)
return m return m
def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False): def _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim=False):
Q_signal = 4
import GPy
import numpy as np
np.random.seed(0)
k = GPy.kern.Matern32(Q_signal, 1., lengthscale=np.random.uniform(1,6,Q_signal), ARD=1)
t = np.c_[[np.linspace(-1,5,N) for _ in range(Q_signal)]].T
K = k.K(t)
s1, s2, s3, sS = np.random.multivariate_normal(np.zeros(K.shape[0]), K, size=(4))[:,:,None]
Y1, Y2, Y3, S1, S2, S3 = _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS)
slist = [sS, s1, s2, s3]
slist_names = ["sS", "s1", "s2", "s3"]
Ylist = [Y1, Y2, Y3]
if plot_sim:
import pylab
import matplotlib.cm as cm
import itertools
fig = pylab.figure("MRD Simulation Data", figsize=(8, 6))
fig.clf()
ax = fig.add_subplot(2, 1, 1)
labls = slist_names
for S, lab in itertools.izip(slist, labls):
ax.plot(S, label=lab)
ax.legend()
for i, Y in enumerate(Ylist):
ax = fig.add_subplot(2, len(Ylist), len(Ylist) + 1 + i)
ax.imshow(Y, aspect='auto', cmap=cm.gray) # @UndefinedVariable
ax.set_title("Y{}".format(i + 1))
pylab.draw()
pylab.tight_layout()
return slist, [S1, S2, S3], Ylist
def _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS):
S1 = _np.hstack([s1, sS])
S2 = _np.hstack([s2, s3, sS])
S3 = _np.hstack([s3, sS])
Y1 = S1.dot(_np.random.randn(S1.shape[1], D1))
Y2 = S2.dot(_np.random.randn(S2.shape[1], D2))
Y3 = S3.dot(_np.random.randn(S3.shape[1], D3))
Y1 += .3 * _np.random.randn(*Y1.shape)
Y2 += .2 * _np.random.randn(*Y2.shape)
Y3 += .25 * _np.random.randn(*Y3.shape)
Y1 -= Y1.mean(0)
Y2 -= Y2.mean(0)
Y3 -= Y3.mean(0)
Y1 /= Y1.std(0)
Y2 /= Y2.std(0)
Y3 /= Y3.std(0)
return Y1, Y2, Y3, S1, S2, S3
def _simulate_sincos(D1, D2, D3, N, num_inducing, plot_sim=False):
_np.random.seed(1234) _np.random.seed(1234)
x = _np.linspace(0, 4 * _np.pi, N)[:, None] x = _np.linspace(0, 4 * _np.pi, N)[:, None]
@@ -229,24 +284,7 @@ def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False):
s3 -= s3.mean(); s3 /= s3.std(0) s3 -= s3.mean(); s3 /= s3.std(0)
sS -= sS.mean(); sS /= sS.std(0) sS -= sS.mean(); sS /= sS.std(0)
S1 = _np.hstack([s1, sS]) Y1, Y2, Y3, S1, S2, S3 = _generate_high_dimensional_output(D1, D2, D3, s1, s2, s3, sS)
S2 = _np.hstack([s2, s3, sS])
S3 = _np.hstack([s3, sS])
Y1 = S1.dot(_np.random.randn(S1.shape[1], D1))
Y2 = S2.dot(_np.random.randn(S2.shape[1], D2))
Y3 = S3.dot(_np.random.randn(S3.shape[1], D3))
Y1 += .3 * _np.random.randn(*Y1.shape)
Y2 += .2 * _np.random.randn(*Y2.shape)
Y3 += .25 * _np.random.randn(*Y3.shape)
Y1 -= Y1.mean(0)
Y2 -= Y2.mean(0)
Y3 -= Y3.mean(0)
Y1 /= Y1.std(0)
Y2 /= Y2.std(0)
Y3 /= Y3.std(0)
slist = [sS, s1, s2, s3] slist = [sS, s1, s2, s3]
slist_names = ["sS", "s1", "s2", "s3"] slist_names = ["sS", "s1", "s2", "s3"]
@@ -298,7 +336,7 @@ def bgplvm_simulation(optimize=True, verbose=1,
from GPy.models import BayesianGPLVM from GPy.models import BayesianGPLVM
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9 D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9
_, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
Y = Ylist[0] Y = Ylist[0]
k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q) k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
#k = kern.RBF(Q, ARD=True, lengthscale=10.) #k = kern.RBF(Q, ARD=True, lengthscale=10.)
@@ -323,7 +361,7 @@ def ssgplvm_simulation(optimize=True, verbose=1,
from GPy.models import SSGPLVM from GPy.models import SSGPLVM
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9 D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 45, 3, 9
_, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
Y = Ylist[0] Y = Ylist[0]
k = kern.Linear(Q, ARD=True, useGPU=useGPU)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q) k = kern.Linear(Q, ARD=True, useGPU=useGPU)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
#k = kern.RBF(Q, ARD=True, lengthscale=10.) #k = kern.RBF(Q, ARD=True, lengthscale=10.)
@@ -349,7 +387,7 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1,
from GPy.inference.latent_function_inference.var_dtc import VarDTCMissingData from GPy.inference.latent_function_inference.var_dtc import VarDTCMissingData
D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 400, 3, 4 D1, D2, D3, N, num_inducing, Q = 13, 5, 8, 400, 3, 4
_, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
Y = Ylist[0] Y = Ylist[0]
k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q) k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q)
@@ -380,7 +418,7 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
from GPy.models import MRD from GPy.models import MRD
D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5 D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
_, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
#Ylist = [Ylist[0]] #Ylist = [Ylist[0]]
k = kern.Linear(Q, ARD=True) k = kern.Linear(Q, ARD=True)
@@ -390,7 +428,7 @@ def mrd_simulation(optimize=True, verbose=True, plot=True, plot_sim=True, **kw):
if optimize: if optimize:
print "Optimizing Model:" print "Optimizing Model:"
m.optimize(messages=verbose, max_iters=8e3, gtol=.1) m.optimize(messages=verbose, max_iters=8e3)
if plot: if plot:
m.X.plot("MRD Latent Space 1D") m.X.plot("MRD Latent Space 1D")
m.plot_scales("MRD Scales") m.plot_scales("MRD Scales")
@@ -402,7 +440,7 @@ def mrd_simulation_missing_data(optimize=True, verbose=True, plot=True, plot_sim
from GPy.inference.latent_function_inference.var_dtc import VarDTCMissingData from GPy.inference.latent_function_inference.var_dtc import VarDTCMissingData
D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5 D1, D2, D3, N, num_inducing, Q = 60, 20, 36, 60, 6, 5
_, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) _, _, Ylist = _simulate_matern(D1, D2, D3, N, num_inducing, plot_sim)
#Ylist = [Ylist[0]] #Ylist = [Ylist[0]]
k = kern.Linear(Q, ARD=True) k = kern.Linear(Q, ARD=True)
@@ -542,7 +580,7 @@ def bcgplvm_stick(kernel=None, optimize=True, verbose=True, plot=True):
y = m.likelihood.Y[0, :] y = m.likelihood.Y[0, :]
data_show = GPy.plotting.matplot_dep.visualize.stick_show(y[None, :], connect=data['connect']) data_show = GPy.plotting.matplot_dep.visualize.stick_show(y[None, :], connect=data['connect'])
GPy.plotting.matplot_dep.visualize.lvm(m.X[0, :].copy(), m, data_show, ax) GPy.plotting.matplot_dep.visualize.lvm(m.X[0, :].copy(), m, data_show, ax)
raw_input('Press enter to finish') #raw_input('Press enter to finish')
return m return m

View file

@@ -32,7 +32,7 @@ class ParameterizedTest(unittest.TestCase):
self.white = GPy.kern.White(1) self.white = GPy.kern.White(1)
from GPy.core.parameterization import Param from GPy.core.parameterization import Param
from GPy.core.parameterization.transformations import Logistic from GPy.core.parameterization.transformations import Logistic
self.param = Param('param', np.random.uniform(0,1,(25,2)), Logistic(0, 1)) self.param = Param('param', np.random.uniform(0,1,(10,5)), Logistic(0, 1))
self.test1 = GPy.core.Parameterized("test model") self.test1 = GPy.core.Parameterized("test model")
self.test1.param = self.param self.test1.param = self.param
@@ -153,16 +153,21 @@ class ParameterizedTest(unittest.TestCase):
self.assertEqual(val, self.rbf.variance) self.assertEqual(val, self.rbf.variance)
def test_updates(self): def test_updates(self):
self.test1.update_model(False) val = float(self.testmodel.log_likelihood())
val = float(self.rbf.variance) self.testmodel.update_model(False)
self.test1.kern.randomize() self.testmodel.kern.randomize()
self.assertEqual(val, self.rbf.variance) self.testmodel.likelihood.randomize()
self.assertEqual(val, self.testmodel.log_likelihood())
self.testmodel.update_model(True)
self.assertNotEqual(val, self.testmodel.log_likelihood())
def test_fixing_optimize(self): def test_fixing_optimize(self):
self.testmodel.kern.lengthscale.fix() self.testmodel.kern.lengthscale.fix()
val = float(self.testmodel.kern.lengthscale) val = float(self.testmodel.kern.lengthscale)
val2 = float(self.testmodel.kern.variance)
self.testmodel.randomize() self.testmodel.randomize()
self.assertEqual(val, self.testmodel.kern.lengthscale) self.assertEqual(val, self.testmodel.kern.lengthscale)
self.assertNotEqual(val2, self.testmodel.kern.variance)
def test_add_parameter_in_hierarchy(self): def test_add_parameter_in_hierarchy(self):
from GPy.core import Param from GPy.core import Param