diff --git a/GPy/core/model.py b/GPy/core/model.py index c067d51d..21bcf0c7 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -485,20 +485,17 @@ class Model(Parameterized): if not hasattr(self, 'kern'): raise ValueError, "this model has no kernel" - k = [p for p in self.kern._parameters_ if hasattr(p, "ARD") and p.ARD] - if (not len(k) == 1): - raise ValueError, "cannot determine sensitivity for this kernel" - k = k[0] - from ..kern.parts.rbf import RBF - from ..kern.parts.rbf_inv import RBFInv - from ..kern.parts.linear import Linear + k = self.kern#[p for p in self.kern._parameters_ if hasattr(p, "ARD") and p.ARD] + from ..kern import RBF, Linear#, RBFInv + if isinstance(k, RBF): return 1. / k.lengthscale - elif isinstance(k, RBFInv): - return k.inv_lengthscale + #elif isinstance(k, RBFInv): + # return k.inv_lengthscale elif isinstance(k, Linear): return k.variances - + else: + raise ValueError, "cannot determine sensitivity for this kernel" def pseudo_EM(self, stop_crit=.1, **kwargs): """ diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py index bfd0bf21..b5399741 100644 --- a/GPy/core/parameterization/index_operations.py +++ b/GPy/core/parameterization/index_operations.py @@ -83,11 +83,21 @@ class ParameterIndexOperations(object): def iterproperties(self): return self._properties.iterkeys() - def shift(self, start, size): + def shift_right(self, start, size): for ind in self.iterindices(): toshift = ind>=start - if toshift.size > 0: - ind[toshift] += size + ind[toshift] += size + + def shift_left(self, start, size): + for v, ind in self.items(): + todelete = (ind>=start) * (ind<start+size) + if todelete.size != 0: + ind = ind[~todelete] + toshift = ind>=start + if toshift.size != 0: + ind[toshift] -= size + if ind.size != 0: self._properties[v] = ind + else: del self._properties[v] def clear(self): self._properties.clear() @@ -183,7 +193,7 @@ class ParameterIndexOperationsView(object): yield i - def shift(self, start, size): + def shift_right(self, start, size): raise
NotImplementedError, 'Shifting only supported in original ParamIndexOperations' diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py index 45b57eab..c2c8a05a 100644 --- a/GPy/core/parameterization/parameter_core.py +++ b/GPy/core/parameterization/parameter_core.py @@ -390,6 +390,7 @@ class Parameterizable(Constrainable): import copy from .index_operations import ParameterIndexOperations, ParameterIndexOperationsView from .array_core import ParamList + dc = dict() for k, v in self.__dict__.iteritems(): if k not in ['_direct_parent_', '_parameters_', '_parent_index_'] + self.parameter_names(): @@ -399,18 +400,21 @@ class Parameterizable(Constrainable): dc[k] = copy.deepcopy(v) if k == '_parameters_': params = [p.copy() for p in v] - # dc = copy.deepcopy(self.__dict__) + dc['_direct_parent_'] = None dc['_parent_index_'] = None dc['_parameters_'] = ParamList() + dc['constraints'].clear() + dc['priors'].clear() + dc['size'] = 0 + s = self.__new__(self.__class__) s.__dict__ = dc - # import ipdb;ipdb.set_trace() + for p in params: s.add_parameter(p) - # dc._notify_parent_change() + return s - # return copy.deepcopy(self) def _notify_parameters_changed(self): self.parameters_changed() diff --git a/GPy/core/parameterization/parameterized.py b/GPy/core/parameterization/parameterized.py index 177cc217..d463ed43 100644 --- a/GPy/core/parameterization/parameterized.py +++ b/GPy/core/parameterization/parameterized.py @@ -87,8 +87,8 @@ class Parameterized(Parameterizable, Pickleable, Observable, Gradcheckable): self._parameters_.append(param) else: start = sum(p.size for p in self._parameters_[:index]) - self.constraints.shift(start, param.size) - self.priors.shift(start, param.size) + self.constraints.shift_right(start, param.size) + self.priors.shift_right(start, param.size) self.constraints.update(param.constraints, start) self.priors.update(param.priors, start) self._parameters_.insert(index, param) @@ -113,15 +113,19 @@ class 
Parameterized(Parameterizable, Pickleable, Observable, Gradcheckable): """ if not param in self._parameters_: raise RuntimeError, "Parameter {} does not belong to this object, remove parameters directly from their respective parents".format(param._short()) - del self._parameters_[param._parent_index_] + + start = sum([p.size for p in self._parameters_[:param._parent_index_]]) + self._remove_parameter_name(param) self.size -= param.size + del self._parameters_[param._parent_index_] param._disconnect_parent() - self._remove_parameter_name(param) - - #self._notify_parent_change() + self.constraints.shift_left(start, param.size) self._connect_fixes() - + self._connect_parameters() + self._notify_parent_change() + + def _connect_parameters(self): # connect parameterlist to this parameterized object # This just sets up the right connection for the params objects diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index c8e79e6c..3ba54d34 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -74,7 +74,7 @@ def gplvm_oil_100(optimize=True, verbose=1, plot=True): data = GPy.util.datasets.oil_100() Y = data['X'] # create simple GP model - kernel = GPy.kern.RBF(6, ARD=True) + GPy.kern.bias(6) + kernel = GPy.kern.RBF(6, ARD=True) + GPy.kern.Bias(6) m = GPy.models.GPLVM(Y, 6, kernel=kernel) m.data_labels = data['Y'].argmax(axis=1) if optimize: m.optimize('scg', messages=verbose) @@ -190,17 +190,22 @@ def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False): _np.random.seed(1234) x = _np.linspace(0, 4 * _np.pi, N)[:, None] - s1 = _np.vectorize(lambda x: _np.sin(x)) + s1 = _np.vectorize(lambda x: -_np.sin(x)) s2 = _np.vectorize(lambda x: _np.cos(x)) s3 = _np.vectorize(lambda x:-_np.exp(-_np.cos(2 * x))) - sS = _np.vectorize(lambda x: _np.sin(2 * x)) + sS = _np.vectorize(lambda x: x*_np.sin(x)) s1 = s1(x) s2 = s2(x) s3 = s3(x) sS = sS(x) - S1 = _np.hstack([s1, sS]) + s1 -= 
s1.mean(); s1 /= s1.std(0) + s2 -= s2.mean(); s2 /= s2.std(0) + s3 -= s3.mean(); s3 /= s3.std(0) + sS -= sS.mean(); sS /= sS.std(0) + + S1 = _np.hstack([s1, s2, sS]) S2 = _np.hstack([s2, s3, sS]) S3 = _np.hstack([s3, sS]) @@ -271,7 +276,7 @@ def bgplvm_simulation(optimize=True, verbose=1, D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 3, 10 _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) Y = Ylist[0] - k = kern.linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q) + k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q) m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k) if optimize: @@ -291,10 +296,10 @@ def bgplvm_simulation_missing_data(optimize=True, verbose=1, from GPy.models import BayesianGPLVM from GPy.inference.latent_function_inference.var_dtc import VarDTCMissingData - D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 3, 10 + D1, D2, D3, N, num_inducing, Q = 15, 5, 8, 30, 5, 9 _, _, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim) Y = Ylist[0] - k = kern.linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q) + k = kern.Linear(Q, ARD=True)# + kern.white(Q, _np.exp(-2)) # + kern.bias(Q) inan = _np.random.binomial(1, .6, size=Y.shape).astype(bool) m = BayesianGPLVM(Y.copy(), Q, init="random", num_inducing=num_inducing, kernel=k) diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index a81bb711..5e88569c 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -308,14 +308,14 @@ class VarDTCMissingData(object): # gradients: if uncertain_inputs: grad_dict = {'dL_dKmm': dL_dKmm, - 'dL_dpsi0':dL_dpsi0, - 'dL_dpsi1':dL_dpsi1, - 'dL_dpsi2':dL_dpsi2, + 'dL_dpsi0':dL_dpsi0_all, + 'dL_dpsi1':dL_dpsi1_all, + 'dL_dpsi2':dL_dpsi2_all, 'partial_for_likelihood':partial_for_likelihood} else: grad_dict = {'dL_dKmm': dL_dKmm, - 
'dL_dKdiag':dL_dpsi0, - 'dL_dKnm':dL_dpsi1, + 'dL_dKdiag':dL_dpsi0_all, + 'dL_dKnm':dL_dpsi1_all, 'partial_for_likelihood':partial_for_likelihood} #get sufficient things for posterior prediction diff --git a/GPy/kern/_src/kern.py b/GPy/kern/_src/kern.py index b3ee57cd..f436d322 100644 --- a/GPy/kern/_src/kern.py +++ b/GPy/kern/_src/kern.py @@ -67,7 +67,7 @@ class Kern(Parameterized): See GPy.plotting.matplot_dep.plot_ARD """ assert "matplotlib" in sys.modules, "matplotlib package has not been imported." - from ..plotting.matplot_dep import kernel_plots + from ...plotting.matplot_dep import kernel_plots return kernel_plots.plot_ARD(self,*args) diff --git a/GPy/plotting/matplot_dep/dim_reduction_plots.py b/GPy/plotting/matplot_dep/dim_reduction_plots.py index 74292c05..3f4ea9b0 100644 --- a/GPy/plotting/matplot_dep/dim_reduction_plots.py +++ b/GPy/plotting/matplot_dep/dim_reduction_plots.py @@ -1,8 +1,8 @@ import pylab as pb import numpy as np -from ... import util from latent_space_visualizations.controllers.imshow_controller import ImshowController,ImAnnotateController -from GPy.util.misc import param_to_array +from ...util.misc import param_to_array +from .base_plots import x_frame2D import itertools import Tango from matplotlib.cm import get_cmap @@ -37,7 +37,7 @@ def plot_latent(model, labels=None, which_indices=None, if ax is None: fig = pb.figure(num=fignum) ax = fig.add_subplot(111) - util.plot.Tango.reset() + Tango.reset() if labels is None: labels = np.ones(model.num_data) @@ -46,7 +46,7 @@ def plot_latent(model, labels=None, which_indices=None, X = param_to_array(model.X) # first, plot the output variance as a function of the latent space - Xtest, xx, yy, xmin, xmax = util.plot.x_frame2D(X[:, [input_1, input_2]], resolution=resolution) + Xtest, xx, yy, xmin, xmax = x_frame2D(X[:, [input_1, input_2]], resolution=resolution) Xtest_full = np.zeros((Xtest.shape[0], model.X.shape[1])) def plot_function(x): @@ -87,7 +87,7 @@ def plot_latent(model, labels=None, 
which_indices=None, else: x = X[index, input_1] y = X[index, input_2] - ax.scatter(x, y, marker=m, s=s, color=util.plot.Tango.nextMedium(), label=this_label) + ax.scatter(x, y, marker=m, s=s, color=Tango.nextMedium(), label=this_label) ax.set_xlabel('latent dimension %i' % input_1) ax.set_ylabel('latent dimension %i' % input_2) @@ -120,7 +120,7 @@ def plot_magnification(model, labels=None, which_indices=None, if ax is None: fig = pb.figure(num=fignum) ax = fig.add_subplot(111) - util.plot.Tango.reset() + Tango.reset() if labels is None: labels = np.ones(model.num_data) @@ -128,7 +128,7 @@ def plot_magnification(model, labels=None, which_indices=None, input_1, input_2 = most_significant_input_dimensions(model, which_indices) # first, plot the output variance as a function of the latent space - Xtest, xx, yy, xmin, xmax = util.plot.x_frame2D(model.X[:, [input_1, input_2]], resolution=resolution) + Xtest, xx, yy, xmin, xmax = x_frame2D(model.X[:, [input_1, input_2]], resolution=resolution) Xtest_full = np.zeros((Xtest.shape[0], model.X.shape[1])) def plot_function(x): @@ -165,7 +165,7 @@ def plot_magnification(model, labels=None, which_indices=None, else: x = model.X[index, input_1] y = model.X[index, input_2] - ax.scatter(x, y, marker=m, s=s, color=util.plot.Tango.nextMedium(), label=this_label) + ax.scatter(x, y, marker=m, s=s, color=Tango.nextMedium(), label=this_label) ax.set_xlabel('latent dimension %i' % input_1) ax.set_ylabel('latent dimension %i' % input_2) @@ -205,7 +205,7 @@ def plot_steepest_gradient_map(model, fignum=None, ax=None, which_indices=None, return dmu_dX[indices, argmax], np.array(labels)[argmax] if ax is None: - fig = pyplot.figure(num=fignum) + fig = pb.figure(num=fignum) ax = fig.add_subplot(111) if data_labels is None: @@ -241,7 +241,7 @@ def plot_steepest_gradient_map(model, fignum=None, ax=None, which_indices=None, ax.legend() ax.figure.tight_layout() if updates: - pyplot.show() + pb.show() clear = raw_input('Enter to continue') if 
clear.lower() in 'yes' or clear == '': controller.deactivate() diff --git a/GPy/plotting/matplot_dep/kernel_plots.py b/GPy/plotting/matplot_dep/kernel_plots.py index 30157294..3436c4ff 100644 --- a/GPy/plotting/matplot_dep/kernel_plots.py +++ b/GPy/plotting/matplot_dep/kernel_plots.py @@ -1,7 +1,6 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) -import sys import numpy as np import pylab as pb import Tango @@ -29,22 +28,23 @@ def plot_ARD(kernel, fignum=None, ax=None, title='', legend=False): xticklabels = [] bars = [] x0 = 0 - for p in kernel._parameters_: - c = Tango.nextMedium() - if hasattr(p, 'ARD') and p.ARD: - if title is None: - ax.set_title('ARD parameters, %s kernel' % p.name) - else: - ax.set_title(title) - if isinstance(p, Linear): - ard_params = p.variances - else: - ard_params = 1. / p.lengthscale - - x = np.arange(x0, x0 + len(ard_params)) - bars.append(ax.bar(x, ard_params, align='center', color=c, edgecolor='k', linewidth=1.2, label=p.name.replace("_"," "))) - xticklabels.extend([r"$\mathrm{{{name}}}\ {x}$".format(name=p.name, x=i) for i in np.arange(len(ard_params))]) - x0 += len(ard_params) + #for p in kernel._parameters_: + p = kernel + c = Tango.nextMedium() + if hasattr(p, 'ARD') and p.ARD: + if title is None: + ax.set_title('ARD parameters, %s kernel' % p.name) + else: + ax.set_title(title) + if isinstance(p, Linear): + ard_params = p.variances + else: + ard_params = 1. 
/ p.lengthscale + x = np.arange(x0, x0 + len(ard_params)) + from ...util.misc import param_to_array + bars.append(ax.bar(x, param_to_array(ard_params), align='center', color=c, edgecolor='k', linewidth=1.2, label=p.name.replace("_"," "))) + xticklabels.extend([r"$\mathrm{{{name}}}\ {x}$".format(name=p.name, x=i) for i in np.arange(len(ard_params))]) + x0 += len(ard_params) x = np.arange(x0) transOffset = offset_copy(ax.transData, fig=fig, x=0., y= -2., units='points') diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py index 47c8642e..59c32775 100644 --- a/GPy/plotting/matplot_dep/models_plots.py +++ b/GPy/plotting/matplot_dep/models_plots.py @@ -56,7 +56,10 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', if ax is None: fig = pb.figure(num=fignum) ax = fig.add_subplot(111) - + + X, Y = param_to_array(model.X, model.Y) + if model.has_uncertain_inputs(): X_variance = model.X_variance + #work out what the inputs are for plotting (1D or 2D) fixed_dims = np.array([i for i,v in fixed_inputs]) free_dims = np.setdiff1d(np.arange(model.input_dim),fixed_dims) @@ -66,7 +69,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #define the frame on which to plot resolution = resolution or 200 - Xnew, xmin, xmax = x_frame1D(model.X[:,free_dims], plot_limits=plot_limits) + Xnew, xmin, xmax = x_frame1D(X[:,free_dims], plot_limits=plot_limits) Xgrid = np.empty((Xnew.shape[0],model.input_dim)) Xgrid[:,free_dims] = Xnew for i,v in fixed_inputs: @@ -77,13 +80,13 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', m, v = model._raw_predict(Xgrid) lower = m - 2*np.sqrt(v) upper = m + 2*np.sqrt(v) - Y = model.Y + Y = Y else: m, v, lower, upper = model.predict(Xgrid) - Y = model.Y + Y = Y for d in which_data_ycols: gpplot(Xnew, m[:, d], lower[:, d], upper[:, d], axes=ax, edgecol=linecol, fillcol=fillcol) - ax.plot(model.X[which_data_rows,free_dims], Y[which_data_rows, d], 'kx', mew=1.5) + 
ax.plot(X[which_data_rows,free_dims], Y[which_data_rows, d], 'kx', mew=1.5) #optionally plot some samples if samples: #NOTE not tested with fixed_inputs @@ -95,8 +98,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #add error bars for uncertain (if input uncertainty is being modelled) if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs(): - ax.errorbar(model.X[which_data_rows, free_dims], model.Y[which_data_rows, which_data_ycols], - xerr=2 * np.sqrt(model.X_variance[which_data_rows, free_dims]), + ax.errorbar(X[which_data_rows, free_dims].flatten(), Y[which_data_rows, which_data_ycols].flatten(), + xerr=2 * np.sqrt(X_variance[which_data_rows, free_dims].flatten()), ecolor='k', fmt=None, elinewidth=.5, alpha=.5) @@ -120,7 +123,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #define the frame for plotting on resolution = resolution or 50 - Xnew, _, _, xmin, xmax = x_frame2D(model.X[:,free_dims], plot_limits, resolution) + Xnew, _, _, xmin, xmax = x_frame2D(X[:,free_dims], plot_limits, resolution) Xgrid = np.empty((Xnew.shape[0],model.input_dim)) Xgrid[:,free_dims] = Xnew for i,v in fixed_inputs: @@ -130,14 +133,14 @@ def plot_fit(model, plot_limits=None, which_data_rows='all', #predict on the frame and plot if plot_raw: m, _ = model._raw_predict(Xgrid) - Y = model.Y + Y = Y else: m, _, _, _ = model.predict(Xgrid) Y = model.data for d in which_data_ycols: m_d = m[:,d].reshape(resolution, resolution).T ax.contour(x, y, m_d, levels, vmin=m.min(), vmax=m.max(), cmap=pb.cm.jet) - ax.scatter(model.X[which_data_rows, free_dims[0]], model.X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) + ax.scatter(X[which_data_rows, free_dims[0]], X[which_data_rows, free_dims[1]], 40, Y[which_data_rows, d], cmap=pb.cm.jet, vmin=m.min(), vmax=m.max(), linewidth=0.) 
#set the limits of the plot to some sensible values ax.set_xlim(xmin[0], xmax[0]) diff --git a/GPy/testing/index_operations_tests.py b/GPy/testing/index_operations_tests.py index d5ef7007..171db5cc 100644 --- a/GPy/testing/index_operations_tests.py +++ b/GPy/testing/index_operations_tests.py @@ -24,6 +24,13 @@ class Test(unittest.TestCase): self.param_index.remove(one, [1]) self.assertListEqual(self.param_index[one].tolist(), [3]) + def test_shift_left(self): + self.param_index.shift_left(1, 2) + self.assertListEqual(self.param_index[three].tolist(), [2,5]) + self.assertListEqual(self.param_index[two].tolist(), [0,3]) + self.assertListEqual(self.param_index[one].tolist(), [1]) + + def test_index_view(self): #======================================================================= # 0 1 2 3 4 5 6 7 8 9 diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py index ff57606a..6f13d294 100644 --- a/GPy/testing/parameterized_tests.py +++ b/GPy/testing/parameterized_tests.py @@ -10,8 +10,8 @@ import numpy as np class Test(unittest.TestCase): def setUp(self): - self.rbf = GPy.kern.rbf(1) - self.white = GPy.kern.white(1) + self.rbf = GPy.kern.RBF(1) + self.white = GPy.kern.White(1) from GPy.core.parameterization import Param from GPy.core.parameterization.transformations import Logistic self.param = Param('param', np.random.rand(25,2), Logistic(0, 1)) @@ -39,14 +39,13 @@ class Test(unittest.TestCase): def test_remove_parameter(self): - from GPy.core.parameterization.transformations import FIXED, UNFIXED, __fixed__ + from GPy.core.parameterization.transformations import FIXED, UNFIXED, __fixed__, Logexp self.white.fix() self.test1.remove_parameter(self.white) self.assertIs(self.test1._fixes_,None) self.assertListEqual(self.white._fixes_.tolist(), [FIXED]) - self.assertIs(self.white.constraints,self.white.white.constraints._param_index_ops) - self.assertEquals(self.white.white.constraints._offset, 0) + 
self.assertEquals(self.white.constraints._offset, 0) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) @@ -57,18 +56,19 @@ class Test(unittest.TestCase): self.assertListEqual(self.test1.constraints[__fixed__].tolist(), [0]) self.assertIs(self.white._fixes_,None) self.assertListEqual(self.test1._fixes_.tolist(),[FIXED] + [UNFIXED] * 52) + self.test1.remove_parameter(self.white) self.assertIs(self.test1._fixes_,None) self.assertListEqual(self.white._fixes_.tolist(), [FIXED]) - self.assertIs(self.white.constraints,self.white.white.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) - self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) + self.assertIs(self.test1.constraints, self.param.constraints._param_index_ops) + self.assertListEqual(self.test1.constraints[Logexp()].tolist(), [0,1]) def test_add_parameter_already_in_hirarchy(self): self.test1.add_parameter(self.white._parameters_[0]) def test_default_constraints(self): - self.assertIs(self.rbf.rbf.variance.constraints._param_index_ops, self.rbf.constraints._param_index_ops) + self.assertIs(self.rbf.variance.constraints._param_index_ops, self.rbf.constraints._param_index_ops) self.assertIs(self.test1.constraints, self.rbf.constraints._param_index_ops) self.assertListEqual(self.rbf.constraints.indices()[0].tolist(), range(2)) from GPy.core.parameterization.transformations import Logexp