plot latent updated

James Hensman 2014-02-26 09:16:31 +00:00
parent 3c0f89bf53
commit f6da5345d9
3 changed files with 32 additions and 203 deletions

View file

@@ -20,14 +20,13 @@ class Model(Parameterized):
self.optimization_runs = []
self.sampling_runs = []
self.preferred_optimizer = 'scg'
# self._set_params(self._get_params()) has been taken out as it should only be called on leaf nodes
def log_likelihood(self):
raise NotImplementedError, "this needs to be implemented to use the model class"
def _log_likelihood_gradients(self):
# def dK_d(self, param, dL_dK, X, X2)
g = np.zeros(self.size)
try:
# [g.__setitem__(s, self.gradient_mapping[p]().flat) for p, s in itertools.izip(self._parameters_, self._param_slices_) if not p.is_fixed]
[p._collect_gradient(g[s]) for p, s in itertools.izip(self._parameters_, self._param_slices_) if not p.is_fixed]
except ValueError:
raise ValueError, 'Gradient for {} not defined, please specify gradients for parameters to optimize'.format(p.name)
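The hunk above shows how _log_likelihood_gradients gathers each parameter's gradient into one flat vector, skipping fixed parameters. Below is a minimal standalone sketch of that idea; the helper name and toy parameters are illustrative and not GPy code.

import numpy as np

def collect_gradients(param_grads, param_slices, fixed_flags, total_size):
    # Write each parameter's gradient block into a single flat vector.
    g = np.zeros(total_size)
    for grad, sl, fixed in zip(param_grads, param_slices, fixed_flags):
        if not fixed:
            g[sl] = np.asarray(grad).ravel()
    return g

# Two parameters of sizes 2 and 3, the second one fixed.
grads = [np.array([0.1, -0.2]), np.array([1.0, 2.0, 3.0])]
slices = [slice(0, 2), slice(2, 5)]
print(collect_gradients(grads, slices, [False, True], 5))  # -> [ 0.1 -0.2  0.   0.   0. ]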
@@ -61,85 +60,6 @@ class Model(Parameterized):
self.priors = state.pop()
Parameterized._setstate(self, state)
# def set_prior(self, regexp, what):
# """
#
# Sets priors on the model parameters.
#
# **Notes**
#
# Asserts that the prior is suitable for the constraint. If the
# wrong constraint is in place, an error is raised. If no
# constraint is in place, one is added (warning printed).
#
# For tied parameters, the prior will only be "counted" once, thus
# a prior object is only inserted on the first tied index
#
# :param regexp: regular expression of parameters on which priors need to be set.
# :type param: string, regexp, or integer array
# :param what: prior to set on parameter.
# :type what: GPy.core.Prior type
#
# """
# if self.priors is None:
# self.priors = [None for i in range(self._get_params().size)]
#
# which = self.grep_param_names(regexp)
#
# # check tied situation
# tie_partial_matches = [tie for tie in self.tied_indices if (not set(tie).isdisjoint(set(which))) & (not set(tie) == set(which))]
# if len(tie_partial_matches):
# raise ValueError, "cannot place prior across partial ties"
# tie_matches = [tie for tie in self.tied_indices if set(which) == set(tie) ]
# if len(tie_matches) > 1:
# raise ValueError, "cannot place prior across multiple ties"
# elif len(tie_matches) == 1:
# which = which[:1] # just place a prior object on the first parameter
#
#
# # check constraints are okay
#
# if what.domain is _POSITIVE:
# constrained_positive_indices = [i for i, t in zip(self.constrained_indices, self.constraints) if t.domain is _POSITIVE]
# if len(constrained_positive_indices):
# constrained_positive_indices = np.hstack(constrained_positive_indices)
# else:
# constrained_positive_indices = np.zeros(shape=(0,))
# bad_constraints = np.setdiff1d(self.all_constrained_indices(), constrained_positive_indices)
# assert not np.any(which[:, None] == bad_constraints), "constraint and prior incompatible"
# unconst = np.setdiff1d(which, constrained_positive_indices)
# if len(unconst):
# print "Warning: constraining parameters to be positive:"
# print '\n'.join([n for i, n in enumerate(self._get_param_names()) if i in unconst])
# print '\n'
# self.constrain_positive(unconst)
# elif what.domain is _REAL:
# assert not np.any(which[:, None] == self.all_constrained_indices()), "constraint and prior incompatible"
# else:
# raise ValueError, "prior not recognised"
#
# # store the prior in a local list
# for w in which:
# self.priors[w] = what
def get_gradient(self, name, return_names=False):
"""
Get model gradient(s) by name. The name is treated as a regular expression, and the gradients of all matching parameters are returned.
:param name: the name of parameters required (as a regular expression).
:type name: regular expression
:param return_names: whether or not to return the names matched (default False)
:type return_names: bool
"""
matches = self.grep_param_names(name)
if len(matches):
if return_names:
return self._log_likelihood_gradients()[matches], np.asarray(self._get_param_names())[matches].tolist()
else:
return self._log_likelihood_gradients()[matches]
else:
raise AttributeError, "no parameter matches %s" % name
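get_gradient matches parameter names with a regular expression, so a hedged usage sketch looks like the following; the GPRegression and RBF class names are assumptions about GPy's public API at this commit.

import numpy as np
import GPy  # assumed importable; exact class names may differ at this commit

X = np.random.rand(20, 1)
Y = np.sin(3 * X) + 0.05 * np.random.randn(20, 1)
m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(1))
# Gradient entries for every parameter whose name matches the expression.
g, names = m.get_gradient('.*lengthscale', return_names=True)
print(list(zip(names, g)))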
def randomize(self):
"""
Randomize the model.
@@ -183,7 +103,9 @@ class Model(Parameterized):
:param messages: whether to display during optimisation
:type messages: bool
.. note:: If num_processes is None, the number of workers in the multiprocessing pool is automatically set to the number of processors on the current machine.
.. note:: If num_processes is None, the number of workers in the
multiprocessing pool is automatically set to the number of processors
on the current machine.
"""
initial_parameters = self._get_params_transformed()
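A hedged usage sketch for optimize_restarts, reusing the model m built in the earlier sketch; messages and num_processes come from the docstring above, while num_restarts is an assumed (but conventional) keyword.

m.optimize_restarts(num_restarts=5, messages=False, num_processes=None)
print(m.log_likelihood())  # the best restart is typically left on the model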
@@ -227,45 +149,23 @@ class Model(Parameterized):
self._set_params_transformed(initial_parameters)
def ensure_default_constraints(self, warning=True):
"""
"""
Ensure that any variables which should clearly be positive
have been constrained somehow. The method performs a regular
expression search on parameter names looking for the terms
'variance', 'lengthscale', 'precision' and 'kappa'. If any of
these terms are present in the name, the parameter is
constrained positive.
DEPRECATED.
"""
raise DeprecationWarning, 'parameters now have default constraints'
#positive_strings = ['variance', 'lengthscale', 'precision', 'kappa', 'sensitivity']
# param_names = self._get_param_names()
# for s in positive_strings:
# paramlist = self.grep_param_names(".*"+s)
# for param in paramlist:
# for p in param.flattened_parameters:
# rav_i = set(self._raveled_index_for(p))
# for constraint in self.constraints.iter_properties():
# rav_i -= set(self._constraint_indices(p, constraint))
# rav_i -= set(np.nonzero(self._fixes_for(p)!=UNFIXED)[0])
# ind = self._backtranslate_index(p, np.array(list(rav_i), dtype=int))
# if ind.size != 0:
# p[np.unravel_index(ind, p.shape)].constrain_positive(warning=warning)
# if paramlist:
# self.__getitem__(None, paramlist).constrain_positive(warning=warning)
# currently_constrained = self.all_constrained_indices()
# to_make_positive = []
# for s in positive_strings:
# for i in self.grep_param_names(".*" + s):
# if not (i in currently_constrained):
# to_make_positive.append(i)
# if len(to_make_positive):
# self.constrain_positive(np.asarray(to_make_positive), warning=warning)
def objective_function(self, x):
"""
The objective function passed to the optimizer. It combines
the likelihood and the priors.
Failures are handled robustly. The algorithm will try several times to
return the objective, and will raise the original exception if
the objective cannot be computed.
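The docstring describes the objective as the penalised likelihood with a retry loop around failures; the standalone sketch below illustrates that idea and is not GPy's implementation.

import numpy as np

def objective_sketch(neg_log_likelihood, neg_log_prior, x, max_tries=3):
    # Sum of negative log likelihood and negative log prior, retried a few
    # times (e.g. after a failed Cholesky) before the original error surfaces.
    for attempt in range(max_tries):
        try:
            return neg_log_likelihood(x) + neg_log_prior(x)
        except np.linalg.LinAlgError:
            if attempt == max_tries - 1:
                raise
            x = x + 1e-8 * np.random.randn(np.size(x))  # tiny jitter, then retry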
@@ -336,7 +236,9 @@ class Model(Parameterized):
:messages: whether to display during optimisation
:type messages: bool
:param optimizer: which optimizer to use (defaults to self.preferred_optimizer)
:type optimizer: string TODO: valid strings?
:type optimizer: string
TODO: valid args
"""
if optimizer is None:
optimizer = self.preferred_optimizer
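A hedged usage sketch for optimize; 'scg' matches the preferred_optimizer default set in __init__ above, and messages is documented in the docstring.

m.optimize(optimizer='scg', messages=True)
print(m)  # GPy models print a parameter table, useful after optimisation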
@@ -389,15 +291,15 @@ class Model(Parameterized):
indices = np.r_[:self.size]
which = (transformed_index[:,None]==indices[self._fixes_][None,:]).nonzero()
transformed_index = (indices-(~self._fixes_).cumsum())[transformed_index[which[0]]]
if transformed_index.size == 0:
print "No free parameters to check"
return
# just check the global ratio
dx = np.zeros_like(x)
dx[transformed_index] = step * np.sign(np.random.uniform(-1, 1, transformed_index.size))
# evaluate around the point x
f1 = self.objective_function(x + dx)
f2 = self.objective_function(x - dx)
@@ -405,7 +307,7 @@ class Model(Parameterized):
dx = dx[transformed_index]
gradient = gradient[transformed_index]
numerical_gradient = (f1 - f2) / (2 * dx)
global_ratio = (f1 - f2) / (2 * np.dot(dx, np.where(gradient == 0, 1e-32, gradient)))
return (np.abs(1. - global_ratio) < tolerance) or (np.abs(gradient - numerical_gradient).mean() < tolerance)
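The global check above perturbs the free parameters along a random sign vector and compares the central difference with the analytic directional derivative. A standalone sketch of the same ratio test on a toy function:

import numpy as np

def checkgrad_sketch(f, grad_f, x, step=1e-6, tolerance=1e-3):
    # Perturb every entry of x by +/- step and compare numeric vs analytic.
    dx = step * np.sign(np.random.uniform(-1, 1, x.size))
    f1, f2 = f(x + dx), f(x - dx)
    gradient = grad_f(x)
    numerical = (f1 - f2) / (2 * dx)
    ratio = (f1 - f2) / (2 * np.dot(dx, np.where(gradient == 0, 1e-32, gradient)))
    return (np.abs(1. - ratio) < tolerance) or (np.abs(gradient - numerical).mean() < tolerance)

# f(x) = sum(x**2) has gradient 2x, so the check passes (the ratio is exactly 1).
print(checkgrad_sketch(lambda x: np.sum(x ** 2), lambda x: 2 * x, np.ones(3)))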
@@ -439,7 +341,7 @@ class Model(Parameterized):
#print param_index, transformed_index
else:
transformed_index = param_index
if param_index.size == 0:
print "No free parameters to check"
return
@@ -472,82 +374,5 @@ class Model(Parameterized):
print grad_string
self._set_params_transformed(x)
return ret
def input_sensitivity(self):
"""
return an array describing the sensitivity of the model to each input
NB. Right now, we're basing this on the lengthscales (or
variances) of the kernel. TODO: proper sensitivity analysis
where we integrate across the model inputs and evaluate the
effect on the variance of the model output. """
if not hasattr(self, 'kern'):
raise ValueError, "this model has no kernel"
k = self.kern#[p for p in self.kern._parameters_ if hasattr(p, "ARD") and p.ARD]
from ..kern import RBF, Linear#, RBFInv
if isinstance(k, RBF):
return 1. / k.lengthscale
#elif isinstance(k, RBFInv):
# return k.inv_lengthscale
elif isinstance(k, Linear):
return k.variances
else:
raise ValueError, "cannot determine sensitivity for this kernel"
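input_sensitivity uses reciprocal lengthscales (RBF) or variances (Linear) as a proxy for input relevance: a short lengthscale means the output varies quickly along that input. A standalone numeric sketch with hypothetical values:

import numpy as np

lengthscales = np.array([0.5, 5.0, 50.0])  # hypothetical ARD RBF lengthscales
print(1.0 / lengthscales)                  # -> [2.   0.2  0.02], first input most relevant

variances = np.array([3.0, 0.1])           # hypothetical linear-kernel variances
print(variances)                           # used directly as the sensitivity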
def pseudo_EM(self, stop_crit=.1, **kwargs):
"""
EM-like algorithm for Expectation Propagation and Laplace approximation
:param stop_crit: convergence criterion
:type stop_crit: float
.. note:: kwargs are passed to the update_likelihood and optimize functions.
"""
assert isinstance(self.likelihood, likelihoods.EP) or isinstance(self.likelihood, likelihoods.EP_Mixed_Noise), "pseudo_EM is only available for EP likelihoods"
ll_change = stop_crit + 1.
iteration = 0
last_ll = -np.inf
convergence = False
alpha = 0
stop = False
# Handle **kwargs
ep_args = {}
for arg in kwargs.keys():
if arg in ('epsilon', 'power_ep'):
ep_args[arg] = kwargs[arg]
del kwargs[arg]
while not stop:
last_approximation = self.likelihood.copy()
last_params = self._get_params()
if len(ep_args) == 2:
self.update_likelihood_approximation(epsilon=ep_args['epsilon'], power_ep=ep_args['power_ep'])
elif len(ep_args) == 1:
if ep_args.keys()[0] == 'epsilon':
self.update_likelihood_approximation(epsilon=ep_args['epsilon'])
elif ep_args.keys()[0] == 'power_ep':
self.update_likelihood_approximation(power_ep=ep_args['power_ep'])
else:
self.update_likelihood_approximation()
new_ll = self.log_likelihood()
ll_change = new_ll - last_ll
if ll_change < 0:
self.likelihood = last_approximation # restore previous likelihood approximation
self._set_params(last_params) # restore model parameters
print "Log-likelihood decrement: %s \nLast likelihood update discarded." % ll_change
stop = True
else:
self.optimize(**kwargs)
last_ll = self.log_likelihood()
if ll_change < stop_crit:
stop = True
iteration += 1
if stop:
print "%s iterations." % iteration
self.update_likelihood_approximation()
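A hedged usage sketch for pseudo_EM; m is assumed to be a GPy model with an EP likelihood (e.g. a binary classification model), otherwise the assertion above fires. Per the code, epsilon and power_ep are forwarded to the EP update and the remaining keywords to optimize.

m.pseudo_EM(stop_crit=0.1, messages=False)
print(m.log_likelihood())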

View file

@@ -225,7 +225,7 @@ class RBF(Stationary):
if self.ARD:
lengthscale2 = self.lengthscale **2
else:
lengthscale2 = np.ones(input_dim) * self.lengthscale2**2
lengthscale2 = np.ones(input_dim) * self.lengthscale**2
code = """
double tmp;

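The one-line change in this hunk fixes the isotropic (non-ARD) branch, which should square the single lengthscale and broadcast it over the input dimensions. A standalone sketch of the intended behaviour:

import numpy as np

input_dim, ARD = 3, False
lengthscale = np.array([2.0])  # one shared value in the isotropic case
if ARD:
    lengthscale2 = lengthscale ** 2                       # one entry per input
else:
    lengthscale2 = np.ones(input_dim) * lengthscale ** 2  # broadcast the shared value
print(lengthscale2)  # -> [4. 4. 4.]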
View file

@@ -2,6 +2,7 @@ import pylab as pb
import numpy as np
from latent_space_visualizations.controllers.imshow_controller import ImshowController,ImAnnotateController
from ...util.misc import param_to_array
from ...core.parameterization.variational import VariationalPosterior
from .base_plots import x_frame2D
import itertools
import Tango
@@ -19,7 +20,7 @@ def most_significant_input_dimensions(model, which_indices):
input_1, input_2 = 0, 1
else:
try:
input_1, input_2 = np.argsort(model.input_sensitivity())[::-1][:2]
input_1, input_2 = np.argsort(model.kern.input_sensitivity())[::-1][:2]
except:
raise ValueError, "cannot automatically determine which dimensions to plot, please pass 'which_indices'"
else:
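most_significant_input_dimensions now asks the kernel for its input sensitivity and keeps the two largest entries; the selection is a one-line argsort, sketched standalone here with hypothetical values:

import numpy as np

sensitivity = np.array([0.1, 2.0, 0.7, 1.5])          # hypothetical per-input sensitivities
input_1, input_2 = np.argsort(sensitivity)[::-1][:2]  # indices of the two largest values
print(input_1, input_2)                               # -> 1 3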
@@ -43,26 +44,29 @@ def plot_latent(model, labels=None, which_indices=None,
labels = np.ones(model.num_data)
input_1, input_2 = most_significant_input_dimensions(model, which_indices)
X = param_to_array(model.X)
# first, plot the output variance as a function of the latent space
Xtest, xx, yy, xmin, xmax = x_frame2D(X[:, [input_1, input_2]], resolution=resolution)
Xtest_full = np.zeros((Xtest.shape[0], model.X.shape[1]))
# fetch the data points X that we'd like to plot
X = model.X
if isinstance(X, VariationalPosterior):
X = param_to_array(X.mean)
else:
X = param_to_array(X)
# create a function which computes the shading of latent space according to the output variance
def plot_function(x):
Xtest_full = np.zeros((x.shape[0], model.X.shape[1]))
Xtest_full[:, [input_1, input_2]] = x
mu, var, low, up = model.predict(Xtest_full)
var = var[:, :1]
return np.log(var)
# Create an imshow controller that can re-plot the latent space shading at a good resolution
view = ImshowController(ax, plot_function,
tuple(X[:, [input_1, input_2]].min(0)) + tuple(X[:, [input_1, input_2]].max(0)),
resolution, aspect=aspect, interpolation='bilinear',
cmap=pb.cm.binary)
# ax.imshow(var.reshape(resolution, resolution).T,
# extent=[xmin[0], xmax[0], xmin[1], xmax[1]], cmap=pb.cm.binary, interpolation='bilinear', origin='lower')
# make sure labels are in order of input:
ulabels = []
for lab in labels:
@@ -95,8 +99,8 @@ def plot_latent(model, labels=None, which_indices=None,
if not np.all(labels == 1.) and legend:
ax.legend(loc=0, numpoints=1)
ax.set_xlim(xmin[0], xmax[0])
ax.set_ylim(xmin[1], xmax[1])
#ax.set_xlim(xmin[0], xmax[0])
#ax.set_ylim(xmin[1], xmax[1])
ax.grid(b=False) # remove the grid if present, it doesn't look good
ax.set_aspect('auto') # set a nice aspect ratio
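Finally, a hedged end-to-end sketch of how plot_latent is usually reached from a latent variable model; which_indices matches the function above, while the GPLVM constructor and the plot_latent method signature at this commit are assumptions.

import numpy as np
import GPy  # assumed; exact model class names and signatures may differ

Y = np.random.randn(40, 5)            # 40 observations of a 5-d output
m = GPy.models.GPLVM(Y, input_dim=2)  # two latent dimensions
m.optimize(messages=False)
m.plot_latent(which_indices=(0, 1))   # shade the latent space by predictive variance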