mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-02 08:12:39 +02:00
changed the behaviour of checkgrad
checkgrad used to check the passed string (for name matching) against the list of _get_param_names(). Then it would index along _get_param_names_transformed()! This led to inconsistencies when fixed or tied variables were used, which screwed up the ordering of the variable names. We now match against _get_param_names_transformed().
This commit is contained in:
parent
903e66486d
commit
dea4359b4e
3 changed files with 48 additions and 29 deletions
|
|
@ -392,7 +392,11 @@ class model(parameterised):
|
||||||
if target_param is None:
|
if target_param is None:
|
||||||
param_list = range(len(x))
|
param_list = range(len(x))
|
||||||
else:
|
else:
|
||||||
param_list = self.grep_param_names(target_param)
|
param_list = self.grep_param_names(target_param, transformed=True, search=True)
|
||||||
|
if not param_list:
|
||||||
|
print "No free parameters to check"
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
for i in param_list:
|
for i in param_list:
|
||||||
xx = x.copy()
|
xx = x.copy()
|
||||||
|
|
|
||||||
|
|
@ -119,7 +119,7 @@ class parameterised(object):
|
||||||
"""Unties all parameters by setting tied_indices to an empty list."""
|
"""Unties all parameters by setting tied_indices to an empty list."""
|
||||||
self.tied_indices = []
|
self.tied_indices = []
|
||||||
|
|
||||||
def grep_param_names(self, regexp):
|
def grep_param_names(self, regexp, transformed=False, search=False):
|
||||||
"""
|
"""
|
||||||
:param regexp: regular expression to select parameter names
|
:param regexp: regular expression to select parameter names
|
||||||
:type regexp: re | str | int
|
:type regexp: re | str | int
|
||||||
|
|
@ -129,13 +129,21 @@ class parameterised(object):
|
||||||
Other objects are passed through - i.e. integers which weren't meant for grepping
|
Other objects are passed through - i.e. integers which weren't meant for grepping
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
if transformed:
|
||||||
|
names = self._get_param_names_transformed()
|
||||||
|
else:
|
||||||
|
names = self._get_param_names()
|
||||||
|
|
||||||
if type(regexp) in [str, np.string_, np.str]:
|
if type(regexp) in [str, np.string_, np.str]:
|
||||||
regexp = re.compile(regexp)
|
regexp = re.compile(regexp)
|
||||||
return np.nonzero([regexp.match(name) for name in self._get_param_names()])[0]
|
|
||||||
elif type(regexp) is re._pattern_type:
|
elif type(regexp) is re._pattern_type:
|
||||||
return np.nonzero([regexp.match(name) for name in self._get_param_names()])[0]
|
pass
|
||||||
else:
|
else:
|
||||||
return regexp
|
return regexp
|
||||||
|
if search:
|
||||||
|
return np.nonzero([regexp.search(name) for name in names])[0]
|
||||||
|
else:
|
||||||
|
return np.nonzero([regexp.match(name) for name in names])[0]
|
||||||
|
|
||||||
def Nparam_transformed(self):
|
def Nparam_transformed(self):
|
||||||
removed = 0
|
removed = 0
|
||||||
|
|
@ -223,7 +231,14 @@ class parameterised(object):
|
||||||
To fix multiple parameters to the same value, simply pass a regular expression which matches both parameter names, or pass both of the indexes
|
To fix multiple parameters to the same value, simply pass a regular expression which matches both parameter names, or pass both of the indexes
|
||||||
"""
|
"""
|
||||||
matches = self.grep_param_names(regexp)
|
matches = self.grep_param_names(regexp)
|
||||||
assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained"
|
overlap = set(matches).intersection(set(self.all_constrained_indices()))
|
||||||
|
if overlap:
|
||||||
|
self.unconstrain(np.asarray(list(overlap)))
|
||||||
|
print 'Warning: re-constraining these parameters'
|
||||||
|
pn = self._get_param_names()
|
||||||
|
for i in overlap:
|
||||||
|
print pn[i]
|
||||||
|
|
||||||
self.fixed_indices.append(matches)
|
self.fixed_indices.append(matches)
|
||||||
if value != None:
|
if value != None:
|
||||||
self.fixed_values.append(value)
|
self.fixed_values.append(value)
|
||||||
|
|
|
||||||
|
|
@ -10,7 +10,7 @@ import numpy as np
|
||||||
import GPy
|
import GPy
|
||||||
|
|
||||||
|
|
||||||
def toy_rbf_1d(max_nb_eval_optim=100):
|
def toy_rbf_1d(optim_iters=100):
|
||||||
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
|
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
|
||||||
data = GPy.util.datasets.toy_rbf_1d()
|
data = GPy.util.datasets.toy_rbf_1d()
|
||||||
|
|
||||||
|
|
@ -19,13 +19,13 @@ def toy_rbf_1d(max_nb_eval_optim=100):
|
||||||
|
|
||||||
# optimize
|
# optimize
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize(max_f_eval=max_nb_eval_optim)
|
m.optimize(max_f_eval=optim_iters)
|
||||||
# plot
|
# plot
|
||||||
m.plot()
|
m.plot()
|
||||||
print(m)
|
print(m)
|
||||||
return m
|
return m
|
||||||
|
|
||||||
def rogers_girolami_olympics(max_nb_eval_optim=100):
|
def rogers_girolami_olympics(optim_iters=100):
|
||||||
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
|
"""Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
|
||||||
data = GPy.util.datasets.rogers_girolami_olympics()
|
data = GPy.util.datasets.rogers_girolami_olympics()
|
||||||
|
|
||||||
|
|
@ -37,14 +37,14 @@ def rogers_girolami_olympics(max_nb_eval_optim=100):
|
||||||
|
|
||||||
# optimize
|
# optimize
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize(max_f_eval=max_nb_eval_optim)
|
m.optimize(max_f_eval=optim_iters)
|
||||||
|
|
||||||
# plot
|
# plot
|
||||||
m.plot(plot_limits = (1850, 2050))
|
m.plot(plot_limits = (1850, 2050))
|
||||||
print(m)
|
print(m)
|
||||||
return m
|
return m
|
||||||
|
|
||||||
def toy_rbf_1d_50(max_nb_eval_optim=100):
|
def toy_rbf_1d_50(optim_iters=100):
|
||||||
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
|
"""Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
|
||||||
data = GPy.util.datasets.toy_rbf_1d_50()
|
data = GPy.util.datasets.toy_rbf_1d_50()
|
||||||
|
|
||||||
|
|
@ -53,14 +53,14 @@ def toy_rbf_1d_50(max_nb_eval_optim=100):
|
||||||
|
|
||||||
# optimize
|
# optimize
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize(max_f_eval=max_nb_eval_optim)
|
m.optimize(max_f_eval=optim_iters)
|
||||||
|
|
||||||
# plot
|
# plot
|
||||||
m.plot()
|
m.plot()
|
||||||
print(m)
|
print(m)
|
||||||
return m
|
return m
|
||||||
|
|
||||||
def silhouette(max_nb_eval_optim=100):
|
def silhouette(optim_iters=100):
|
||||||
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
|
"""Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
|
||||||
data = GPy.util.datasets.silhouette()
|
data = GPy.util.datasets.silhouette()
|
||||||
|
|
||||||
|
|
@ -69,12 +69,12 @@ def silhouette(max_nb_eval_optim=100):
|
||||||
|
|
||||||
# optimize
|
# optimize
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize(messages=True,max_f_eval=max_nb_eval_optim)
|
m.optimize(messages=True,max_f_eval=optim_iters)
|
||||||
|
|
||||||
print(m)
|
print(m)
|
||||||
return m
|
return m
|
||||||
|
|
||||||
def coregionalisation_toy2(max_nb_eval_optim=100):
|
def coregionalisation_toy2(optim_iters=100):
|
||||||
"""
|
"""
|
||||||
A simple demonstration of coregionalisation on two sinusoidal functions.
|
A simple demonstration of coregionalisation on two sinusoidal functions.
|
||||||
"""
|
"""
|
||||||
|
|
@ -93,7 +93,7 @@ def coregionalisation_toy2(max_nb_eval_optim=100):
|
||||||
m.constrain_fixed('.*rbf_var',1.)
|
m.constrain_fixed('.*rbf_var',1.)
|
||||||
#m.constrain_positive('.*kappa')
|
#m.constrain_positive('.*kappa')
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize('sim',messages=1,max_f_eval=max_nb_eval_optim)
|
m.optimize('sim',messages=1,max_f_eval=optim_iters)
|
||||||
|
|
||||||
pb.figure()
|
pb.figure()
|
||||||
Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
|
Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
|
||||||
|
|
@ -106,7 +106,7 @@ def coregionalisation_toy2(max_nb_eval_optim=100):
|
||||||
pb.plot(X2[:,0],Y2[:,0],'gx',mew=2)
|
pb.plot(X2[:,0],Y2[:,0],'gx',mew=2)
|
||||||
return m
|
return m
|
||||||
|
|
||||||
def coregionalisation_toy(max_nb_eval_optim=100):
|
def coregionalisation_toy(optim_iters=100):
|
||||||
"""
|
"""
|
||||||
A simple demonstration of coregionalisation on two sinusoidal functions.
|
A simple demonstration of coregionalisation on two sinusoidal functions.
|
||||||
"""
|
"""
|
||||||
|
|
@ -125,7 +125,7 @@ def coregionalisation_toy(max_nb_eval_optim=100):
|
||||||
m.constrain_fixed('.*rbf_var',1.)
|
m.constrain_fixed('.*rbf_var',1.)
|
||||||
#m.constrain_positive('kappa')
|
#m.constrain_positive('kappa')
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize(max_f_eval=max_nb_eval_optim)
|
m.optimize(max_f_eval=optim_iters)
|
||||||
|
|
||||||
pb.figure()
|
pb.figure()
|
||||||
Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
|
Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
|
||||||
|
|
@ -139,7 +139,7 @@ def coregionalisation_toy(max_nb_eval_optim=100):
|
||||||
return m
|
return m
|
||||||
|
|
||||||
|
|
||||||
def coregionalisation_sparse(max_nb_eval_optim=100):
|
def coregionalisation_sparse(optim_iters=100):
|
||||||
"""
|
"""
|
||||||
A simple demonstration of coregionalisation on two sinusoidal functions using sparse approximations.
|
A simple demonstration of coregionalisation on two sinusoidal functions using sparse approximations.
|
||||||
"""
|
"""
|
||||||
|
|
@ -162,9 +162,9 @@ def coregionalisation_sparse(max_nb_eval_optim=100):
|
||||||
m.scale_factor = 10000.
|
m.scale_factor = 10000.
|
||||||
m.constrain_fixed('.*rbf_var',1.)
|
m.constrain_fixed('.*rbf_var',1.)
|
||||||
#m.constrain_positive('kappa')
|
#m.constrain_positive('kappa')
|
||||||
m.constrain_fixed('iip')
|
m.constrain_fixed('Iip')
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize_restarts(5, robust=True, messages=1, max_f_eval=max_nb_eval_optim)
|
m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)
|
||||||
|
|
||||||
pb.figure()
|
pb.figure()
|
||||||
Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
|
Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
|
||||||
|
|
@ -181,7 +181,7 @@ def coregionalisation_sparse(max_nb_eval_optim=100):
|
||||||
return m
|
return m
|
||||||
|
|
||||||
|
|
||||||
def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000, max_nb_eval_optim=100):
|
def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000, optim_iters=100):
|
||||||
"""Show an example of a multimodal error surface for Gaussian process regression. Gene 939 has bimodal behaviour where the noisey mode is higher."""
|
"""Show an example of a multimodal error surface for Gaussian process regression. Gene 939 has bimodal behaviour where the noisey mode is higher."""
|
||||||
|
|
||||||
# Contour over a range of length scales and signal/noise ratios.
|
# Contour over a range of length scales and signal/noise ratios.
|
||||||
|
|
@ -219,7 +219,7 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
|
||||||
|
|
||||||
# optimize
|
# optimize
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize(xtol=1e-6, ftol=1e-6, max_f_eval=max_nb_eval_optim)
|
m.optimize(xtol=1e-6, ftol=1e-6, max_f_eval=optim_iters)
|
||||||
|
|
||||||
optim_point_x[1] = m.get('rbf_lengthscale')
|
optim_point_x[1] = m.get('rbf_lengthscale')
|
||||||
optim_point_y[1] = np.log10(m.get('rbf_variance')) - np.log10(m.get('white_variance'));
|
optim_point_y[1] = np.log10(m.get('rbf_variance')) - np.log10(m.get('white_variance'));
|
||||||
|
|
@ -266,7 +266,7 @@ def _contour_data(data, length_scales, log_SNRs, signal_kernel_call=GPy.kern.rbf
|
||||||
lls.append(length_scale_lls)
|
lls.append(length_scale_lls)
|
||||||
return np.array(lls)
|
return np.array(lls)
|
||||||
|
|
||||||
def sparse_GP_regression_1D(N = 400, M = 5, max_nb_eval_optim=100):
|
def sparse_GP_regression_1D(N = 400, M = 5, optim_iters=100):
|
||||||
"""Run a 1D example of a sparse GP regression."""
|
"""Run a 1D example of a sparse GP regression."""
|
||||||
# sample inputs and outputs
|
# sample inputs and outputs
|
||||||
X = np.random.uniform(-3.,3.,(N,1))
|
X = np.random.uniform(-3.,3.,(N,1))
|
||||||
|
|
@ -281,11 +281,11 @@ def sparse_GP_regression_1D(N = 400, M = 5, max_nb_eval_optim=100):
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
|
|
||||||
m.checkgrad(verbose=1)
|
m.checkgrad(verbose=1)
|
||||||
m.optimize('tnc', messages = 1, max_f_eval=max_nb_eval_optim)
|
m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
|
||||||
m.plot()
|
m.plot()
|
||||||
return m
|
return m
|
||||||
|
|
||||||
def sparse_GP_regression_2D(N = 400, M = 50, max_nb_eval_optim=100):
|
def sparse_GP_regression_2D(N = 400, M = 50, optim_iters=100):
|
||||||
"""Run a 2D example of a sparse GP regression."""
|
"""Run a 2D example of a sparse GP regression."""
|
||||||
X = np.random.uniform(-3.,3.,(N,2))
|
X = np.random.uniform(-3.,3.,(N,2))
|
||||||
Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(N,1)*0.05
|
Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(N,1)*0.05
|
||||||
|
|
@ -306,12 +306,12 @@ def sparse_GP_regression_2D(N = 400, M = 50, max_nb_eval_optim=100):
|
||||||
|
|
||||||
# optimize and plot
|
# optimize and plot
|
||||||
pb.figure()
|
pb.figure()
|
||||||
m.optimize('tnc', messages = 1, max_f_eval=max_nb_eval_optim)
|
m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
|
||||||
m.plot()
|
m.plot()
|
||||||
print(m)
|
print(m)
|
||||||
return m
|
return m
|
||||||
|
|
||||||
def uncertain_inputs_sparse_regression(max_nb_eval_optim=100):
|
def uncertain_inputs_sparse_regression(optim_iters=100):
|
||||||
"""Run a 1D example of a sparse GP regression with uncertain inputs."""
|
"""Run a 1D example of a sparse GP regression with uncertain inputs."""
|
||||||
fig, axes = pb.subplots(1,2,figsize=(12,5))
|
fig, axes = pb.subplots(1,2,figsize=(12,5))
|
||||||
|
|
||||||
|
|
@ -327,7 +327,7 @@ def uncertain_inputs_sparse_regression(max_nb_eval_optim=100):
|
||||||
# create simple GP model - no input uncertainty on this one
|
# create simple GP model - no input uncertainty on this one
|
||||||
m = GPy.models.sparse_GP_regression(X, Y, kernel=k, Z=Z)
|
m = GPy.models.sparse_GP_regression(X, Y, kernel=k, Z=Z)
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize('scg', messages=1, max_f_eval=max_nb_eval_optim)
|
m.optimize('scg', messages=1, max_f_eval=optim_iters)
|
||||||
m.plot(ax=axes[0])
|
m.plot(ax=axes[0])
|
||||||
axes[0].set_title('no input uncertainty')
|
axes[0].set_title('no input uncertainty')
|
||||||
|
|
||||||
|
|
@ -335,7 +335,7 @@ def uncertain_inputs_sparse_regression(max_nb_eval_optim=100):
|
||||||
#the same model with uncertainty
|
#the same model with uncertainty
|
||||||
m = GPy.models.sparse_GP_regression(X, Y, kernel=k, Z=Z, X_variance=S)
|
m = GPy.models.sparse_GP_regression(X, Y, kernel=k, Z=Z, X_variance=S)
|
||||||
m.ensure_default_constraints()
|
m.ensure_default_constraints()
|
||||||
m.optimize('scg', messages=1, max_f_eval=max_nb_eval_optim)
|
m.optimize('scg', messages=1, max_f_eval=optim_iters)
|
||||||
m.plot(ax=axes[1])
|
m.plot(ax=axes[1])
|
||||||
axes[1].set_title('with input uncertainty')
|
axes[1].set_title('with input uncertainty')
|
||||||
print(m)
|
print(m)
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue