diff --git a/GPy/core/model.py b/GPy/core/model.py index dea55319..c1fdd0bb 100644 --- a/GPy/core/model.py +++ b/GPy/core/model.py @@ -212,7 +212,7 @@ class model(parameterised): currently_constrained = self.all_constrained_indices() to_make_positive = [] for s in positive_strings: - for i in self.grep_param_names(s): + for i in self.grep_param_names(".*"+s): if not (i in currently_constrained): #to_make_positive.append(re.escape(param_names[i])) to_make_positive.append(i) diff --git a/GPy/core/parameterised.py b/GPy/core/parameterised.py index 7409402e..d1abb9c3 100644 --- a/GPy/core/parameterised.py +++ b/GPy/core/parameterised.py @@ -64,21 +64,21 @@ class parameterised(object): m['var'] = 2. # > sets all parameters matching 'var' to 2. m['var'] = # > sets parameters matching 'var' to """ - def get(self, name): + def get(self, regexp): warnings.warn(self._get_set_deprecation, FutureWarning, stacklevel=2) - return self[name] + return self[regexp] - def set(self, name, val): + def set(self, regexp, val): warnings.warn(self._get_set_deprecation, FutureWarning, stacklevel=2) - self[name] = val + self[regexp] = val - def __getitem__(self, name, return_names=False): + def __getitem__(self, regexp, return_names=False): """ Get a model parameter by name. The name is applied as a regular expression and all parameters that match that regular expression are returned. """ - matches = self.grep_param_names(name) + matches = self.grep_param_names(regexp) if len(matches): if return_names: return self._get_params()[matches], np.asarray(self._get_param_names())[matches].tolist() @@ -103,8 +103,8 @@ class parameterised(object): else: raise AttributeError, "no parameter matches %s" % name - def tie_params(self, which): - matches = self.grep_param_names(which) + def tie_params(self, regexp): + matches = self.grep_param_names(regexp) assert matches.size > 0, "need at least something to tie together" if len(self.tied_indices): assert not np.any(matches[:, None] == np.hstack(self.tied_indices)), "Some indices are already tied!" @@ -119,28 +119,23 @@ class parameterised(object): """Unties all parameters by setting tied_indices to an empty list.""" self.tied_indices = [] - def grep_param_names(self, expr): + def grep_param_names(self, regexp): """ - Arguments - --------- - expr -- can be a regular expression object or a string to be turned into regular expression object. + :param regexp: regular expression to select parameter names + :type regexp: re | str | int + :rtype: the indices of self._get_param_names which match the regular expression. - Returns - ------- - the indices of self._get_param_names which match the regular expression. - - Notes - ----- - Other objects are passed through - i.e. integers which weren't meant for grepping + Note:- + Other objects are passed through - i.e. 
integers which weren't meant for grepping """ - if type(expr) in [str, np.string_, np.str]: - expr = re.compile(expr) - return np.nonzero([expr.search(name) for name in self._get_param_names()])[0] - elif type(expr) is re._pattern_type: - return np.nonzero([expr.search(name) for name in self._get_param_names()])[0] + if type(regexp) in [str, np.string_, np.str]: + regexp = re.compile(regexp) + return np.nonzero([regexp.match(name) for name in self._get_param_names()])[0] + elif type(regexp) is re._pattern_type: + return np.nonzero([regexp.match(name) for name in self._get_param_names()])[0] else: - return expr + return regexp def Nparam_transformed(self): removed = 0 @@ -152,9 +147,9 @@ class parameterised(object): return len(self._get_params()) - removed - def unconstrain(self, which): + def unconstrain(self, regexp): """Unconstrain matching parameters. does not untie parameters""" - matches = self.grep_param_names(which) + matches = self.grep_param_names(regexp) #tranformed contraints: for match in matches: @@ -178,17 +173,17 @@ class parameterised(object): else: self.fixed_indices, self.fixed_values = [], [] - def constrain_negative(self, which): + def constrain_negative(self, regexp): """ Set negative constraints. """ - self.constrain(which, transformations.negative_exponent()) + self.constrain(regexp, transformations.negative_exponent()) - def constrain_positive(self, which): + def constrain_positive(self, regexp): """ Set positive constraints. """ - self.constrain(which, transformations.logexp()) + self.constrain(regexp, transformations.logexp()) - def constrain_bounded(self, which,lower, upper): + def constrain_bounded(self, regexp,lower, upper): """ Set bounded constraints. """ - self.constrain(which, transformations.logistic(lower, upper)) + self.constrain(regexp, transformations.logistic(lower, upper)) def all_constrained_indices(self): if len(self.constrained_indices) or len(self.fixed_indices): @@ -196,10 +191,10 @@ class parameterised(object): else: return np.empty(shape=(0,)) - def constrain(self,which,transform): + def constrain(self,regexp,transform): assert isinstance(transform,transformations.transformation) - matches = self.grep_param_names(which) + matches = self.grep_param_names(regexp) overlap = set(matches).intersection(set(self.all_constrained_indices())) if overlap: self.unconstrain(np.asarray(list(overlap))) @@ -214,11 +209,11 @@ class parameterised(object): x[matches] = transform.initialize(x[matches]) self._set_params(x) - def constrain_fixed(self, which, value=None): + def constrain_fixed(self, regexp, value=None): """ Arguments --------- - :param which: np.array(dtype=int), or regular expression object or string + :param regexp: np.array(dtype=int), or regular expression object or string :param value: a float to fix the matched values to. If the value is not specified, the parameter is fixed to the current value @@ -227,7 +222,7 @@ class parameterised(object): Fixing a parameter which is tied to another, or constrained in some way will result in an error. 
To fix multiple parameters to the same value, simply pass a regular expression which matches both parameter names, or pass both of the indexes """ - matches = self.grep_param_names(which) + matches = self.grep_param_names(regexp) assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained" self.fixed_indices.append(matches) if value != None: diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py index 029d812d..43e1bd78 100644 --- a/GPy/examples/dimensionality_reduction.py +++ b/GPy/examples/dimensionality_reduction.py @@ -7,6 +7,7 @@ from matplotlib import pyplot as plt import GPy from GPy.models.Bayesian_GPLVM import Bayesian_GPLVM from GPy.util.datasets import swiss_roll_generated +from GPy.core.transformations import logexp default_seed = np.random.seed(123344) @@ -17,11 +18,11 @@ def BGPLVM(seed=default_seed): D = 4 # generate GPLVM-like data X = np.random.rand(N, Q) - k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001) + k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001) K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N), K, D).T - k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q) + k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q) # k = GPy.kern.rbf(Q) + GPy.kern.rbf(Q) + GPy.kern.white(Q) # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001) # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001) @@ -187,8 +188,8 @@ def _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim=False): Y2 = S2.dot(np.random.randn(S2.shape[1], D2)) Y3 = S3.dot(np.random.randn(S3.shape[1], D3)) - Y1 += .1 * np.random.randn(*Y1.shape) - Y2 += .1 * np.random.randn(*Y2.shape) + Y1 += .3 * np.random.randn(*Y1.shape) + Y2 += .2 * np.random.randn(*Y2.shape) Y3 += .1 * np.random.randn(*Y3.shape) Y1 -= Y1.mean(0) @@ -262,13 +263,13 @@ def bgplvm_simulation(optimize='scg', # m.constrain('variance|noise', logexp_clipped()) m.ensure_default_constraints() m['noise'] = Y.var() / 100. - m['linear_variance'] = .01 + m['linear_variance'] = .001 if optimize: print "Optimizing model:" - m.optimize('bfgs', max_iters=max_f_eval, + m.optimize('scg', max_iters=max_f_eval, max_f_eval=max_f_eval, - messages=True, gtol=1e-2) + messages=True, gtol=1e-6) if plot: import pylab m.plot_X_1d() @@ -277,23 +278,21 @@ def bgplvm_simulation(optimize='scg', m.kern.plot_ARD() return m -def mrd_simulation(optimize=True, plot_sim=False, **kw): - D1, D2, D3, N, M, Q = 150, 200, 400, 300, 3, 7 +def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw): + D1, D2, D3, N, M, Q = 150, 200, 400, 500, 3, 7 slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim) from GPy.models import mrd from GPy import kern - from GPy.core.transformations import logexp_clipped reload(mrd); reload(kern) - k = kern.linear(Q, [0.05] * Q, True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) - m = mrd.MRD(Ylist, Q=Q, M=M, kernels=k, initx="concat", initz='permute', **kw) + k = kern.linear(Q, [.05] * Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) + m = mrd.MRD(Ylist, Q=Q, M=M, kernels=k, initx="", initz='permute', **kw) for i, Y in enumerate(Ylist): m['{}_noise'.format(i + 1)] = Y.var() / 100. 
- # m.constrain('variance|noise', logexp_clipped(1e-6)) m.ensure_default_constraints() # DEBUG @@ -301,8 +300,10 @@ def mrd_simulation(optimize=True, plot_sim=False, **kw): if optimize: print "Optimizing Model:" - m.optimize('bfgs', messages=1, max_iters=3e3) - + m.optimize('scg', messages=1, max_iters=5e4, max_f_eval=5e4) + if plot: + m.plot_X_1d() + m.plot_scales() return m def brendan_faces(): @@ -323,7 +324,7 @@ def brendan_faces(): m.ensure_default_constraints() m.optimize('scg', messages=1, max_f_eval=10000) - ax = m.plot_latent(which_indices=(0,1)) + ax = m.plot_latent(which_indices=(0, 1)) y = m.likelihood.Y[0, :] data_show = GPy.util.visualize.image_show(y[None, :], dimensions=(20, 28), transpose=True, invert=False, scale=False) lvm_visualizer = GPy.util.visualize.lvm(m.X[0, :].copy(), m, data_show, ax) diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py index 1a35df2f..2a9d2b00 100644 --- a/GPy/examples/regression.py +++ b/GPy/examples/regression.py @@ -10,7 +10,7 @@ import numpy as np import GPy -def toy_rbf_1d(): +def toy_rbf_1d(max_nb_eval_optim=100): """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.""" data = GPy.util.datasets.toy_rbf_1d() @@ -19,13 +19,13 @@ def toy_rbf_1d(): # optimize m.ensure_default_constraints() - m.optimize() + m.optimize(max_f_eval=max_nb_eval_optim) # plot m.plot() print(m) return m -def rogers_girolami_olympics(): +def rogers_girolami_olympics(max_nb_eval_optim=100): """Run a standard Gaussian process regression on the Rogers and Girolami olympics data.""" data = GPy.util.datasets.rogers_girolami_olympics() @@ -34,14 +34,14 @@ def rogers_girolami_olympics(): # optimize m.ensure_default_constraints() - m.optimize() + m.optimize(max_f_eval=max_nb_eval_optim) # plot m.plot(plot_limits = (1850, 2050)) print(m) return m -def toy_rbf_1d_50(): +def toy_rbf_1d_50(max_nb_eval_optim=100): """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance.""" data = GPy.util.datasets.toy_rbf_1d_50() @@ -50,14 +50,14 @@ def toy_rbf_1d_50(): # optimize m.ensure_default_constraints() - m.optimize() + m.optimize(max_f_eval=max_nb_eval_optim) # plot m.plot() print(m) return m -def silhouette(): +def silhouette(max_nb_eval_optim=100): """Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper.""" data = GPy.util.datasets.silhouette() @@ -66,12 +66,12 @@ def silhouette(): # optimize m.ensure_default_constraints() - m.optimize(messages=True) + m.optimize(messages=True,max_f_eval=max_nb_eval_optim) print(m) return m -def coregionalisation_toy2(): +def coregionalisation_toy2(max_nb_eval_optim=100): """ A simple demonstration of coregionalisation on two sinusoidal functions. """ @@ -90,8 +90,7 @@ def coregionalisation_toy2(): m.constrain_fixed('rbf_var',1.) m.constrain_positive('kappa') m.ensure_default_constraints() - m.optimize('sim',max_f_eval=5000,messages=1) - #m.optimize() + m.optimize('sim',messages=1,max_f_eval=max_nb_eval_optim) pb.figure() Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1)))) @@ -104,7 +103,7 @@ def coregionalisation_toy2(): pb.plot(X2[:,0],Y2[:,0],'gx',mew=2) return m -def coregionalisation_toy(): +def coregionalisation_toy(max_nb_eval_optim=100): """ A simple demonstration of coregionalisation on two sinusoidal functions. """ @@ -123,7 +122,7 @@ def coregionalisation_toy(): m.constrain_fixed('rbf_var',1.) 
m.constrain_positive('kappa') m.ensure_default_constraints() - m.optimize() + m.optimize(max_f_eval=max_nb_eval_optim) pb.figure() Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1)))) @@ -137,7 +136,7 @@ def coregionalisation_toy(): return m -def coregionalisation_sparse(): +def coregionalisation_sparse(max_nb_eval_optim=100): """ A simple demonstration of coregionalisation on two sinusoidal functions using sparse approximations. """ @@ -162,7 +161,7 @@ def coregionalisation_sparse(): m.constrain_positive('kappa') m.constrain_fixed('iip') m.ensure_default_constraints() - m.optimize_restarts(5,robust=True,messages=1) + m.optimize_restarts(5, robust=True, messages=1, max_f_eval=max_nb_eval_optim) pb.figure() Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1)))) @@ -179,7 +178,7 @@ def coregionalisation_sparse(): return m -def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000): +def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000, max_nb_eval_optim=100): """Show an example of a multimodal error surface for Gaussian process regression. Gene 939 has bimodal behaviour where the noisey mode is higher.""" # Contour over a range of length scales and signal/noise ratios. @@ -217,7 +216,7 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000 # optimize m.ensure_default_constraints() - m.optimize(xtol=1e-6,ftol=1e-6) + m.optimize(xtol=1e-6, ftol=1e-6, max_f_eval=max_nb_eval_optim) optim_point_x[1] = m.get('rbf_lengthscale') optim_point_y[1] = np.log10(m.get('rbf_variance')) - np.log10(m.get('white_variance')); @@ -264,7 +263,7 @@ def _contour_data(data, length_scales, log_SNRs, signal_kernel_call=GPy.kern.rbf lls.append(length_scale_lls) return np.array(lls) -def sparse_GP_regression_1D(N = 400, M = 5): +def sparse_GP_regression_1D(N = 400, M = 5, max_nb_eval_optim=100): """Run a 1D example of a sparse GP regression.""" # sample inputs and outputs X = np.random.uniform(-3.,3.,(N,1)) @@ -279,11 +278,11 @@ def sparse_GP_regression_1D(N = 400, M = 5): m.constrain_positive('(variance|lengthscale|precision)') m.checkgrad(verbose=1) - m.optimize('tnc', messages = 1) + m.optimize('tnc', messages = 1, max_f_eval=max_nb_eval_optim) m.plot() return m -def sparse_GP_regression_2D(N = 400, M = 50): +def sparse_GP_regression_2D(N = 400, M = 50, max_nb_eval_optim=100): """Run a 2D example of a sparse GP regression.""" X = np.random.uniform(-3.,3.,(N,2)) Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(N,1)*0.05 @@ -294,7 +293,7 @@ def sparse_GP_regression_2D(N = 400, M = 50): kernel = rbf + noise # create simple GP model - m = GPy.models.sparse_GP_regression(X,Y,kernel, M = M) + m = GPy.models.sparse_GP_regression(X,Y,kernel, M = M, max_nb_eval_optim=100) # contrain all parameters to be positive (but not inducing inputs) m.constrain_positive('(variance|lengthscale|precision)') @@ -304,12 +303,12 @@ def sparse_GP_regression_2D(N = 400, M = 50): # optimize and plot pb.figure() - m.optimize('tnc', messages = 1) + m.optimize('tnc', messages = 1, max_f_eval=max_nb_eval_optim) m.plot() print(m) return m -def uncertain_inputs_sparse_regression(): +def uncertain_inputs_sparse_regression(max_nb_eval_optim=100): """Run a 1D example of a sparse GP regression with uncertain inputs.""" # sample inputs and outputs S = np.ones((20,1)) @@ -327,7 +326,7 @@ def uncertain_inputs_sparse_regression(): m.constrain_positive('(variance|prec)') # optimize and plot - m.optimize('tnc', max_f_eval = 1000, messages=1) + 
m.optimize('tnc', messages=1, max_f_eval=max_nb_eval_optim) m.plot() print(m) return m diff --git a/GPy/inference/SCG.py b/GPy/inference/SCG.py index fc8ce21a..4318c197 100644 --- a/GPy/inference/SCG.py +++ b/GPy/inference/SCG.py @@ -52,7 +52,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto ftol = 1e-6 if gtol is None: gtol = 1e-5 - sigma0 = 1.0e-4 + sigma0 = 1.0e-8 fold = f(x, *optargs) # Initial function value. function_eval = 1 fnow = fold diff --git a/GPy/likelihoods/Gaussian.py b/GPy/likelihoods/Gaussian.py index e08fee90..d87b1b98 100644 --- a/GPy/likelihoods/Gaussian.py +++ b/GPy/likelihoods/Gaussian.py @@ -51,11 +51,15 @@ class Gaussian(likelihood): return ["noise_variance"] def _set_params(self, x): - x = float(x) + x = np.float64(x) if self._variance != x: - self.precision = 1. / x + if x == 0.: + self.precision = None + self.V = None + else: + self.precision = 1. / x + self.V = (self.precision) * self.Y self.covariance_matrix = np.eye(self.N) * x - self.V = (self.precision) * self.Y self._variance = x def predictive_values(self, mu, var, full_cov): diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py index 88930040..6f2a5f6a 100644 --- a/GPy/models/mrd.py +++ b/GPy/models/mrd.py @@ -33,8 +33,11 @@ class MRD(model): Initial latent space :param X_variance: Initial latent space variance - :param init: [PCA|random] - initialization method to use + :param init: [cooncat|single|random] + initialization method to use: + *concat: PCA on concatenated outputs + *single: PCA on each output + *random: random :param M: number of inducing inputs to use :param Z: diff --git a/GPy/notes.txt b/GPy/notes.txt deleted file mode 100644 index c80cedad..00000000 --- a/GPy/notes.txt +++ /dev/null @@ -1,61 +0,0 @@ -the predict method for GP_regression returns a covariance matrix which is a bad idea as this takes a lot to compute, it's also confusing for first time users. Should only be returned if the user explicitly requests it. -FIXED - -When computing kernel.K for kernels like rbf, you can't compute a version with rbf.K(X) you have to do rbf.K(X, X) -FIXED - -Change Youter to YYT (Youter doesn't mean anything for matrices). -FIXED - -Change get_param and set_param to get_params and set_params -FIXED - -Fails in weird ways if you pass a integer as the input instead of a double to the kernel. -FIXED - -The Matern kernels (at least the 52) still is working in the ARD manner which means it wouldn't run for very large input dimension. Needs to be fixed to match the RBF. -FIXED - -Implementing new covariances is too complicated at the moment. We need a barebones example of what to implement and where. Commenting in the covariance matrices needs to be improved. It's not clear to a user what all the psi parts are for. Maybe we need a cut down and simplified example to help with this (perhaps a cut down version of the RBF?). And then we should provide a simple list of what you need to do to get a new kernel going. -TODO, a priority for this release - -Missing kernels: polynomial, rational quadratic. -TODO, should be straightforward when the above is fixed. - -Need an implementation of scaled conjugate gradients for the optimizers. -UPSTREAM: scipy are tidying up the optimize module. let's wait for their next release. - -Need an implementation of gradient descent for the optimizers (works well with GP-LVM for small random initializations) -As above. - -Need Carl Rasmussen's permission to add his conjugate gradients algorithm. 
In fact, we can just provide a hook for it, and post a separate python implementation of his algorithm. -Any word from Carl yet? - -Get constrain param by default inside model creation. -Well, we have ensure_default_constraints. There are some techinical difficulties in doing it inside model creation, so perhaps this is something for a later release. - -Bug when running classification.crescent_data() -TODO. - -Do all optimizers work only in terms of function evaluations? Do we need to check for one that uses iterations? -Upstream: Waiting for the new scipy, where the optimisers have been unified. Obviously it's be much better to be able to specify a unified set of args. - -Tolerances for optimizers, do we need to introduce some standardization? At the moment does each have its own defaults? -Upstream, as above - -A dictionary for parameter storage? So we can go through names easily? -Wontfix. Dictionaries bring up all kinds of problems since they're not ordered. it's easy enough to do: -for val, name in zip(m._get_params(), m._get_param_names()): foobar - -A flag on covariance functions that indicates when they are not associated with an underlying function (like white noise or a coregionalization matrix). -TODO, agree this would be helpful. - -Diagonal noise covariance function -TODO this is now straightforward using the likelihood framework, or as a kern. NF also requires a similar kind of kern function (a fixed form kernel) - -Long term: automatic Lagrange multiplier calculation for optimizers: constrain two parameters in an unusual way and the model automatically does the Lagrangian. Also augment the parameters with new ones, so define data variance to be white noise plus RBF variance and optimize over that and signal to noise ratio ... for example constrain the sum of variances to equal the known variance of the data. - -Randomize doesn't seem to cover a wide enough range for restarts ... try it for a model where inputs are widely spaced apart and length scale is too short. Sampling from N(0,1) is too conservative. Dangerous for people who naively use restarts. Since we have the model we could maybe come up with some sensible heuristics for setting these things. Maybe we should also consider having '.initialize()'. If we can't do this well we should disable the restart method. -Excellent proposal, but lots of work: suggest leaving for the next release? 
- - diff --git a/GPy/testing/kernel_tests.py b/GPy/testing/kernel_tests.py index b27eee07..b48bc813 100644 --- a/GPy/testing/kernel_tests.py +++ b/GPy/testing/kernel_tests.py @@ -8,7 +8,7 @@ import GPy class KernelTests(unittest.TestCase): def test_kerneltie(self): K = GPy.kern.rbf(5, ARD=True) - K.tie_params('[01]') + K.tie_params('.*[01]') K.constrain_fixed('2') X = np.random.rand(5,5) Y = np.ones((5,1)) diff --git a/GPy/testing/unit_tests.py b/GPy/testing/unit_tests.py index ee8368ac..86d13c8c 100644 --- a/GPy/testing/unit_tests.py +++ b/GPy/testing/unit_tests.py @@ -22,7 +22,7 @@ class GradientTests(unittest.TestCase): self.X2D = np.random.uniform(-3.,3.,(40,2)) self.Y2D = np.sin(self.X2D[:,0:1]) * np.sin(self.X2D[:,1:2])+np.random.randn(40,1)*0.05 - def check_model_with_white(self, kern, model_type='GP_regression', dimension=1, constraint=''): + def check_model_with_white(self, kern, model_type='GP_regression', dimension=1): #Get the correct gradients if dimension == 1: X = self.X1D @@ -37,7 +37,7 @@ class GradientTests(unittest.TestCase): noise = GPy.kern.white(dimension) kern = kern + noise m = model_fit(X, Y, kernel=kern) - m.constrain_positive(constraint) + m.ensure_default_constraints() m.randomize() # contrain all parameters to be positive self.assertTrue(m.checkgrad()) @@ -135,12 +135,12 @@ class GradientTests(unittest.TestCase): def test_sparse_GP_regression_rbf_white_kern_1d(self): ''' Testing the sparse GP regression with rbf kernel with white kernel on 1d data ''' rbf = GPy.kern.rbf(1) - self.check_model_with_white(rbf, model_type='sparse_GP_regression', dimension=1, constraint='(variance|lengthscale|precision)') + self.check_model_with_white(rbf, model_type='sparse_GP_regression', dimension=1) def test_sparse_GP_regression_rbf_white_kern_2D(self): ''' Testing the sparse GP regression with rbf and white kernel on 2d data ''' rbf = GPy.kern.rbf(2) - self.check_model_with_white(rbf, model_type='sparse_GP_regression', dimension=2, constraint='(variance|lengthscale|precision)') + self.check_model_with_white(rbf, model_type='sparse_GP_regression', dimension=2) def test_GPLVM_rbf_bias_white_kern_2D(self): """ Testing GPLVM with rbf + bias and white kernel """ @@ -150,7 +150,7 @@ class GradientTests(unittest.TestCase): K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N),K,D).T m = GPy.models.GPLVM(Y, Q, kernel = k) - m.constrain_positive('(rbf|bias|white)') + m.ensure_default_constraints() self.assertTrue(m.checkgrad()) def test_GPLVM_rbf_linear_white_kern_2D(self): @@ -161,7 +161,7 @@ class GradientTests(unittest.TestCase): K = k.K(X) Y = np.random.multivariate_normal(np.zeros(N),K,D).T m = GPy.models.GPLVM(Y, Q, init = 'PCA', kernel = k) - m.constrain_positive('(linear|bias|white)') + m.ensure_default_constraints() self.assertTrue(m.checkgrad()) def test_GP_EP_probit(self): diff --git a/GPy/util/plot.py b/GPy/util/plot.py index 295047b1..309c440e 100644 --- a/GPy/util/plot.py +++ b/GPy/util/plot.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012, GPy authors (see AUTHORS.txt). +# #Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt)
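Notes on the changes above: grep_param_names() now anchors patterns with re.match instead of scanning with re.search, which is why model.py prepends ".*" to the built-in positive strings and why the kernel test ties '.*[01]' rather than '[01]'. A minimal sketch of the behavioural difference, using plain re and made-up parameter names (nothing below is GPy code):

    import re

    # made-up names of the kind _get_param_names() returns
    param_names = ['rbf_variance', 'rbf_lengthscale', 'white_variance']

    # old behaviour: re.search matches the pattern anywhere in the name
    print([n for n in param_names if re.search('variance', n)])
    # -> ['rbf_variance', 'white_variance']

    # new behaviour: re.match anchors at the start of the name,
    # so a bare substring no longer matches
    print([n for n in param_names if re.match('variance', n)])
    # -> []

    # callers now prepend '.*' (as model.py does for the default positive
    # constraints) to keep substring-style matching
    print([n for n in param_names if re.match('.*variance', n)])
    # -> ['rbf_variance', 'white_variance']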
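The regression examples all gain a max_nb_eval_optim argument that is forwarded to m.optimize(max_f_eval=...), so each demo stops after a bounded number of function evaluations. A sketch of that pattern, using a hypothetical quick_fit() helper that is not part of the repository:

    import numpy as np
    import GPy

    def quick_fit(X, Y, max_nb_eval_optim=100):
        # hypothetical helper mirroring how each example now threads the cap through
        kernel = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1])
        m = GPy.models.GP_regression(X, Y, kernel=kernel)
        m.ensure_default_constraints()
        m.optimize(max_f_eval=max_nb_eval_optim)  # bounded number of function evaluations
        return m

    X = np.random.uniform(-3., 3., (50, 1))
    Y = np.sin(X) + np.random.randn(50, 1) * 0.05
    print(quick_fit(X, Y, max_nb_eval_optim=200))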
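The gradient tests drop their hand-written positivity regexps ('(variance|lengthscale|precision)', '(rbf|bias|white)', ...) in favour of ensure_default_constraints(). The pattern they now follow, sketched under the same assumptions as the example above:

    import numpy as np
    import GPy

    X = np.random.uniform(-3., 3., (20, 1))
    Y = np.sin(X) + np.random.randn(20, 1) * 0.05
    m = GPy.models.GP_regression(X, Y, kernel=GPy.kern.rbf(1) + GPy.kern.white(1))

    # previously: m.constrain_positive('(variance|lengthscale|precision)')
    m.ensure_default_constraints()  # constrain the standard parameters to be positive
    m.randomize()
    print(m.checkgrad())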
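Gaussian._set_params() now special-cases a zero noise variance instead of dividing by it. A standalone sketch of the guard, using a toy stand-in for the likelihood object rather than the real class:

    import numpy as np

    class ToyGaussianNoise(object):
        # toy stand-in for GPy.likelihoods.Gaussian, just to show the guard
        def __init__(self, Y):
            self.Y = Y
            self.N = Y.shape[0]
            self._variance = None

        def _set_params(self, x):
            x = np.float64(x)
            if self._variance != x:
                if x == 0.:
                    # a zero variance has no finite precision; the old code
                    # computed 1. / x unconditionally and failed here
                    self.precision = None
                    self.V = None
                else:
                    self.precision = 1. / x
                    self.V = self.precision * self.Y
                self.covariance_matrix = np.eye(self.N) * x
                self._variance = x

    noise = ToyGaussianNoise(np.random.randn(5, 1))
    noise._set_params(0.1)
    noise._set_params(0.)  # precision and V are set to None instead of dividing by zero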