diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py
index 4f1a4ebc..c7daa26b 100644
--- a/GPy/examples/classification.py
+++ b/GPy/examples/classification.py
@@ -25,7 +25,6 @@ def crescent_data(seed=default_seed):
     # FIXME
     Y[Y.flatten()==-1] = 0
     m = GPy.models.GPClassification(data['X'], Y)
-    m.ensure_default_constraints()
     #m.update_likelihood_approximation()
     #m.optimize()
     m.pseudo_EM()
@@ -75,7 +74,6 @@ def toy_linear_1d_classification(seed=default_seed):
     # Model definition
     m = GPy.models.GPClassification(data['X'], Y)
-    m.ensure_default_constraints()
     # Optimize
     #m.update_likelihood_approximation()
@@ -106,7 +104,6 @@ def sparse_toy_linear_1d_classification(num_inducing=10,seed=default_seed):
     m = GPy.models.SparseGPClassification(data['X'], Y,num_inducing=num_inducing)
     m['.*len']= 4.
-    m.ensure_default_constraints()
     # Optimize
     #m.update_likelihood_approximation()
     # Parameters optimization:
@@ -137,7 +134,6 @@ def sparse_crescent_data(num_inducing=10, seed=default_seed):
     Y[Y.flatten()==-1]=0
     m = GPy.models.SparseGPClassification(data['X'], Y,num_inducing=num_inducing)
-    m.ensure_default_constraints()
     m['.*len'] = 10.
     #m.update_likelihood_approximation()
     #m.optimize()
@@ -163,7 +159,6 @@ def FITC_crescent_data(num_inducing=10, seed=default_seed):
     m = GPy.models.FITCClassification(data['X'], Y,num_inducing=num_inducing)
     m.constrain_bounded('.*len',1.,1e3)
-    m.ensure_default_constraints()
     m['.*len'] = 3.
     #m.update_likelihood_approximation()
     #m.optimize()
diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index 28ee2bde..16afe5eb 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -37,7 +37,6 @@ def BGPLVM(seed=default_seed):
     # m.optimize(messages = 1)
     # m.plot()
     # pb.title('After optimisation')
-    m.ensure_default_constraints()
     m.randomize()
     m.checkgrad(verbose=1)
@@ -53,7 +52,6 @@ def GPLVM_oil_100(optimize=True):
     m.data_labels = data['Y'].argmax(axis=1)
     # optimize
-    m.ensure_default_constraints()
     if optimize:
         m.optimize('scg', messages=1)
@@ -108,7 +106,6 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
     m.data_colors = c
     m.data_t = t
-    m.ensure_default_constraints()
     m['rbf_lengthscale'] = 1. # X.var(0).max() / X.var(0)
     m['noise_variance'] = Y.var() / 100.
     m['bias_variance'] = 0.05
@@ -134,7 +131,6 @@ def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_f_eval=50, plot=
     m['.*lengt'] = 1. # m.X.var(0).max() / m.X.var(0)
     m['noise'] = Yn.var() / 100.
-    m.ensure_default_constraints()
     # optimize
     if optimize:
@@ -159,7 +155,6 @@ def oil_100():
     m = GPy.models.GPLVM(data['X'], 2)
     # optimize
-    m.ensure_default_constraints()
     m.optimize(messages=1, max_iters=2)
     # plot
@@ -239,7 +234,6 @@ def bgplvm_simulation_matlab_compare():
     # X=mu,
     # X_variance=S,
     _debug=False)
-    m.ensure_default_constraints()
     m.auto_scale_factor = True
     m['noise'] = Y.var() / 100.
     m['linear_variance'] = .01
@@ -262,7 +256,6 @@ def bgplvm_simulation(optimize='scg',
     k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) # + kern.bias(Q)
     m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k, _debug=True)
     # m.constrain('variance|noise', logexp_clipped())
-    m.ensure_default_constraints()
     m['noise'] = Y.var() / 100.
     m['linear_variance'] = .01
@@ -291,7 +284,6 @@ def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
     for i, Y in enumerate(Ylist):
         m['{}_noise'.format(i + 1)] = Y.var() / 100.
-    m.ensure_default_constraints()
     # DEBUG
     # np.seterr("raise")
@@ -319,7 +311,6 @@ def brendan_faces():
     # optimize
     m.constrain('rbf|noise|white', GPy.core.transformations.logexp_clipped())
-    m.ensure_default_constraints()
     m.optimize('scg', messages=1, max_f_eval=10000)
     ax = m.plot_latent(which_indices=(0, 1))
@@ -336,7 +327,6 @@ def stick():
     m = GPy.models.GPLVM(data['Y'], 2)
     # optimize
-    m.ensure_default_constraints()
     m.optimize(messages=1, max_f_eval=10000)
     m._set_params(m._get_params())
@@ -359,7 +349,6 @@ def cmu_mocap(subject='35', motion=['01'], in_place=True):
     m = GPy.models.GPLVM(data['Y'], 2, normalize_Y=True)
     # optimize
-    m.ensure_default_constraints()
     m.optimize(messages=1, max_f_eval=10000)
     ax = m.plot_latent()
@@ -391,7 +380,6 @@ def cmu_mocap(subject='35', motion=['01'], in_place=True):
 # m.set('iip', Z)
 # m.set('bias', 1e-4)
 # # optimize
-# # m.ensure_default_constraints()
 # # import pdb; pdb.set_trace()
 # m.optimize('tnc', messages=1)
diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py
index 726a9085..21b435e7 100644
--- a/GPy/examples/regression.py
+++ b/GPy/examples/regression.py
@@ -18,7 +18,6 @@ def toy_rbf_1d(optimizer='tnc', max_nb_eval_optim=100):
     m = GPy.models.GPRegression(data['X'],data['Y'])
     # optimize
-    m.ensure_default_constraints()
     m.optimize(optimizer, max_f_eval=max_nb_eval_optim)
     # plot
     m.plot()
@@ -36,7 +35,6 @@ def rogers_girolami_olympics(optim_iters=100):
     m['rbf_lengthscale'] = 10
     # optimize
-    m.ensure_default_constraints()
     m.optimize(max_f_eval=optim_iters)
     # plot
@@ -52,7 +50,6 @@ def toy_rbf_1d_50(optim_iters=100):
     m = GPy.models.GPRegression(data['X'],data['Y'])
     # optimize
-    m.ensure_default_constraints()
     m.optimize(max_f_eval=optim_iters)
     # plot
@@ -68,7 +65,6 @@ def silhouette(optim_iters=100):
     m = GPy.models.GPRegression(data['X'],data['Y'])
     # optimize
-    m.ensure_default_constraints()
     m.optimize(messages=True,max_f_eval=optim_iters)
     print(m)
@@ -92,7 +88,6 @@ def coregionalisation_toy2(optim_iters=100):
     m = GPy.models.GPRegression(X,Y,kernel=k)
     m.constrain_fixed('.*rbf_var',1.)
     #m.constrain_positive('.*kappa')
-    m.ensure_default_constraints()
     m.optimize('sim',messages=1,max_f_eval=optim_iters)
     pb.figure()
@@ -124,7 +119,6 @@ def coregionalisation_toy(optim_iters=100):
     m = GPy.models.GPRegression(X,Y,kernel=k)
     m.constrain_fixed('.*rbf_var',1.)
     #m.constrain_positive('kappa')
-    m.ensure_default_constraints()
     m.optimize(max_f_eval=optim_iters)
     pb.figure()
@@ -162,7 +156,6 @@ def coregionalisation_sparse(optim_iters=100):
     m.constrain_fixed('.*rbf_var',1.)
     m.constrain_fixed('iip')
     m.constrain_bounded('noise_variance',1e-3,1e-1)
-    m.ensure_default_constraints()
     m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)
     #plotting:
@@ -189,11 +182,9 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
     log_SNRs = np.linspace(-3., 4., resolution)
     data = GPy.util.datasets.della_gatta_TRP63_gene_expression(gene_number)
-
     # Sub sample the data to ensure multiple optima
     #data['Y'] = data['Y'][0::2, :]
     #data['X'] = data['X'][0::2, :]
-
     # Remove the mean (no bias kernel to ensure signal/noise is in RBF/white)
     data['Y'] = data['Y'] - np.mean(data['Y'])
     lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)
@@ -220,7 +211,6 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
         optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
         # optimize
-        m.ensure_default_constraints()
         m.optimize('scg', xtol=1e-6, ftol=1e-6, max_f_eval=optim_iters)
         optim_point_x[1] = m['rbf_lengthscale']
@@ -273,7 +263,6 @@ def sparse_GP_regression_1D(N = 400, num_inducing = 5, optim_iters=100):
     # create simple GP Model
     m = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=num_inducing)
-    m.ensure_default_constraints()
     m.checkgrad(verbose=1)
     m.optimize('tnc', messages = 1, max_f_eval=optim_iters)
@@ -294,7 +283,6 @@ def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100):
     m = GPy.models.SparseGPRegression(X,Y,kernel, num_inducing = num_inducing)
     # contrain all parameters to be positive (but not inducing inputs)
-    m.ensure_default_constraints()
     m.set('.*len',2.)
     m.checkgrad()
@@ -320,7 +308,6 @@ def uncertain_inputs_sparse_regression(optim_iters=100):
     # create simple GP Model - no input uncertainty on this one
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
-    m.ensure_default_constraints()
     m.optimize('scg', messages=1, max_f_eval=optim_iters)
     m.plot(ax=axes[0])
     axes[0].set_title('no input uncertainty')
@@ -328,7 +315,6 @@ def uncertain_inputs_sparse_regression(optim_iters=100):
     #the same Model with uncertainty
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S)
-    m.ensure_default_constraints()
     m.optimize('scg', messages=1, max_f_eval=optim_iters)
     m.plot(ax=axes[1])
     axes[1].set_title('with input uncertainty')
diff --git a/GPy/examples/stochastic.py b/GPy/examples/stochastic.py
index 45418d5c..533904d5 100644
--- a/GPy/examples/stochastic.py
+++ b/GPy/examples/stochastic.py
@@ -15,7 +15,6 @@ def toy_1d():
     Y = np.sin(X) + np.cos(0.3*X) + np.random.randn(*X.shape)/np.sqrt(50.)
     m = GPy.models.SVIGPRegression(X,Y, batchsize=10, Z=Z)
-    m.ensure_default_constraints()
     m.constrain_bounded('noise_variance',1e-3,1e-1)
     m.param_steplength = 1e-4
diff --git a/GPy/examples/tutorials.py b/GPy/examples/tutorials.py
index 6950af37..69fc2aaf 100644
--- a/GPy/examples/tutorials.py
+++ b/GPy/examples/tutorials.py
@@ -24,7 +24,6 @@ def tuto_GP_regression():
     print m
     m.plot()
-    m.ensure_default_constraints()
     m.constrain_positive('')
     m.unconstrain('') # may be used to remove the previous constrains
@@ -135,7 +134,6 @@ def tuto_kernel_overview():
     pb.ylabel("+ ",rotation='horizontal',fontsize='30')
     m.plot(ax=axs, which_parts=[False,False,False,True])
-    m.ensure_default_constraints()
     return(m)
@@ -144,6 +142,5 @@ def model_interaction():
     Y = np.sin(X) + np.random.randn(*X.shape)*0.01 + 5.
     k = GPy.kern.rbf(1) + GPy.kern.bias(1)
     m = GPy.models.GPRegression(X, Y, kernel=k)
-    m.ensure_default_constraints()
     return m
diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py
index 5686d7bd..03b37b01 100644
--- a/GPy/kern/rbf.py
+++ b/GPy/kern/rbf.py
@@ -241,9 +241,9 @@ class rbf(Kernpart):
         # here are the "statistics" for psi1 and psi2
         if not np.array_equal(Z, self._Z):
             #Z has changed, compute Z specific stuff
-            self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # num_inducing,num_inducing,input_dim
-            self._psi2_Zdist = 0.5*(Z[:,None,:]-Z[None,:,:]) # num_inducing,num_inducing,input_dim
-            self._psi2_Zdist_sq = np.square(self._psi2_Zdist/self.lengthscale) # num_inducing,num_inducing,input_dim
+            self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # M,M,Q
+            self._psi2_Zdist = 0.5*(Z[:,None,:]-Z[None,:,:]) # M,M,Q
+            self._psi2_Zdist_sq = np.square(self._psi2_Zdist/self.lengthscale) # M,M,Q
             self._Z = Z
         if not (np.array_equal(Z, self._Z) and np.array_equal(mu, self._mu) and np.array_equal(S, self._S)):
@@ -257,12 +257,12 @@ class rbf(Kernpart):
             self._psi1 = self.variance*np.exp(self._psi1_exponent)
             #psi2
-            self._psi2_denom = 2.*S[:,None,None,:]/self.lengthscale2+1. # N,num_inducing,num_inducing,input_dim
+            self._psi2_denom = 2.*S[:,None,None,:]/self.lengthscale2+1. # N,M,M,Q
             self._psi2_mudist, self._psi2_mudist_sq, self._psi2_exponent, _ = self.weave_psi2(mu,self._psi2_Zhat)
-            #self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,num_inducing,num_inducing,input_dim
+            #self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,M,M,Q
             #self._psi2_mudist_sq = np.square(self._psi2_mudist)/(self.lengthscale2*self._psi2_denom)
-            #self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,num_inducing,num_inducing
-            self._psi2 = np.square(self.variance)*np.exp(self._psi2_exponent) # N,num_inducing,num_inducing
+            #self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,M,M,Q
+            self._psi2 = np.square(self.variance)*np.exp(self._psi2_exponent) # N,M,M,Q
             #store matrices for caching
             self._Z, self._mu, self._S = Z, mu,S
diff --git a/GPy/models/bayesian_gplvm.py b/GPy/models/bayesian_gplvm.py
index 8043c635..c401f788 100644
--- a/GPy/models/bayesian_gplvm.py
+++ b/GPy/models/bayesian_gplvm.py
@@ -60,7 +60,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         self._savedABCD = []
         SparseGP.__init__(self, X, likelihood, kernel, Z=Z, X_variance=X_variance, **kwargs)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()
 
     @property
     def oldps(self):
diff --git a/GPy/models/fitc_classification.py b/GPy/models/fitc_classification.py
index 65178c8c..f4cf4e8d 100644
--- a/GPy/models/fitc_classification.py
+++ b/GPy/models/fitc_classification.py
@@ -44,4 +44,4 @@ class FITCClassification(FITC):
         assert Z.shape[1]==X.shape[1]
         FITC.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()
diff --git a/GPy/models/gp_classification.py b/GPy/models/gp_classification.py
index 376f0005..c6012988 100644
--- a/GPy/models/gp_classification.py
+++ b/GPy/models/gp_classification.py
@@ -38,4 +38,4 @@ class GPClassification(GP):
             raise Warning, 'likelihood.data and Y are different.'
         GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()
diff --git a/GPy/models/gp_regression.py b/GPy/models/gp_regression.py
index 8d0b02e0..db5d21b2 100644
--- a/GPy/models/gp_regression.py
+++ b/GPy/models/gp_regression.py
@@ -32,4 +32,4 @@ class GPRegression(GP):
         likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)
         GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()
diff --git a/GPy/models/gplvm.py b/GPy/models/gplvm.py
index e602a59a..44a9d2ce 100644
--- a/GPy/models/gplvm.py
+++ b/GPy/models/gplvm.py
@@ -33,7 +33,7 @@ class GPLVM(GP):
             kernel = kern.rbf(input_dim, ARD=input_dim>1) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2))
         likelihood = Gaussian(Y, normalize=normalize_Y)
         GP.__init__(self, X, likelihood, kernel, normalize_X=False)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()
 
     def initialise_latent(self, init, input_dim, Y):
diff --git a/GPy/models/mrd.py b/GPy/models/mrd.py
index 75c6fee9..1d521e5d 100644
--- a/GPy/models/mrd.py
+++ b/GPy/models/mrd.py
@@ -79,7 +79,7 @@ class MRD(Model):
         self.MQ = self.num_inducing * self.input_dim
         Model.__init__(self)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()
 
     @property
     def X(self):
diff --git a/GPy/models/sparse_gp_classification.py b/GPy/models/sparse_gp_classification.py
index 9027ef07..9228fb89 100644
--- a/GPy/models/sparse_gp_classification.py
+++ b/GPy/models/sparse_gp_classification.py
@@ -44,4 +44,4 @@ class SparseGPClassification(SparseGP):
         assert Z.shape[1]==X.shape[1]
         SparseGP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()
diff --git a/GPy/models/sparse_gp_regression.py b/GPy/models/sparse_gp_regression.py
index 432d6e18..0dcef3e0 100644
--- a/GPy/models/sparse_gp_regression.py
+++ b/GPy/models/sparse_gp_regression.py
@@ -42,4 +42,4 @@ class SparseGPRegression(SparseGP):
         likelihood = likelihoods.Gaussian(Y, normalize=normalize_Y)
         SparseGP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X, X_variance=X_variance)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()
diff --git a/GPy/models/sparse_gplvm.py b/GPy/models/sparse_gplvm.py
index ea2f8013..d6f4adb9 100644
--- a/GPy/models/sparse_gplvm.py
+++ b/GPy/models/sparse_gplvm.py
@@ -26,6 +26,7 @@ class SparseGPLVM(SparseGPRegression, GPLVM):
     def __init__(self, Y, input_dim, kernel=None, init='PCA', num_inducing=10):
         X = self.initialise_latent(init, input_dim, Y)
         SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
+        self.ensure_default_constraints()
 
     def _get_param_names(self):
         return (sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], [])
diff --git a/GPy/testing/bgplvm_tests.py b/GPy/testing/bgplvm_tests.py
index ff558f6d..6b91d999 100644
--- a/GPy/testing/bgplvm_tests.py
+++ b/GPy/testing/bgplvm_tests.py
@@ -16,7 +16,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -29,7 +28,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -42,7 +40,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -55,7 +52,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -69,7 +65,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
diff --git a/GPy/testing/gplvm_tests.py b/GPy/testing/gplvm_tests.py
index 8c2ba9fc..ebb5c4e5 100644
--- a/GPy/testing/gplvm_tests.py
+++ b/GPy/testing/gplvm_tests.py
@@ -14,7 +14,6 @@ class GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -26,7 +25,6 @@ class GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -38,7 +36,6 @@ class GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
diff --git a/GPy/testing/mrd_tests.py b/GPy/testing/mrd_tests.py
index b0137709..40fcb86a 100644
--- a/GPy/testing/mrd_tests.py
+++ b/GPy/testing/mrd_tests.py
@@ -24,7 +24,6 @@ class MRDTests(unittest.TestCase):
         likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist]
         m = GPy.models.MRD(likelihood_list, input_dim=input_dim, kernels=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         self.assertTrue(m.checkgrad())
diff --git a/GPy/testing/prior_tests.py b/GPy/testing/prior_tests.py
index e0226751..c16057db 100644
--- a/GPy/testing/prior_tests.py
+++ b/GPy/testing/prior_tests.py
@@ -14,7 +14,6 @@ class PriorTests(unittest.TestCase):
         y += 0.05*np.random.randn(len(X))
         X, y = X[:, None], y[:, None]
         m = GPy.models.GPRegression(X, y)
-        m.ensure_default_constraints()
         lognormal = GPy.priors.LogGaussian(1, 2)
         m.set_prior('rbf', lognormal)
         m.randomize()
@@ -28,7 +27,6 @@ class PriorTests(unittest.TestCase):
         y += 0.05*np.random.randn(len(X))
         X, y = X[:, None], y[:, None]
         m = GPy.models.GPRegression(X, y)
-        m.ensure_default_constraints()
         Gamma = GPy.priors.Gamma(1, 1)
         m.set_prior('rbf', Gamma)
         m.randomize()
@@ -42,7 +40,6 @@ class PriorTests(unittest.TestCase):
         y += 0.05*np.random.randn(len(X))
         X, y = X[:, None], y[:, None]
         m = GPy.models.GPRegression(X, y)
-        m.ensure_default_constraints()
         gaussian = GPy.priors.Gaussian(1, 1)
         success = False
diff --git a/GPy/testing/psi_stat_gradient_tests.py b/GPy/testing/psi_stat_gradient_tests.py
index c110d270..de670f41 100644
--- a/GPy/testing/psi_stat_gradient_tests.py
+++ b/GPy/testing/psi_stat_gradient_tests.py
@@ -113,7 +113,6 @@ if __name__ == "__main__":
 # Y -= Y.mean(axis=0)
 # k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
 # m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-# m.ensure_default_constraints()
 # m.randomize()
 # # self.assertTrue(m.checkgrad())
     numpy.random.seed(0)
@@ -146,7 +145,6 @@ if __name__ == "__main__":
 # num_inducing=num_inducing, kernel=GPy.kern.rbf(input_dim))
     m3 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
                       num_inducing=num_inducing, kernel=GPy.kern.linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)))
-    m3.ensure_default_constraints()
 # + GPy.kern.bias(input_dim))
 # m4 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
 # num_inducing=num_inducing, kernel=GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim))
diff --git a/GPy/testing/sparse_gplvm_tests.py b/GPy/testing/sparse_gplvm_tests.py
index 6145f350..e27fccff 100644
--- a/GPy/testing/sparse_gplvm_tests.py
+++ b/GPy/testing/sparse_gplvm_tests.py
@@ -15,7 +15,6 @@ class sparse_GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -27,7 +26,6 @@ class sparse_GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -39,7 +37,6 @@ class sparse_GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
diff --git a/GPy/testing/unit_tests.py b/GPy/testing/unit_tests.py
index 494ebf19..6e504a69 100644
--- a/GPy/testing/unit_tests.py
+++ b/GPy/testing/unit_tests.py
@@ -37,7 +37,6 @@ class GradientTests(unittest.TestCase):
         noise = GPy.kern.white(dimension)
         kern = kern + noise
         m = model_fit(X, Y, kernel=kern)
-        m.ensure_default_constraints()
         m.randomize()
         # contrain all parameters to be positive
         self.assertTrue(m.checkgrad())
@@ -150,7 +149,6 @@ class GradientTests(unittest.TestCase):
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
         m = GPy.models.GPLVM(Y, input_dim, kernel=k)
-        m.ensure_default_constraints()
         self.assertTrue(m.checkgrad())
 
     def test_GPLVM_rbf_linear_white_kern_2D(self):
@@ -161,7 +159,6 @@ class GradientTests(unittest.TestCase):
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
         m = GPy.models.GPLVM(Y, input_dim, init='PCA', kernel=k)
-        m.ensure_default_constraints()
         self.assertTrue(m.checkgrad())
 
     def test_GP_EP_probit(self):
@@ -195,7 +192,6 @@ class GradientTests(unittest.TestCase):
         k = GPy.kern.rbf(1) + GPy.kern.white(1)
         Y = np.hstack([np.ones(N/2),np.zeros(N/2)])[:,None]
         m = GPy.models.FITCClassification(X, Y=Y)
-        m.ensure_default_constraints()
         m.update_likelihood_approximation()
         self.assertTrue(m.checkgrad())