diff --git a/GPy/inference/latent_function_inference/var_dtc.py b/GPy/inference/latent_function_inference/var_dtc.py index 6599ac66..d61e7f0f 100644 --- a/GPy/inference/latent_function_inference/var_dtc.py +++ b/GPy/inference/latent_function_inference/var_dtc.py @@ -76,12 +76,14 @@ class VarDTC(LatentFunctionInference): # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency! #self.YYTfactor = self.get_YYTfactor(Y) #VVT_factor = self.get_VVTfactor(self.YYTfactor, beta) + het_noise = beta.size > 1 + if beta.ndim == 1: + beta = beta[:, None] VVT_factor = beta*Y #VVT_factor = beta*Y trYYT = self.get_trYYT(Y) # do the inference: - het_noise = beta.size > 1 num_inducing = Z.shape[0] num_data = Y.shape[0] # kernel computations, using BGPLVM notation diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py index fc78c55e..521baeb3 100644 --- a/GPy/testing/model_tests.py +++ b/GPy/testing/model_tests.py @@ -377,17 +377,14 @@ class GradientTests(np.testing.TestCase): m = GPy.models.GPLVM(Y, input_dim, init='PCA', kernel=k) self.assertTrue(m.checkgrad()) - @unittest.expectedFailure def test_GP_EP_probit(self): N = 20 X = np.hstack([np.random.normal(5, 2, N / 2), np.random.normal(10, 2, N / 2)])[:, None] Y = np.hstack([np.ones(N / 2), np.zeros(N / 2)])[:, None] kernel = GPy.kern.RBF(1) m = GPy.models.GPClassification(X, Y, kernel=kernel) - m.update_likelihood_approximation() self.assertTrue(m.checkgrad()) - @unittest.expectedFailure def test_sparse_EP_DTC_probit(self): N = 20 X = np.hstack([np.random.normal(5, 2, N / 2), np.random.normal(10, 2, N / 2)])[:, None] @@ -399,7 +396,6 @@ class GradientTests(np.testing.TestCase): # likelihood = GPy.likelihoods.EP(Y, distribution) # m = GPy.core.SparseGP(X, likelihood, kernel, Z) # m.ensure_default_constraints() - m.update_likelihood_approximation() self.assertTrue(m.checkgrad()) @unittest.expectedFailure @@ -412,7 +408,8 @@ class GradientTests(np.testing.TestCase): 
m.update_likelihood_approximation() self.assertTrue(m.checkgrad()) - def multioutput_regression_1D(self): + @unittest.expectedFailure + def test_multioutput_regression_1D(self): X1 = np.random.rand(50, 1) * 8 X2 = np.random.rand(30, 1) * 5 X = np.vstack((X1, X2)) @@ -422,10 +419,11 @@ class GradientTests(np.testing.TestCase): k1 = GPy.kern.RBF(1) m = GPy.models.GPMultioutputRegression(X_list=[X1, X2], Y_list=[Y1, Y2], kernel_list=[k1]) m.constrain_fixed('.*rbf_var', 1.) self.assertTrue(m.checkgrad()) - def multioutput_sparse_regression_1D(self): + @unittest.expectedFailure + def test_multioutput_sparse_regression_1D(self): X1 = np.random.rand(500, 1) * 8 X2 = np.random.rand(300, 1) * 5 X = np.vstack((X1, X2)) @@ -447,6 +446,21 @@ class GradientTests(np.testing.TestCase): m = GPy.models.GPHeteroscedasticRegression(X, Y, kern) self.assertTrue(m.checkgrad()) + def test_sparse_gp_heteroscedastic_regression(self): + num_obs = 25 + X = np.random.randint(0, 140, num_obs) + X = X[:, None] + Y = 25. + np.sin(X / 20.) * 2. + np.random.rand(num_obs)[:, None] + kern = GPy.kern.Bias(1) + GPy.kern.RBF(1) + Y_metadata = {'output_index':np.arange(num_obs)[:,None]} + noise_terms = np.unique(Y_metadata['output_index'].flatten()) + likelihoods_list = [GPy.likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for j in noise_terms] + likelihood = GPy.likelihoods.MixedNoise(likelihoods_list=likelihoods_list) + m = GPy.core.SparseGP(X, Y, X[np.random.choice(num_obs, 10)], + kern, likelihood, + GPy.inference.latent_function_inference.VarDTC(), + Y_metadata=Y_metadata) + self.assertTrue(m.checkgrad()) def test_gp_kronecker_gaussian(self): N1, N2 = 30, 20