From b12438c0af22840cc4b0be99aa0690f3dcf2985f Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 16 Apr 2014 11:02:47 +0100
Subject: [PATCH 01/10] pseudo_EM is not available for the moment

---
 GPy/examples/classification.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/examples/classification.py b/GPy/examples/classification.py
index 9190d3f3..2b0a201d 100644
--- a/GPy/examples/classification.py
+++ b/GPy/examples/classification.py
@@ -61,9 +61,9 @@ def toy_linear_1d_classification(seed=default_seed, optimize=True, plot=True):
     if optimize:
         #m.update_likelihood_approximation()
         # Parameters optimization:
-        #m.optimize()
+        m.optimize()
         #m.update_likelihood_approximation()
-        m.pseudo_EM()
+        #m.pseudo_EM()
 
     # Plot
     if plot:

From 5d4e727521b48ca93bc9e31f44943eed4c985d49 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 16 Apr 2014 11:04:16 +0100
Subject: [PATCH 02/10] changes according to new definitions

---
 GPy/models/sparse_gp_classification.py | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/GPy/models/sparse_gp_classification.py b/GPy/models/sparse_gp_classification.py
index 96f7ac5a..be6b36a8 100644
--- a/GPy/models/sparse_gp_classification.py
+++ b/GPy/models/sparse_gp_classification.py
@@ -7,6 +7,7 @@ from ..core import SparseGP
 from .. import likelihoods
 from .. import kern
 from ..likelihoods import likelihood
+from ..inference.latent_function_inference import expectation_propagation_dtc
 
 class SparseGPClassification(SparseGP):
     """
@@ -26,16 +27,14 @@ class SparseGPClassification(SparseGP):
 
     """
 
-    def __init__(self, X, Y=None, likelihood=None, kernel=None, normalize_X=False, normalize_Y=False, Z=None, num_inducing=10):
-        if kernel is None:
-            kernel = kern.rbf(X.shape[1])# + kern.white(X.shape[1],1e-3)
+    #def __init__(self, X, Y=None, likelihood=None, kernel=None, normalize_X=False, normalize_Y=False, Z=None, num_inducing=10):
+    def __init__(self, X, Y=None, likelihood=None, kernel=None, Z=None, num_inducing=10, Y_metadata=None):
 
-        if likelihood is None:
-            noise_model = likelihoods.binomial()
-            likelihood = likelihoods.EP(Y, noise_model)
-        elif Y is not None:
-            if not all(Y.flatten() == likelihood.data.flatten()):
-                raise Warning, 'likelihood.data and Y are different.'
+        if kernel is None:
+            kernel = kern.RBF(X.shape[1])
+
+        likelihood = likelihoods.Bernoulli()
 
         if Z is None:
             i = np.random.permutation(X.shape[0])[:num_inducing]
@@ -43,8 +42,9 @@ class SparseGPClassification(SparseGP):
         else:
             assert Z.shape[1] == X.shape[1]
 
-        SparseGP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
-        self.ensure_default_constraints()
+        SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method=expectation_propagation_dtc.EPDTC(), name='SparseGPClassification',Y_metadata=Y_metadata)
+        #def __init__(self, X, Y, Z, kernel, likelihood, inference_method=None, name='sparse gp', Y_metadata=None):
 
     def _getstate(self):
         return SparseGP._getstate(self)
@@ -52,5 +52,3 @@ class SparseGPClassification(SparseGP):
 
     def _setstate(self, state):
         return SparseGP._setstate(self, state)
-
-    pass
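A minimal usage sketch of the constructor as reworked by patch 02 (hypothetical session: it assumes this branch is importable as GPy and uses made-up toy data):

    import numpy as np
    import GPy

    np.random.seed(0)
    X = np.random.rand(50, 1)                   # inputs, one column per input dimension
    Y = (np.sin(6. * X) > 0.).astype(float)     # binary labels in {0, 1}, shape (50, 1)

    # The likelihood is now hard-wired to Bernoulli and inference to EPDTC,
    # so neither is passed in; Z is sampled from the rows of X when not given.
    m = GPy.models.SparseGPClassification(X, Y, num_inducing=10)
    m.optimize()    # plain optimization, per patch 01 (pseudo_EM is gone)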
From a0ebc382dab2ee8e1ef27e2c33371be8b1c1aa70 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 16 Apr 2014 11:04:44 +0100
Subject: [PATCH 03/10] changes according to new definitions

---
 GPy/models/gp_classification.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/models/gp_classification.py b/GPy/models/gp_classification.py
index 9d918cda..d2adc429 100644
--- a/GPy/models/gp_classification.py
+++ b/GPy/models/gp_classification.py
@@ -21,10 +21,10 @@ class GPClassification(GP):
 
     """
 
-    def __init__(self, X, Y, kernel=None):
+    def __init__(self, X, Y, kernel=None,Y_metadata=None):
         if kernel is None:
             kernel = kern.RBF(X.shape[1])
 
         likelihood = likelihoods.Bernoulli()
 
-        GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, name='gp_classification')
+        GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, name='GPClassification',Y_metadata=Y_metadata)

From 6587f520043e288594037febc228b71dfeebfe9e Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 16 Apr 2014 11:05:34 +0100
Subject: [PATCH 04/10] minor change

---
 .../latent_function_inference/expectation_propagation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/GPy/inference/latent_function_inference/expectation_propagation.py b/GPy/inference/latent_function_inference/expectation_propagation.py
index ff60d2e3..172f43fb 100644
--- a/GPy/inference/latent_function_inference/expectation_propagation.py
+++ b/GPy/inference/latent_function_inference/expectation_propagation.py
@@ -22,7 +22,7 @@ class EP(object):
     def reset(self):
         self.old_mutilde, self.old_vtilde = None, None
 
-    def inference(self, kern, X, likelihood, Y, Y_metadata=None):
+    def inference(self, kern, X, likelihood, Y, Y_metadata=None, Z=None):
         num_data, output_dim = X.shape
         assert output_dim ==1, "ep in 1D only (for now!)"

From f88516308ed5b4bdc191e1396f1d10905d9c6853 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 16 Apr 2014 11:07:56 +0100
Subject: [PATCH 05/10] EPDTC added

---
 GPy/inference/latent_function_inference/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/GPy/inference/latent_function_inference/__init__.py b/GPy/inference/latent_function_inference/__init__.py
index ee459a76..b28e46c4 100644
--- a/GPy/inference/latent_function_inference/__init__.py
+++ b/GPy/inference/latent_function_inference/__init__.py
@@ -29,6 +29,7 @@ from exact_gaussian_inference import ExactGaussianInference
 from laplace import Laplace
 from GPy.inference.latent_function_inference.var_dtc import VarDTC
 from expectation_propagation import EP
+from expectation_propagation_dtc import EPDTC
 from dtc import DTC
 from fitc import FITC
 from var_dtc_parallel import VarDTC_minibatch
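After patch 05, both EP flavours sit behind one import path; a quick smoke test (assuming the package builds on this branch):

    from GPy.inference.latent_function_inference import EP, EPDTC

    ep = EP()        # dense EP, used by GPClassification
    epdtc = EPDTC()  # EP over inducing inputs, used by SparseGPClassification
    # Patch 04 adds an ignored Z=None argument to EP.inference so that both
    # classes can be driven through the same inference interface.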
From b8196a5408b7abed94802ee5e501fbea9dee19bb Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 16 Apr 2014 11:08:21 +0100
Subject: [PATCH 06/10] minor change

---
 GPy/models/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index d0988c9e..a253c63d 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -16,4 +16,3 @@ from gradient_checker import GradientChecker
 from ss_gplvm import SSGPLVM
 from gp_coregionalized_regression import GPCoregionalizedRegression
 from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
-#.py file not included!!! #from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression

From 2e1a75aa39951fa8a0ca74e290bdebb88ebbc0f6 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Wed, 16 Apr 2014 11:08:38 +0100
Subject: [PATCH 07/10] new file

---
 .../expectation_propagation_dtc.py | 123 ++++++++++++++++++
 1 file changed, 123 insertions(+)
 create mode 100644 GPy/inference/latent_function_inference/expectation_propagation_dtc.py

diff --git a/GPy/inference/latent_function_inference/expectation_propagation_dtc.py b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py
new file mode 100644
index 00000000..3625a5bf
--- /dev/null
+++ b/GPy/inference/latent_function_inference/expectation_propagation_dtc.py
@@ -0,0 +1,123 @@
+import numpy as np
+from ...util.linalg import pdinv,jitchol,DSYR,tdot,dtrtrs, dpotrs
+from expectation_propagation import EP
+from posterior import Posterior
+log_2_pi = np.log(2*np.pi)
+
+class EPDTC(EP):
+    #def __init__(self, epsilon=1e-6, eta=1., delta=1.):
+
+    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None):
+        num_data, output_dim = X.shape
+        assert output_dim ==1, "ep in 1D only (for now!)"
+
+        Kmm = kern.K(Z)
+        Kmn = kern.K(Z,X)
+
+        Lm = jitchol(Kmm)
+        Lmi = dtrtrs(Lm,np.eye(Lm.shape[0]))[0]
+        Kmmi = np.dot(Lmi.T,Lmi)
+        KmmiKmn = np.dot(Kmmi,Kmn)
+        K = np.dot(Kmn.T,KmmiKmn)
+
+
+        mu, Sigma, mu_tilde, tau_tilde, Z_hat = self.expectation_propagation(Kmm, Kmn, Y, likelihood, Y_metadata)
+
+        Wi, LW, LWi, W_logdet = pdinv(K + np.diag(1./tau_tilde))
+
+        alpha, _ = dpotrs(LW, mu_tilde, lower=1)
+
+        log_marginal = 0.5*(-num_data * log_2_pi - W_logdet - np.sum(alpha * mu_tilde)) # TODO: add log Z_hat??
+
+        dL_dK = 0.5 * (tdot(alpha[:,None]) - Wi)
+
+        dL_dthetaL = np.zeros(likelihood.size)#TODO: derivatives of the likelihood parameters
+
+        return Posterior(woodbury_inv=Wi, woodbury_vector=alpha, K=K), log_marginal, {'dL_dK':dL_dK, 'dL_dthetaL':dL_dthetaL}
+
+
+    def expectation_propagation(self, Kmm, Kmn, Y, likelihood, Y_metadata):
+
+        num_data, data_dim = Y.shape
+        assert data_dim == 1, "This EP method only works for 1D outputs"
+
+        KmnKnm = np.dot(Kmn,Kmn.T)
+        Lm = jitchol(Kmm)
+        Lmi = dtrtrs(Lm,np.eye(Lm.shape[0]))[0] #chol_inv(Lm)
+        Kmmi = np.dot(Lmi.T,Lmi)
+        KmmiKmn = np.dot(Kmmi,Kmn)
+        Qnn_diag = np.sum(Kmn*KmmiKmn,-2)
+        LLT0 = Kmm.copy()
+
+        #Initial values - Posterior distribution parameters: q(f|X,Y) = N(f|mu,Sigma)
+        mu = np.zeros(num_data)
+        LLT = Kmm.copy() #Sigma = K.copy()
+        Sigma_diag = Qnn_diag.copy()
+
+        #Initial values - Marginal moments
+        Z_hat = np.empty(num_data,dtype=np.float64)
+        mu_hat = np.empty(num_data,dtype=np.float64)
+        sigma2_hat = np.empty(num_data,dtype=np.float64)
+
+        #initial values - Gaussian factors
+        if self.old_mutilde is None:
+            tau_tilde, mu_tilde, v_tilde = np.zeros((3, num_data))
+        else:
+            assert self.old_mutilde.size == num_data, "data size mis-match: did you change the data? try resetting!"
+            mu_tilde, v_tilde = self.old_mutilde, self.old_vtilde
+            tau_tilde = v_tilde/mu_tilde
+
+        #Approximation
+        tau_diff = self.epsilon + 1.
+        v_diff = self.epsilon + 1.
+        iterations = 0
+        while (tau_diff > self.epsilon) or (v_diff > self.epsilon):
+            update_order = np.random.permutation(num_data)
+            for i in update_order:
+                #Cavity distribution parameters
+                tau_cav = 1./Sigma_diag[i] - self.eta*tau_tilde[i]
+                v_cav = mu[i]/Sigma_diag[i] - self.eta*v_tilde[i]
+                #Marginal moments
+                Z_hat[i], mu_hat[i], sigma2_hat[i] = likelihood.moments_match_ep(Y[i], tau_cav, v_cav)#, Y_metadata=None)#=(None if Y_metadata is None else Y_metadata[i]))
+                #Site parameters update
+                delta_tau = self.delta/self.eta*(1./sigma2_hat[i] - 1./Sigma_diag[i])
+                delta_v = self.delta/self.eta*(mu_hat[i]/sigma2_hat[i] - mu[i]/Sigma_diag[i])
+                tau_tilde[i] += delta_tau
+                v_tilde[i] += delta_v
+                #Posterior distribution parameters update
+
+                #DSYR(Sigma, Sigma[:,i].copy(), -delta_tau/(1.+ delta_tau*Sigma[i,i]))
+                DSYR(LLT,Kmn[:,i].copy(),delta_tau)
+                L = jitchol(LLT)
+
+                V,info = dtrtrs(L,Kmn,lower=1)
+                Sigma_diag = np.sum(V*V,-2)
+                si = np.sum(V.T*V[:,i],-1)
+                mu += (delta_v-delta_tau*mu[i])*si
+                #mu = np.dot(Sigma, v_tilde)
+
+            #(re) compute Sigma and mu using full Cholesky decomposition
+            LLT = LLT0 + np.dot(Kmn*tau_tilde[None,:],Kmn.T)
+            L = jitchol(LLT)
+            V,info = dtrtrs(L,Kmn,lower=1)
+            V2,info = dtrtrs(L.T,V,lower=0)
+            #Sigma_diag = np.sum(V*V,-2)
+            #Knmv_tilde = np.dot(Kmn,v_tilde)
+            #mu = np.dot(V2.T,Knmv_tilde)
+            Sigma = np.dot(V2.T,V2)
+            mu = np.dot(Sigma,v_tilde)
+
+            #monitor convergence
+            if iterations>0:
+                tau_diff = np.mean(np.square(tau_tilde-tau_tilde_old))
+                v_diff = np.mean(np.square(v_tilde-v_tilde_old))
+            tau_tilde_old = tau_tilde.copy()
+            v_tilde_old = v_tilde.copy()
+
+            tau_diff = 0
+            v_diff = 0
+            iterations += 1
+
+        mu_tilde = v_tilde/tau_tilde
+        return mu, Sigma, mu_tilde, tau_tilde, Z_hat
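EPDTC never forms the exact n x n prior covariance: what inference() calls K is the rank-m DTC/Nystrom surrogate Knm Kmm^-1 Kmn built from the inducing inputs. A standalone NumPy sketch of that structure (toy data and a hand-rolled rbf helper, not GPy code):

    import numpy as np

    def rbf(A, B, variance=1., lengthscale=0.5):
        # squared exponential kernel between the rows of A and B
        d2 = np.sum(A**2, 1)[:, None] + np.sum(B**2, 1)[None, :] - 2.*np.dot(A, B.T)
        return variance * np.exp(-0.5 * d2 / lengthscale**2)

    np.random.seed(0)
    X = np.random.rand(100, 1)
    Z = X[np.random.permutation(100)[:10]]        # m = 10 inducing inputs, as in the model

    Kmm = rbf(Z, Z) + 1e-8 * np.eye(10)           # jitter, mimicking jitchol
    Kmn = rbf(Z, X)
    K = np.dot(Kmn.T, np.linalg.solve(Kmm, Kmn))  # the surrogate prior covariance

    print np.linalg.matrix_rank(K)                # at most m = 10, not 100

Every EP site update then reduces to a rank-1 update (DSYR) of an m x m factor, which is what keeps the per-sweep cost low.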
From 8abc45c4caff22c7c3742af2108496c529c61b99 Mon Sep 17 00:00:00 2001
From: mzwiessele
Date: Thu, 17 Apr 2014 15:01:43 +0100
Subject: [PATCH 08/10] bugfix: mixed up global and local index in unfixing

---
 GPy/core/parameterization/index_operations.py |  2 +-
 GPy/core/parameterization/parameter_core.py   | 35 ++++++++++++-------
 GPy/kern/__init__.py                          |  4 +--
 GPy/testing/index_operations_tests.py         | 15 ++++++--
 GPy/testing/parameterized_tests.py            | 12 +++++++
 5 files changed, 49 insertions(+), 19 deletions(-)

diff --git a/GPy/core/parameterization/index_operations.py b/GPy/core/parameterization/index_operations.py
index 12b3a298..1f3ac934 100644
--- a/GPy/core/parameterization/index_operations.py
+++ b/GPy/core/parameterization/index_operations.py
@@ -184,7 +184,7 @@ class ParameterIndexOperationsView(object):
     def remove(self, prop, indices):
         removed = self._param_index_ops.remove(prop, numpy.array(indices)+self._offset)
         if removed.size > 0:
-            return removed - self._size + 1
+            return removed-self._offset
         return removed
 
diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index b513ba44..68140763 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -312,7 +312,8 @@ class Indexable(object):
         This does not need to account for shaped parameters, as it
         basically just sums up the parameter sizes which come before param.
         """
-        raise NotImplementedError, "shouldnt happen, offset required from non parameterization object?"
+        return 0
+        #raise NotImplementedError, "shouldnt happen, offset required from non parameterization object?"
 
     def _raveled_index_for(self, param):
         """
@@ -320,7 +321,8 @@
         that is an int array, containing the indexes for the flattened
         param inside this parameterized logic.
         """
-        raise NotImplementedError, "shouldnt happen, raveld index transformation required from non parameterization object?"
+        return param._raveled_index()
+        #raise NotImplementedError, "shouldnt happen, raveld index transformation required from non parameterization object?"
 
 
 class Constrainable(Nameable, Indexable, Observable):
@@ -368,10 +370,10 @@
         if value is not None:
             self[:] = value
         reconstrained = self.unconstrain()
-        self._add_to_index_operations(self.constraints, reconstrained, __fixed__, warning)
-        rav_i = self._highest_parent_._raveled_index_for(self)
-        self._highest_parent_._set_fixed(rav_i)
+        index = self._add_to_index_operations(self.constraints, reconstrained, __fixed__, warning)
+        self._highest_parent_._set_fixed(self, index)
         self.notify_observers(self, None if trigger_parent else -np.inf)
+        return index
     fix = constrain_fixed
 
     def unconstrain_fixed(self):
@@ -379,7 +381,8 @@
        This parameter will no longer be fixed.
        """
        unconstrained = self.unconstrain(__fixed__)
-       self._highest_parent_._set_unfixed(unconstrained)
+       self._highest_parent_._set_unfixed(self, unconstrained)
+       return unconstrained
     unfix = unconstrain_fixed
 
     def _ensure_fixes(self):
@@ -388,14 +391,16 @@
        # Param: ones(self._realsize_
        if not self._has_fixes(): self._fixes_ = np.ones(self.size, dtype=bool)
 
-    def _set_fixed(self, index):
+    def _set_fixed(self, param, index):
        self._ensure_fixes()
-       self._fixes_[index] = FIXED
+       offset = self._offset_for(param)
+       self._fixes_[index+offset] = FIXED
        if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED
 
-    def _set_unfixed(self, index):
+    def _set_unfixed(self, param, index):
        self._ensure_fixes()
-       self._fixes_[index] = UNFIXED
+       offset = self._offset_for(param)
+       self._fixes_[index+offset] = UNFIXED
        if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED
 
     def _connect_fixes(self):
@@ -469,8 +474,9 @@
         """
         self.param_array[...] = transform.initialize(self.param_array)
         reconstrained = self.unconstrain()
-        self._add_to_index_operations(self.constraints, reconstrained, transform, warning)
+        added = self._add_to_index_operations(self.constraints, reconstrained, transform, warning)
         self.notify_observers(self, None if trigger_parent else -np.inf)
+        return added
 
     def unconstrain(self, *transforms):
         """
@@ -549,7 +555,9 @@
         if warning and reconstrained.size > 0:
             # TODO: figure out which parameters have changed and only print those
             print "WARNING: reconstraining parameters {}".format(self.parameter_names() or self.name)
-        which.add(what, self._raveled_index())
+        index = self._raveled_index()
+        which.add(what, index)
+        return index
 
     def _remove_from_index_operations(self, which, transforms):
         """
@@ -561,9 +569,9 @@
         removed = np.empty((0,), dtype=int)
         for t in transforms:
             unconstrained = which.remove(t, self._raveled_index())
             removed = np.union1d(removed, unconstrained)
             if t is __fixed__:
-                self._highest_parent_._set_unfixed(unconstrained)
+                self._highest_parent_._set_unfixed(self, unconstrained)
         return removed
 
diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py
index ef99e9a6..14378f55 100644
--- a/GPy/kern/__init__.py
+++ b/GPy/kern/__init__.py
@@ -20,6 +20,6 @@ except ImportError:
 
 if sympy_available:
     from _src.symbolic import Symbolic
-    from _src.heat_eqinit import Heat_eqinit
-    from _src.ode1_eq_lfm import Ode1_eq_lfm
+    #from _src.heat_eqinit import Heat_eqinit
+    #from _src.ode1_eq_lfm import Ode1_eq_lfm
 
diff --git a/GPy/testing/index_operations_tests.py b/GPy/testing/index_operations_tests.py
index 37cec10b..49cc844a 100644
--- a/GPy/testing/index_operations_tests.py
+++ b/GPy/testing/index_operations_tests.py
@@ -24,12 +24,14 @@ class Test(unittest.TestCase):
         self.assertDictEqual(self.param_index._properties, {})
 
     def test_remove(self):
-        self.param_index.remove(three, np.r_[3:10])
+        removed = self.param_index.remove(three, np.r_[3:10])
+        self.assertListEqual(removed.tolist(), [4, 7])
         self.assertListEqual(self.param_index[three].tolist(), [2])
-        self.param_index.remove(one, [1])
+        removed = self.param_index.remove(one, [1])
+        self.assertListEqual(removed.tolist(), [])
         self.assertListEqual(self.param_index[one].tolist(), [3])
         self.assertListEqual(self.param_index.remove('not in there', []).tolist(), [])
-        self.param_index.remove(one, [9])
+        removed = self.param_index.remove(one, [9])
         self.assertListEqual(self.param_index[one].tolist(), [3])
         self.assertListEqual(self.param_index.remove('not in there', [2,3,4]).tolist(), [])
 
@@ -78,6 +80,13 @@ class Test(unittest.TestCase):
             self.assertEqual(i, i2)
             self.assertTrue(np.all(v == v2))
 
+    def test_indexview_remove(self):
+        removed = self.view.remove(two, [3])
+        self.assertListEqual(removed.tolist(), [3])
+        removed = self.view.remove(three, np.r_[:5])
+        self.assertListEqual(removed.tolist(), [0, 2])
+
+
     def test_misc(self):
         for k,v in self.param_index.copy()._properties.iteritems():
             self.assertListEqual(self.param_index[k].tolist(), v.tolist())
 
diff --git a/GPy/testing/parameterized_tests.py b/GPy/testing/parameterized_tests.py
index 57669e93..fbdedc61 100644
--- a/GPy/testing/parameterized_tests.py
+++ b/GPy/testing/parameterized_tests.py
@@ -153,6 +153,18 @@ class ParameterizedTest(unittest.TestCase):
         self.testmodel.randomize()
         np.testing.assert_equal(variances, self.testmodel['.*var'].values())
 
+    def test_fix_unfix(self):
+        fixed = self.testmodel.kern.lengthscale.fix()
+        self.assertListEqual(fixed.tolist(), [0])
+        unfixed = self.testmodel.kern.lengthscale.unfix()
+        self.testmodel.kern.lengthscale.constrain_positive()
+        self.assertListEqual(unfixed.tolist(), [0])
+
+        fixed = self.testmodel.kern.fix()
+        self.assertListEqual(fixed.tolist(), [0,1])
+        unfixed = self.testmodel.kern.unfix()
+        self.assertListEqual(unfixed.tolist(), [0,1])
+
     def test_printing(self):
         print self.test1
         print self.param
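The bug in words: ParameterIndexOperationsView.remove returned removed - self._size + 1 instead of subtracting the view's offset, so unfixing a child parameter could flip the wrong entries of the parent's _fixes_ mask. A sketch of the behaviour the new tests pin down (assumes a stock GPRegression with an RBF kernel; the returned indices are local to the parameter being fixed):

    import numpy as np
    import GPy

    X = np.random.rand(20, 1)
    Y = np.sin(X) + 0.05 * np.random.randn(20, 1)
    m = GPy.models.GPRegression(X, Y)

    fixed = m.kern.lengthscale.fix()      # now returns local indices, e.g. [0]
    unfixed = m.kern.lengthscale.unfix()  # same local indices back
    # _set_fixed/_set_unfixed now receive the parameter itself and translate
    # these local indices with _offset_for(param) before touching _fixes_.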
From 10800c3c57608305dae1890f0c222c2878b3ab34 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Thu, 17 Apr 2014 15:27:53 +0100
Subject: [PATCH 09/10] New file, special request.

---
 GPy/models/__init__.py                      |  1 +
 GPy/models/gp_heteroscedastic_regression.py | 39 +++++++++++++++++++++
 2 files changed, 40 insertions(+)
 create mode 100644 GPy/models/gp_heteroscedastic_regression.py

diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index a253c63d..299d5e65 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -16,3 +16,4 @@ from gradient_checker import GradientChecker
 from ss_gplvm import SSGPLVM
 from gp_coregionalized_regression import GPCoregionalizedRegression
 from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
+from gp_heteroscedastic_regression import GPHeteroscedasticRegression

diff --git a/GPy/models/gp_heteroscedastic_regression.py b/GPy/models/gp_heteroscedastic_regression.py
new file mode 100644
index 00000000..5a05fc98
--- /dev/null
+++ b/GPy/models/gp_heteroscedastic_regression.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2012 - 2014 the GPy Authors (see AUTHORS.txt)
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+import numpy as np
+from ..core import GP
+from .. import likelihoods
+from .. import kern
+from .. import util
+
+class GPHeteroscedasticRegression(GP):
+    """
+    Gaussian Process model for heteroscedastic regression
+
+    This is a thin wrapper around the models.GP class, with a set of sensible defaults
+
+    :param X: input observations
+    :param Y: observed values
+    :param kernel: a GPy kernel, defaults to rbf
+    """
+    def __init__(self, X, Y, kernel=None, Y_metadata=None):
+
+        Ny = Y.shape[0]
+
+        if Y_metadata is None:
+            Y_metadata = {'output_index':np.arange(Ny)[:,None]}
+        else:
+            assert Y_metadata['output_index'].shape[0] == Ny
+
+        if kernel is None:
+            kernel = kern.RBF(X.shape[1])
+
+        #Likelihood
+        likelihoods_list = [likelihoods.Gaussian(name="Gaussian_noise_%s" %j) for j in range(Ny)]
+        likelihood = likelihoods.MixedNoise(likelihoods_list=likelihoods_list)
+
+        super(GPHeteroscedasticRegression, self).__init__(X,Y,kernel,likelihood, Y_metadata=Y_metadata)
+
+    def plot(self,*args):
+        raise NotImplementedError
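A usage sketch for the new model (toy data; note that MixedNoise introduces one Gaussian noise variance per observation, so the optimizer sees N noise parameters on top of the kernel's):

    import numpy as np
    import GPy

    N = 30
    X = np.linspace(0., 10., N)[:, None]
    noise = np.linspace(0.05, 0.5, N)[:, None]   # noise level grows along X
    Y = np.sin(X) + np.random.randn(N, 1) * noise

    m = GPy.models.GPHeteroscedasticRegression(X, Y)
    m.optimize()
    print m    # lists the N Gaussian_noise_* variances from MixedNoise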
From ac4103a537b7bf5d5901cf10512e68b92bff7d10 Mon Sep 17 00:00:00 2001
From: Ricardo
Date: Thu, 17 Apr 2014 15:51:47 +0100
Subject: [PATCH 10/10] new test heteroscedastic noise model

---
 GPy/testing/model_tests.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/GPy/testing/model_tests.py b/GPy/testing/model_tests.py
index 6f80f418..0eb45096 100644
--- a/GPy/testing/model_tests.py
+++ b/GPy/testing/model_tests.py
@@ -401,6 +401,16 @@ class GradientTests(np.testing.TestCase):
         m.constrain_fixed('.*rbf_var', 1.)
         self.assertTrue(m.checkgrad())
 
+    def test_gp_heteroscedastic_regression(self):
+        num_obs = 25
+        X = np.random.randint(0,140,num_obs)
+        X = X[:,None]
+        Y = 25. + np.sin(X/20.) * 2. + np.random.rand(num_obs)[:,None]
+        kern = GPy.kern.Bias(1) + GPy.kern.RBF(1)
+        m = GPy.models.GPHeteroscedasticRegression(X,Y,kern)
+        self.assertTrue(m.checkgrad())
+
+
 if __name__ == "__main__":
     print "Running unit tests, please be (very) patient..."
     unittest.main()
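To exercise just the new test without running the whole suite (plain unittest machinery, nothing GPy-specific assumed):

    import unittest
    from GPy.testing.model_tests import GradientTests

    # build a one-test suite and run it verbosely
    suite = unittest.TestSuite()
    suite.addTest(GradientTests('test_gp_heteroscedastic_regression'))
    unittest.TextTestRunner(verbosity=2).run(suite)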