diff --git a/GPy/examples/uncertain_input_GP_regression_demo.py b/GPy/examples/uncertain_input_GP_regression_demo.py index 3d2c51f0..f0be5fe2 100644 --- a/GPy/examples/uncertain_input_GP_regression_demo.py +++ b/GPy/examples/uncertain_input_GP_regression_demo.py @@ -1,7 +1,6 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) - import pylab as pb import numpy as np import GPy @@ -9,18 +8,15 @@ pb.ion() pb.close('all') -###################################### -## 1 dimensional example - # sample inputs and outputs S = np.ones((20,1)) X = np.random.uniform(-3.,3.,(20,1)) Y = np.sin(X)+np.random.randn(20,1)*0.05 -k = GPy.kern.bias(1) + GPy.kern.white(1) +k = GPy.kern.rbf(1) + GPy.kern.white(1) # create simple GP model -m = GPy.models.uncertain_input_GP_regression(X,Y,S,kernel=k) +m = GPy.models.sparse_GP_regression(X,Y,X_uncertainty=S,kernel=k) # contrain all parameters to be positive m.constrain_positive('(variance|prec)') diff --git a/GPy/models/sparse_GPLVM.py b/GPy/models/sparse_GPLVM.py index 96c3e530..c5125c85 100644 --- a/GPy/models/sparse_GPLVM.py +++ b/GPy/models/sparse_GPLVM.py @@ -54,5 +54,6 @@ class sparse_GPLVM(sparse_GP_regression, GPLVM): def plot(self): GPLVM.plot(self) + #passing Z without a small amount of jitter will induce the white kernel where we don't want it! mu, var = sparse_GP_regression.predict(self, self.Z+np.random.randn(*self.Z.shape)*0.0001) pb.plot(mu[:, 0] , mu[:, 1], 'ko') diff --git a/GPy/models/sparse_GP_regression.py b/GPy/models/sparse_GP_regression.py index da19d80a..f34819dc 100644 --- a/GPy/models/sparse_GP_regression.py +++ b/GPy/models/sparse_GP_regression.py @@ -1,7 +1,6 @@ # Copyright (c) 2012, GPy authors (see AUTHORS.txt). 
# Licensed under the BSD 3-clause license (see LICENSE.txt) - import numpy as np import pylab as pb from ..util.linalg import mdot, jitchol, chol_inv, pdinv @@ -50,7 +49,7 @@ class sparse_GP_regression(GP_regression): self.has_uncertain_inputs=False else: assert X_uncertainty.shape==X.shape - self.has_uncertain_inputs=False + self.has_uncertain_inputs=True self.X_uncertainty = X_uncertainty GP_regression.__init__(self, X, Y, kernel=kernel, normalize_X=normalize_X, normalize_Y=normalize_Y) @@ -99,8 +98,7 @@ class sparse_GP_regression(GP_regression): # Compute dL_dpsi self.dL_dpsi0 = - 0.5 * self.D * self.beta * np.ones(self.N) - dC_dpsi1 = (self.LLambdai.T[:,:, None, None] * self.V) - self.dL_dpsi1 = (dC_dpsi1*self.C[None,:,None,:]).sum(1).sum(-1) + self.dL_dpsi1 = (self.LLambdai.T[:,:,None,None]*self.V*self.C[None,:,None,:]).sum(1).sum(-1) self.dL_dpsi2 = - 0.5 * self.beta * (self.D*(self.LBL_inv - self.Kmmi) + self.G) # Compute dL_dKmm