Merge branch 'master' of github.com:SheffieldML/GPy

James Hensman 2013-03-11 18:56:43 +00:00
commit 9b8c4eae25
8 changed files with 90 additions and 42 deletions

View file

@@ -194,7 +194,7 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
    # Remove the mean (no bias kernel to ensure signal/noise is in RBF/white)
    data['Y'] = data['Y'] - np.mean(data['Y'])
    lls = GPy.examples.regression.contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)
    lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)
    pb.contour(length_scales, log_SNRs, np.exp(lls), 20)
    ax = pb.gca()
    pb.xlabel('length scale')
@@ -229,7 +229,7 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
    ax.set_ylim(ylim)
    return (models, lls)
def contour_data(data, length_scales, log_SNRs, signal_kernel_call=GPy.kern.rbf):
def _contour_data(data, length_scales, log_SNRs, signal_kernel_call=GPy.kern.rbf):
    """Evaluate the GP objective function for a given data set over a range of signal-to-noise ratios and a range of lengthscales.
    :data_set: A data set from the utils.datasets directory.
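For orientation, here is a minimal usage sketch of the renamed helper; the grids and the dict layout of data are illustrative assumptions read off the call site above:

    import numpy as np
    import GPy

    # stand-in for a utils.datasets entry: a dict holding 'X' and a zero-mean 'Y'
    data = {'X': np.linspace(0., 1., 20)[:, None],
            'Y': np.random.randn(20, 1)}
    data['Y'] = data['Y'] - np.mean(data['Y'])

    length_scales = np.linspace(0.05, 2., 30)  # assumed grid
    log_SNRs = np.linspace(-2., 2., 30)        # assumed grid
    lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)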

View file

@@ -6,14 +6,14 @@
Code of Tutorials
"""
import pylab as pb
pb.ion()
import numpy as np
import GPy
def tuto_GP_regression():
    """The detailed explanations of the commands used in this file can be found in the tutorial section"""
    import pylab as pb
    pb.ion()
    import numpy as np
    import GPy
    X = np.random.uniform(-3.,3.,(20,1))
    Y = np.sin(X) + np.random.randn(20,1)*0.05
@@ -39,11 +39,6 @@ def tuto_GP_regression():
    # 2-dimensional example #
    ###########################
    import pylab as pb
    pb.ion()
    import numpy as np
    import GPy
    # sample inputs and outputs
    X = np.random.uniform(-3.,3.,(50,2))
    Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(50,1)*0.05
@@ -67,9 +62,6 @@ def tuto_GP_regression():
def tuto_kernel_overview():
    """The detailed explanations of the commands used in this file can be found in the tutorial section"""
    import pylab as pb
    import numpy as np
    import GPy
    pb.ion()
    ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(D=1, variance=1., lengthscale=1.)

View file

@@ -12,7 +12,7 @@ class rbf(kernpart):
    .. math::
       k(r) = \sigma^2 \exp(- \frac{1}{2}r^2) \ \ \ \ \ \\text{ where } r^2 = \sum_{i=1}^d \frac{ (x_i-x^\prime_i)^2}{\ell_i^2}}
       k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) \ \ \ \ \ \\text{ where } r^2 = \sum_{i=1}^d \\frac{ (x_i-x^\prime_i)^2}{\ell_i^2}
    where \ell_i is the lengthscale, \sigma^2 the variance and d the dimensionality of the input.
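As a plain-numpy illustration of the corrected formula (a sketch of the math only, not GPy's implementation; rbf_cov is a name of our choosing):

    import numpy as np

    def rbf_cov(X, X2, variance=1., lengthscale=1.):
        # r^2 = sum_i (x_i - x'_i)^2 / l_i^2, evaluated for every pair of rows
        diff = (X[:, None, :] - X2[None, :, :]) / lengthscale
        r2 = np.sum(diff ** 2, axis=-1)
        return variance * np.exp(-0.5 * r2)

    X = np.random.uniform(-3., 3., (5, 1))
    K = rbf_cov(X, X)  # 5x5 covariance matrix; its diagonal equals the variance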
@@ -55,7 +55,6 @@ class rbf(kernpart):
        self._X, self._X2, self._params = np.empty(shape=(3,1))
    def _get_params(self):
        foo
        return np.hstack((self.variance,self.lengthscale))
    def _set_params(self,x):

View file

@@ -83,3 +83,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
    def _log_likelihood_gradients(self):
        return np.hstack((self.dL_dmuS().flatten(), sparse_GP._log_likelihood_gradients(self)))
    def plot_latent(self, *args, **kwargs):
        input_1, input_2 = GPLVM.plot_latent(self, *args, **kwargs)
        pb.plot(self.Z[:, input_1], self.Z[:, input_2], '^w')

View file

@@ -117,6 +117,4 @@ class GPLVM(GP):
        pb.xlim(xmin[0],xmax[0])
        pb.ylim(xmin[1],xmax[1])
        return input_1, input_2

View file

@@ -55,3 +55,7 @@ class sparse_GPLVM(sparse_GP_regression, GPLVM):
        # passing Z without a small amount of jitter will induce the white kernel where we don't want it!
        mu, var, upper, lower = sparse_GP_regression.predict(self, self.Z+np.random.randn(*self.Z.shape)*0.0001)
        pb.plot(mu[:, 0] , mu[:, 1], 'ko')
    def plot_latent(self, *args, **kwargs):
        input_1, input_2 = GPLVM.plot_latent(self, *args, **kwargs)
        pb.plot(self.Z[:, input_1], self.Z[:, input_2], '^w')

View file

@@ -4,22 +4,73 @@
import unittest
import numpy as np
import GPy
import inspect
import pkgutil
import os
import random
class ExamplesTests(unittest.TestCase):
    def test_check_model_returned(self):
        pass
    def _checkgrad(self, model):
        self.assertTrue(model.checkgrad())
    def test_model_checkgrads(self):
        pass
    def _model_instance(self, model):
        self.assertTrue(isinstance(model, GPy.core.model))
    def test_all_examples(self):
        pass
# Load models
"""
def model_instance_generator(model):
    def check_model_returned(self):
        self._model_instance(model)
    return check_model_returned
# Loop through models
# for model in models:
#     self.assertTrue(m.checkgrad())
def checkgrads_generator(model):
    def model_checkgrads(self):
        self._checkgrad(model)
    return model_checkgrads
"""
def model_checkgrads(model):
    model.randomize()
    assert model.checkgrad()
def model_instance(model):
    assert isinstance(model, GPy.core.model)
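# nose collects test_models below as a generator: each `yield check, model` pair
# runs as a separate test case, labelled via the check function's `description` attribute.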
def test_models():
    examples_path = os.path.dirname(GPy.examples.__file__)
    # Load modules
    for loader, module_name, is_pkg in pkgutil.iter_modules([examples_path]):
        # Load examples
        module_examples = loader.find_module(module_name).load_module(module_name)
        print "MODULE", module_examples
        print "Before"
        print inspect.getmembers(module_examples, predicate=inspect.isfunction)
        functions = [func for func in inspect.getmembers(module_examples, predicate=inspect.isfunction) if not func[0].startswith('_')][::-1]
        print "After"
        print functions
        for example in functions:
            print "Testing example: ", example[0]
            # Generate model
            model = example[1]()
            print model
            # Create tests for instance check
            """
            test = model_instance_generator(model)
            test.__name__ = 'test_instance_%s' % example[0]
            setattr(ExamplesTests, test.__name__, test)
            # Create tests for checkgrads check
            test = checkgrads_generator(model)
            test.__name__ = 'test_checkgrads_%s' % example[0]
            setattr(ExamplesTests, test.__name__, test)
            """
            model_checkgrads.description = 'test_checkgrads_%s' % example[0]
            yield model_checkgrads, model
            model_instance.description = 'test_instance_%s' % example[0]
            yield model_instance, model
if __name__ == "__main__":
    print "Running unit tests, please be (very) patient..."

View file

@@ -22,7 +22,7 @@ We advise the reader to start with copy-pasting an existing kernel and to modify
**Header**
The header is similar for all kernels::
The header is similar for all kernels: ::
    from kernpart import kernpart
    import numpy as np
@@ -35,7 +35,7 @@ The implementation of this function is mandatory.
For all kernparts the first parameter ``D`` corresponds to the dimension of the input space, and the following parameters stand for the parameterization of the kernel.
The following attributes are compulsory: ``self.D`` (the dimension, integer), ``self.name`` (name of the kernel, string), ``self.Nparam`` (number of parameters, integer).::
The following attributes are compulsory: ``self.D`` (the dimension, integer), ``self.name`` (name of the kernel, string), ``self.Nparam`` (number of parameters, integer). ::
    def __init__(self,D,variance=1.,lengthscale=1.,power=1.):
        assert D == 1, "For this kernel we assume D=1"
@@ -50,7 +50,7 @@ The following attributes are compulsory: ``self.D`` (the dimension, integer), ``
The implementation of this function is mandatory.
This function returns a one-dimensional array of length ``self.Nparam`` containing the values of the parameters.::
This function returns a one-dimensional array of length ``self.Nparam`` containing the values of the parameters. ::
    def _get_params(self):
        return np.hstack((self.variance,self.lengthscale,self.power))
@@ -59,7 +59,7 @@ This function returns a one-dimensional array of length ``self.Nparam`` containi
The implementation of this function is mandatory.
The input is a one-dimensional array of length ``self.Nparam`` containing the values of the parameters. The function has no output, but it updates the values of the attributes associated with the parameters (such as ``self.variance``, ``self.lengthscale``, ...).::
The input is a one-dimensional array of length ``self.Nparam`` containing the values of the parameters. The function has no output, but it updates the values of the attributes associated with the parameters (such as ``self.variance``, ``self.lengthscale``, ...). ::
    def _set_params(self,x):
        self.variance = x[0]
@@ -70,7 +70,7 @@ The input is a one-dimensional array of length ``self.Nparam`` containing the va
The implementation of this function is mandatory.
It returns a list of strings of length ``self.Nparam`` corresponding to the parameter names.::
It returns a list of strings of length ``self.Nparam`` corresponding to the parameter names. ::
    def _get_param_names(self):
        return ['variance','lengthscale','power']
@@ -79,7 +79,7 @@ It returns a list of strings of length ``self.Nparam`` corresponding to the para
The implementation of this function is mandatory.
This function computes the covariance matrix associated with the inputs X, X2 (np.arrays with an arbitrary number of rows, say :math:`n_1` and :math:`n_2`, and ``self.D`` columns). It does not return anything; instead, it adds the :math:`n_1 \times n_2` covariance matrix of the kernpart to the object ``target`` (a :math:`n_1 \times n_2` np.array). This trick allows the covariance matrix of a kernel made of many kernparts to be computed with limited memory use.::
This function computes the covariance matrix associated with the inputs X, X2 (np.arrays with an arbitrary number of rows, say :math:`n_1` and :math:`n_2`, and ``self.D`` columns). It does not return anything; instead, it adds the :math:`n_1 \times n_2` covariance matrix of the kernpart to the object ``target`` (a :math:`n_1 \times n_2` np.array). This trick allows the covariance matrix of a kernel made of many kernparts to be computed with limited memory use. ::
    def K(self,X,X2,target):
        if X2 is None: X2 = X
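To make the in-place ``target`` convention concrete, here is a self-contained toy sketch (``constpart`` is a made-up kernpart for illustration, not part of GPy)::

    import numpy as np

    class constpart:
        """A toy kernpart whose covariance is a constant, added in place."""
        def __init__(self, c):
            self.c = c
        def K(self, X, X2, target):
            target += self.c  # add into target, never overwrite it

    X = np.random.randn(4, 1)
    target = np.zeros((4, 4))  # one buffer shared by all the parts
    for part in [constpart(1.), constpart(0.5)]:
        part.K(X, X, target)  # each kernpart accumulates its contribution
    # target now holds the summed covariance of both parts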
@@ -90,7 +90,7 @@ This function is used to compute the covariance matrix associated with the input
The implementation of this function is mandatory.
This function is similar to ``K``, but it computes only the values of the kernel on the diagonal. Thus, ``target`` is a one-dimensional np.array of length :math:`n_1`.::
This function is similar to ``K``, but it computes only the values of the kernel on the diagonal. Thus, ``target`` is a one-dimensional np.array of length :math:`n_1`. ::
    def Kdiag(self,X,target):
        target += self.variance
@@ -100,7 +100,7 @@ This function is similar to ``K`` but it computes only the values of the kernel
This function is required for the optimization of the parameters.
Computes the derivative of the likelihood with respect to the parameters. As previously, the values are added to the object ``target``, a one-dimensional np.array of length ``self.Nparam``. For example, if the kernel is parameterized by :math:`\sigma^2,\ \theta`, then :math:`\frac{dL}{d\sigma^2} = \frac{dL}{d K} \frac{dK}{d\sigma^2}` (summed over the entries of :math:`K`) is added to the first element of ``target`` and :math:`\frac{dL}{d\theta} = \frac{dL}{d K} \frac{dK}{d\theta}` to the second.::
Computes the derivative of the likelihood with respect to the parameters. As previously, the values are added to the object ``target``, a one-dimensional np.array of length ``self.Nparam``. For example, if the kernel is parameterized by :math:`\sigma^2,\ \theta`, then :math:`\frac{dL}{d\sigma^2} = \frac{dL}{d K} \frac{dK}{d\sigma^2}` (summed over the entries of :math:`K`) is added to the first element of ``target`` and :math:`\frac{dL}{d\theta} = \frac{dL}{d K} \frac{dK}{d\theta}` to the second. ::
    def dK_dtheta(self,dL_dK,X,X2,target):
        if X2 is None: X2 = X
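To spell out the summation above, consider a toy kernpart whose covariance is the constant :math:`\sigma^2`, so that :math:`dK/d\sigma^2` is a matrix of ones (an illustration only, not the rational quadratic)::

    import numpy as np

    def dK_dtheta(dL_dK, X, X2, target):
        # dL/dvariance = sum_ij dL/dK_ij * dK_ij/dvariance, and dK_ij/dvariance = 1
        target[0] += np.sum(dL_dK)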
@@ -119,7 +119,7 @@ Computes the derivative of the likelihood. As previously, the values are added t
This function is required for BGPLVM, sparse models and uncertain inputs.
As previously, ``target`` is an array of length ``self.Nparam`` and :math:`\frac{dL}{d Kdiag} \frac{dKdiag}{dparam}` is added to each element.::
As previously, ``target`` is an array of length ``self.Nparam`` and :math:`\frac{dL}{d Kdiag} \frac{dKdiag}{dparam}` is added to each element. ::
    def dKdiag_dtheta(self,dL_dKdiag,X,target):
        target[0] += np.sum(dL_dKdiag)
@@ -129,7 +129,7 @@ As previously, target is an ``self.Nparam`` array and :math:`\frac{dL}{d Kdiag}
This function is required for GPLVM, BGPLVM, sparse models and uncertain inputs.
Computes the derivative of the likelihood with respect to the inputs ``X`` (a :math:`n \times D` np.array). The result is added to ``target``, which is a :math:`n \times D` np.array.::
Computes the derivative of the likelihood with respect to the inputs ``X`` (a :math:`n \times D` np.array). The result is added to ``target``, which is a :math:`n \times D` np.array. ::
    def dK_dX(self,dL_dK,X,X2,target):
        """derivative of the covariance matrix with respect to X."""
@@ -141,7 +141,7 @@ Computes the derivative of the likelihood with respect to the inputs ``X`` (a :m
**dKdiag_dX(self,dL_dKdiag,X,target)**
This function is required for BGPLVM, sparse models and uncertain inputs. As for ``dKdiag_dtheta``, :math:`\frac{dL}{d Kdiag} \frac{dKdiag}{dX}` is added to each element of ``target``.::
This function is required for BGPLVM, sparse models and uncertain inputs. As for ``dKdiag_dtheta``, :math:`\frac{dL}{d Kdiag} \frac{dKdiag}{dX}` is added to each element of ``target``. ::
    def dKdiag_dX(self,dL_dKdiag,X,target):
        pass
@@ -167,7 +167,7 @@ The following line should be added in the preamble of the file::
    from rational_quadratic import rational_quadratic as rational_quadratic_part
as well as the following block::
as well as the following block ::
    def rational_quadratic(D,variance=1., lengthscale=1., power=1.):
        part = rational_quadratic_part(D,variance, lengthscale, power)
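Once both edits to ``kern.py`` are in place, the new kernel should be usable like the built-in ones (a sketch assuming the steps above have been followed)::

    import GPy
    ker = GPy.kern.rational_quadratic(D=1, variance=1., lengthscale=2., power=1.)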