coregionalisation changed to coregionalization

This commit is contained in:
Ricardo 2013-09-14 17:23:17 +01:00
parent 1bc9374717
commit 4bb2ea9606
9 changed files with 24 additions and 79 deletions

View file

@ -9,9 +9,9 @@ import pylab as pb
import numpy as np
import GPy
def coregionalisation_toy2(max_iters=100):
def coregionalization_toy2(max_iters=100):
"""
A simple demonstration of coregionalisation on two sinusoidal functions.
A simple demonstration of coregionalization on two sinusoidal functions.
"""
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
@ -22,7 +22,7 @@ def coregionalisation_toy2(max_iters=100):
Y = np.vstack((Y1, Y2))
k1 = GPy.kern.rbf(1) + GPy.kern.bias(1)
k2 = GPy.kern.coregionalise(2,1)
k2 = GPy.kern.coregionalize(2,1)
k = k1**k2 #k = k1.prod(k2,tensor=True)
m = GPy.models.GPRegression(X, Y, kernel=k)
m.constrain_fixed('.*rbf_var', 1.)
@ -40,9 +40,9 @@ def coregionalisation_toy2(max_iters=100):
pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
return m
def coregionalisation_toy(max_iters=100):
def coregionalization_toy(max_iters=100):
"""
A simple demonstration of coregionalisation on two sinusoidal functions.
A simple demonstration of coregionalization on two sinusoidal functions.
"""
X1 = np.random.rand(50, 1) * 8
X2 = np.random.rand(30, 1) * 5
@ -63,9 +63,9 @@ def coregionalisation_toy(max_iters=100):
axes[1].set_title('Output 1')
return m
def coregionalisation_sparse(max_iters=100):
def coregionalization_sparse(max_iters=100):
"""
A simple demonstration of coregionalisation on two sinusoidal functions using sparse approximations.
A simple demonstration of coregionalization on two sinusoidal functions using sparse approximations.
"""
X1 = np.random.rand(500, 1) * 8
X2 = np.random.rand(300, 1) * 5
@ -76,19 +76,14 @@ def coregionalisation_sparse(max_iters=100):
Y = np.vstack((Y1, Y2))
num_inducing = 40
Z = np.hstack((np.random.rand(num_inducing, 1) * 8, np.random.randint(0, 2, num_inducing)[:, None]))
Z = np.hstack((np.random.rand(num_inducing, 1) * 8, np.random.randint(0, 2, num_inducing)[:, None]))
#Z = np.hstack((np.random.rand(num_inducing, 1) * 8, np.random.randint(0, 2, num_inducing)[:, None]))
k1 = GPy.kern.rbf(1)
m = GPy.models.SparseGPMultioutputRegression(X_list=[X1,X2],Y_list=[Y1,Y2],kernel_list=[k1],num_inducing=20)
#k2 = GPy.kern.coregionalise(2, 2)
#k = k1**k2 #.prod(k2, tensor=True) # + GPy.kern.white(2,0.001)
#m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
m.constrain_fixed('.*rbf_var', 1.)
#m.constrain_fixed('iip')
#m.constrain_bounded('noise_variance', 1e-3, 1e-1)
m.constrain_bounded('noise_variance', 1e-3, 1e-1)
# m.optimize_restarts(5, robust=True, messages=1, max_iters=max_iters, optimizer='bfgs')
m.optimize(max_iters=max_iters)
@ -97,19 +92,6 @@ def coregionalisation_sparse(max_iters=100):
m.plot(output=1,ax=axes[1])
axes[0].set_title('Output 0')
axes[1].set_title('Output 1')
# plotting:
#pb.figure()
#Xtest1 = np.hstack((np.linspace(0, 9, 100)[:, None], np.zeros((100, 1))))
#Xtest2 = np.hstack((np.linspace(0, 9, 100)[:, None], np.ones((100, 1))))
#mean, var, low, up = m.predict(Xtest1)
#GPy.util.plot.gpplot(Xtest1[:, 0], mean, low, up)
#mean, var, low, up = m.predict(Xtest2)
#GPy.util.plot.gpplot(Xtest2[:, 0], mean, low, up)
#pb.plot(X1[:, 0], Y1[:, 0], 'rx', mew=2)
#pb.plot(X2[:, 0], Y2[:, 0], 'gx', mew=2)
#y = pb.ylim()[0]
#pb.plot(Z[:, 0][Z[:, 1] == 0], np.zeros(np.sum(Z[:, 1] == 0)) + y, 'r|', mew=2)
#pb.plot(Z[:, 0][Z[:, 1] == 1], np.zeros(np.sum(Z[:, 1] == 1)) + y, 'g|', mew=2)
return m
def epomeo_gpx(max_iters=100):
@ -135,8 +117,8 @@ def epomeo_gpx(max_iters=100):
np.random.randint(0, 4, num_inducing)[:, None]))
k1 = GPy.kern.rbf(1)
k2 = GPy.kern.coregionalise(output_dim=5, rank=5)
k = k1**k2
k2 = GPy.kern.coregionalize(output_dim=5, rank=5)
k = k1**k2
m = GPy.models.SparseGPRegression(t, Y, kernel=k, Z=Z, normalize_Y=True)
m.constrain_fixed('.*rbf_var', 1.)

View file

@ -340,7 +340,7 @@ def symmetric(k):
k_.parts = [symmetric.Symmetric(p) for p in k.parts]
return k_
def coregionalise(num_outputs,W_columns=1, W=None, kappa=None):
def coregionalize(num_outputs,W_columns=1, W=None, kappa=None):
"""
Coregionalization matrix B, of the form:
.. math::
@ -352,18 +352,18 @@ def coregionalise(num_outputs,W_columns=1, W=None, kappa=None):
it is obtained as the tensor product between a kernel k(x,y) and B.
:param num_outputs: the number of outputs to corregionalise
:param num_outputs: the number of outputs to coregionalize
:type num_outputs: int
:param W_columns: number of columns of the W matrix (this parameter is ignored if parameter W is not None)
:type W_columns: int
:param W: a low rank matrix that determines the correlations between the different outputs, together with kappa it forms the coregionalisation matrix B
:param W: a low rank matrix that determines the correlations between the different outputs, together with kappa it forms the coregionalization matrix B
:type W: numpy array of dimensionality (num_outputs, W_columns)
:param kappa: a vector which allows the outputs to behave independently
:type kappa: numpy array of dimensionality (num_outputs,)
:rtype: kernel object
"""
p = parts.coregionalise.Coregionalise(num_outputs,W_columns,W,kappa)
p = parts.coregionalize.Coregionalize(num_outputs,W_columns,W,kappa)
return kern(1,[p])
@ -442,11 +442,11 @@ def build_lcm(input_dim, num_outputs, kernel_list = [], W_columns=1,W=None,kappa
k.input_dim = input_dim
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
k_coreg = coregionalise(num_outputs,W_columns,W,kappa)
k_coreg = coregionalize(num_outputs,W_columns,W,kappa)
kernel = kernel_list[0]**k_coreg.copy()
for k in kernel_list[1:]:
k_coreg = coregionalise(num_outputs,W_columns,W,kappa)
k_coreg = coregionalize(num_outputs,W_columns,W,kappa)
kernel += k**k_coreg.copy()
return kernel

View file

@ -1,6 +1,6 @@
import bias
import Brownian
import coregionalise
import coregionalize
import exponential
import finite_dimensional
import fixed

View file

@ -7,7 +7,7 @@ from GPy.util.linalg import mdot, pdinv
import pdb
from scipy import weave
class Coregionalise(Kernpart):
class Coregionalize(Kernpart):
"""
Kernel for intrinsic/linear coregionalization models
@ -25,12 +25,12 @@ class Coregionalise(Kernpart):
:type num_outputs: int
:param W_columns: number of columns of the W matrix (this parameter is ignored if parameter W is not None)
:type W_columns: int
:param W: a low rank matrix that determines the correlations between the different outputs, together with kappa it forms the coregionalisation matrix B
:param W: a low rank matrix that determines the correlations between the different outputs, together with kappa it forms the coregionalization matrix B
:type W: numpy array of dimensionality (num_outputs, W_columns)
:param kappa: a vector which allows the outputs to behave independently
:type kappa: numpy array of dimensionality (num_outputs,)
.. Note: see coregionalisation examples in GPy.examples.regression for some usage.
.. Note: see coregionalization examples in GPy.examples.regression for some usage.
"""
def __init__(self,num_outputs,W_columns=1, W=None, kappa=None):
self.input_dim = 1

View file

@ -18,7 +18,7 @@ class Prod(Kernpart):
"""
def __init__(self,k1,k2,tensor=False):
self.num_params = k1.num_params + k2.num_params
self.name = '['+k1.name + '(x)' + k2.name +']'
self.name = '['+k1.name + '**' + k2.name +']'
self.k1 = k1
self.k2 = k2
if tensor:

View file

@ -6,7 +6,6 @@ import numpy as np
from ..core import GP
from .. import likelihoods
from .. import kern
#from ..util import multioutput
class GPMultioutputRegression(GP):
"""

View file

@ -50,7 +50,7 @@ class KernelTests(unittest.TestCase):
m = GPy.models.GPRegression(X,Y,kernel=kernel)
self.assertTrue(m.checkgrad())
def test_coregionalisation(self):
def test_coregionalization(self):
X1 = np.random.rand(50,1)*8
X2 = np.random.rand(30,1)*5
index = np.vstack((np.zeros_like(X1),np.ones_like(X2)))
@ -60,7 +60,7 @@ class KernelTests(unittest.TestCase):
Y = np.vstack((Y1,Y2))
k1 = GPy.kern.rbf(1) + GPy.kern.bias(1)
k2 = GPy.kern.coregionalise(2,1)
k2 = GPy.kern.coregionalize(2,1)
k = k1.prod(k2,tensor=True)
m = GPy.models.GPRegression(X,Y,kernel=k)
self.assertTrue(m.checkgrad())

View file

@ -14,4 +14,3 @@ import visualize
import decorators
import classification
import latent_space_visualizations
#import multioutput

View file

@ -1,35 +0,0 @@
import numpy as np
import warnings
from .. import kern
def build_lcm(input_dim, num_outputs, CK = [], NC = [], W_columns=1,W=None,kappa=None):
#TODO build_icm or build_lcm
"""
Builds a kernel for a linear coregionalization model
:input_dim: Input dimensionality
:num_outputs: Number of outputs
:param CK: List of coregionalized kernels (i.e., this will be multiplied by a coregionalise kernel).
:param K: List of kernels that will be added up together with CK, but won't be multiplied by a coregionalise kernel
:param W_columns: number tuples of the coregionalization parameters 'coregion_W'
:type W_columns: integer
"""
for k in CK:
if k.input_dim <> input_dim:
k.input_dim = input_dim
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
for k in NC:
if k.input_dim <> input_dim + 1:
k.input_dim = input_dim + 1
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
kernel = CK[0].prod(kern.coregionalise(num_outputs,W_columns,W,kappa),tensor=True)
for k in CK[1:]:
k_coreg = kern.coregionalise(num_outputs,W_columns,W,kappa)
kernel += k.prod(k_coreg,tensor=True)
for k in NC:
kernel += k
return kernel