Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-04-30 15:26:23 +02:00)

Merge branch 'devel' of github.com:SheffieldML/GPy into devel

Commit 9e6a98e485: 12 changed files with 498 additions and 43 deletions
@@ -96,7 +96,7 @@ class GP(GPBase):
        model for a new variable Y* = v_tilde/tau_tilde, with a covariance
        matrix K* = K + diag(1./tau_tilde) plus a normalization term.
        """
-        return -0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z
+        return - 0.5 * self.num_data * self.output_dim * np.log(2.*np.pi) - 0.5 * self.output_dim * self.K_logdet + self._model_fit_term() + self.likelihood.Z


    def _log_likelihood_gradients(self):
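For reference, the added np.log(2.*np.pi) term is the Gaussian normalization constant that the old return value dropped. Written out as a sketch (with N = num_data, D = output_dim, and assuming _model_fit_term() supplies the quadratic data-fit term), the new return corresponds to the standard log marginal likelihood

    \log p(Y \mid X) = -\tfrac{ND}{2}\log(2\pi) - \tfrac{D}{2}\log|K^{*}| - \tfrac{1}{2}\,\mathrm{tr}\!\big((K^{*})^{-1} Y^{*} (Y^{*})^{\top}\big) + Z

where K* and Y* are the adjusted covariance and targets described in the docstring and Z is self.likelihood.Z.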
@@ -60,6 +60,28 @@ def GPLVM_oil_100(optimize=True):
    m.plot_latent(labels=m.data_labels)
    return m

+def sparseGPLVM_oil(optimize=True, N=100, Q=6, num_inducing=15, max_iters=50):
+    np.random.seed(0)
+    data = GPy.util.datasets.oil()
+
+    Y = data['X'][:N]
+    Y = Y - Y.mean(0)
+    Y /= Y.std(0)
+
+    # create simple GP model
+    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q)
+    m = GPy.models.SparseGPLVM(Y, Q, kernel=kernel, num_inducing = num_inducing)
+    m.data_labels = data['Y'].argmax(axis=1)
+
+    # optimize
+    if optimize:
+        m.optimize('scg', messages=1, max_iters = max_iters)
+
+    # plot
+    print(m)
+    #m.plot_latent(labels=m.data_labels)
+    return m
+
def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False):
    from GPy.util.datasets import swiss_roll_generated
    from GPy.core.transformations import logexp_clipped
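A quick way to exercise the new demo without running the optimiser (a usage sketch; the module path GPy.examples.dimensionality_reduction is an assumption based on where the other oil demos live):

    from GPy.examples.dimensionality_reduction import sparseGPLVM_oil
    m = sparseGPLVM_oil(optimize=False, N=50, Q=4, num_inducing=10)
    print(m)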
@@ -114,7 +136,7 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
        m.optimize('scg', messages=1)
    return m

-def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_f_eval=50, plot=False, **k):
+def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_iters=50, plot=False, **k):
    np.random.seed(0)
    data = GPy.util.datasets.oil()

@@ -135,9 +157,9 @@ def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_f_eval=50, plot=
    # optimize
    if optimize:
        m.constrain_fixed('noise')
-        m.optimize('scg', messages=1, max_f_eval=100, gtol=.05)
+        m.optimize('scg', messages=1, max_iters=100, gtol=.05)
        m.constrain_positive('noise')
-        m.optimize('scg', messages=1, max_f_eval=max_f_eval, gtol=.05)
+        m.optimize('scg', messages=1, max_iters=max_iters, gtol=.05)

    if plot:
        y = m.likelihood.Y[0, :]
@@ -241,7 +263,7 @@ def bgplvm_simulation_matlab_compare():

def bgplvm_simulation(optimize='scg',
                      plot=True,
-                      max_f_eval=2e4):
+                      max_iters=2e4):
    # from GPy.core.transformations import logexp_clipped
    D1, D2, D3, N, num_inducing, Q = 15, 8, 8, 100, 3, 5
    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot)
@@ -262,8 +284,7 @@ def bgplvm_simulation(optimize='scg',

    if optimize:
        print "Optimizing model:"
-        m.optimize(optimize, max_iters=max_f_eval,
-                   max_f_eval=max_f_eval,
+        m.optimize(optimize, max_iters=max_iters,
                   messages=True, gtol=.05)
    if plot:
        m.plot_X_1d("BGPLVM Latent Space 1D")
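The example hunks above all swap the SCG keyword max_f_eval for max_iters. Together with the deprecation shim added to opt_SCG further down, the model-level call now looks like this (a sketch):

    m.optimize('scg', max_iters=200, messages=True, gtol=.05)   # new keyword
    m.optimize('scg', max_f_eval=200)   # deprecated: ignored, only triggers a FutureWarning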
@@ -57,6 +57,79 @@ def toy_rbf_1d_50(optim_iters=100):
    print(m)
    return m

+def toy_ARD(optim_iters=1000, kernel_type='linear', N=300, D=4):
+    # Create an artificial dataset where the values in the targets (Y)
+    # only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
+    # see if this dependency can be recovered
+    X1 = np.sin(np.sort(np.random.rand(N,1)*10,0))
+    X2 = np.cos(np.sort(np.random.rand(N,1)*10,0))
+    X3 = np.exp(np.sort(np.random.rand(N,1),0))
+    X4 = np.log(np.sort(np.random.rand(N,1),0))
+    X = np.hstack((X1, X2, X3, X4))
+
+    Y1 = np.asarray(2*X[:,0]+3).T
+    Y2 = np.asarray(4*(X[:,2]-1.5*X[:,0])).T
+    Y = np.hstack((Y1, Y2))
+
+    Y = np.dot(Y, np.random.rand(2,D));
+    Y = Y + 0.2*np.random.randn(Y.shape[0], Y.shape[1])
+    Y -= Y.mean()
+    Y /= Y.std()
+
+    if kernel_type == 'linear':
+        kernel = GPy.kern.linear(X.shape[1], ARD = 1)
+    elif kernel_type == 'rbf_inv':
+        kernel = GPy.kern.rbf_inv(X.shape[1], ARD = 1)
+    else:
+        kernel = GPy.kern.rbf(X.shape[1], ARD = 1)
+    kernel += GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
+    m = GPy.models.GPRegression(X, Y, kernel)
+    #len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
+    #m.set_prior('.*lengthscale',len_prior)
+
+    m.optimize(optimizer = 'scg', max_iters = optim_iters, messages = 1)
+
+    m.kern.plot_ARD()
+    print(m)
+    return m
+
+def toy_ARD_sparse(optim_iters=1000, kernel_type='linear', N=300, D=4):
+    # Create an artificial dataset where the values in the targets (Y)
+    # only depend in dimensions 1 and 3 of the inputs (X). Run ARD to
+    # see if this dependency can be recovered
+    X1 = np.sin(np.sort(np.random.rand(N,1)*10,0))
+    X2 = np.cos(np.sort(np.random.rand(N,1)*10,0))
+    X3 = np.exp(np.sort(np.random.rand(N,1),0))
+    X4 = np.log(np.sort(np.random.rand(N,1),0))
+    X = np.hstack((X1, X2, X3, X4))
+
+    Y1 = np.asarray(2*X[:,0]+3)[:,None]
+    Y2 = np.asarray(4*(X[:,2]-1.5*X[:,0]))[:,None]
+    Y = np.hstack((Y1, Y2))
+
+    Y = np.dot(Y, np.random.rand(2,D));
+    Y = Y + 0.2*np.random.randn(Y.shape[0], Y.shape[1])
+    Y -= Y.mean()
+    Y /= Y.std()
+
+    if kernel_type == 'linear':
+        kernel = GPy.kern.linear(X.shape[1], ARD = 1)
+    elif kernel_type == 'rbf_inv':
+        kernel = GPy.kern.rbf_inv(X.shape[1], ARD = 1)
+    else:
+        kernel = GPy.kern.rbf(X.shape[1], ARD = 1)
+    kernel += GPy.kern.white(X.shape[1]) + GPy.kern.bias(X.shape[1])
+    X_variance = np.ones(X.shape)*0.5
+    m = GPy.models.SparseGPRegression(X, Y, kernel, X_variance = X_variance)
+    #len_prior = GPy.priors.inverse_gamma(1,18) # 1, 25
+    #m.set_prior('.*lengthscale',len_prior)
+
+    m.optimize(optimizer = 'scg', max_iters = optim_iters, messages = 1)
+
+    m.kern.plot_ARD()
+    print(m)
+    return m
+
def silhouette(optim_iters=100):
    """Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
    data = GPy.util.datasets.silhouette()
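Running the new ARD demo and checking that the recovered relevances match the construction above (the targets only depend on input columns 0 and 2) is a short exercise; a sketch, assuming the demo module is GPy.examples.regression:

    from GPy.examples.regression import toy_ARD
    m = toy_ARD(optim_iters=200, kernel_type='linear')   # also plots the ARD weights via m.kern.plot_ARD()
    print(m)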
@@ -4,6 +4,7 @@
import pylab as pb
import datetime as dt
from scipy import optimize
+from warnings import warn

try:
    import rasmussens_minimize as rasm
@@ -198,17 +199,22 @@ class opt_rasm(Optimizer):

class opt_SCG(Optimizer):
    def __init__(self, *args, **kwargs):
+        if 'max_f_eval' in kwargs:
+            warn("max_f_eval deprecated for SCG optimizer: use max_iters instead!\nIgnoring max_f_eval!", FutureWarning)
        Optimizer.__init__(self, *args, **kwargs)

        self.opt_name = "Scaled Conjugate Gradients"

    def opt(self, f_fp=None, f=None, fp=None):
        assert not f is None
        assert not fp is None

        opt_result = SCG(f, fp, self.x_init, display=self.messages,
                         maxiters=self.max_iters,
                         max_f_eval=self.max_f_eval,
                         xtol=self.xtol, ftol=self.ftol,
                         gtol=self.gtol)

        self.x_opt = opt_result[0]
        self.trace = opt_result[1]
        self.f_opt = self.trace[-1]
@@ -35,7 +35,7 @@ def exponents(fnow, current_grad):
    exps = [np.abs(fnow), current_grad]
    return np.sign(exps) * np.log10(exps).astype(int)

-def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xtol=None, ftol=None, gtol=None):
+def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=np.inf, display=True, xtol=None, ftol=None, gtol=None):
    """
    Optimisation through Scaled Conjugate Gradients (SCG)

@@ -68,7 +68,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
    nsuccess = 0 # nsuccess counts number of successes.
    beta = 1.0 # Initial scale parameter.
    betamin = 1.0e-60 # Lower bound on scale.
-    betamax = 1.0e100 # Upper bound on scale.
+    betamax = 1.0e50 # Upper bound on scale.
    status = "Not converged"

    flog = [fold]
@@ -109,9 +109,9 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
        fnew = f(xnew, *optargs)
        function_eval += 1

-        if function_eval >= max_f_eval:
-            status = "maximum number of function evaluations exceeded"
-            break
+        # if function_eval >= max_f_eval:
+        #     status = "maximum number of function evaluations exceeded"
+        #     break
        #     return x, flog, function_eval, status

        Delta = 2.*(fnew - fold) / (alpha * mu)
@@ -131,13 +131,12 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
        if display:
            print_out(len_maxiters, fnow, current_grad, beta, iteration)
            n_exps = exponents(fnow, current_grad)
-            if iteration - p_iter >= 6:
+            if iteration - p_iter >= 20 * np.random.rand():
                a = iteration >= p_iter * 2.78
                b = np.any(n_exps < exps)
                if a or b:
-                    print ''
-                if a:
                    p_iter = iteration
+                    print ''
                if b:
                    exps = n_exps

@@ -184,7 +183,6 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
        status = "maxiter exceeded"
-
    if display:
        print ""
        print_out(len_maxiters, fnow, current_grad, beta, iteration)
        print ""
        print status
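SCG can also be driven directly with the updated signature, where max_f_eval now defaults to np.inf and maxiters governs termination; a minimal sketch on a toy quadratic (the import path and the four-element return are assumptions based on the hunks above):

    import numpy as np
    from GPy.inference.scg import SCG   # module path is an assumption

    f  = lambda x: float(np.sum(np.square(x)))   # objective
    fp = lambda x: 2.0 * x                       # its gradient
    x_opt, flog, n_evals, status = SCG(f, fp, np.ones(3), maxiters=50, display=False)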
@@ -5,6 +5,23 @@ import numpy as np
from kern import kern
import parts

+
+def rbf_inv(input_dim,variance=1., inv_lengthscale=None,ARD=False):
+    """
+    Construct an RBF kernel
+
+    :param input_dim: dimensionality of the kernel, obligatory
+    :type input_dim: int
+    :param variance: the variance of the kernel
+    :type variance: float
+    :param lengthscale: the lengthscale of the kernel
+    :type lengthscale: float
+    :param ARD: Auto Relevance Determination (one lengthscale per dimension)
+    :type ARD: Boolean
+    """
+    part = parts.rbf_inv.RBFInv(input_dim,variance,inv_lengthscale,ARD)
+    return kern(input_dim, [part])
+
def rbf(input_dim,variance=1., lengthscale=None,ARD=False):
    """
    Construct an RBF kernel
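The new constructor mirrors GPy.kern.rbf but parameterises the kernel by inverse lengthscales; a construction sketch:

    import GPy
    # isotropic: one inverse lengthscale shared across dimensions
    k_iso = GPy.kern.rbf_inv(2, variance=1., inv_lengthscale=0.5)
    # ARD: one inverse lengthscale per input dimension, combined with noise and bias parts
    k_ard = GPy.kern.rbf_inv(4, ARD=True) + GPy.kern.white(4) + GPy.kern.bias(4)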
@@ -306,4 +323,4 @@ def hierarchical(k):
    # for sl in k.input_slices:
    #     assert (sl.start is None) and (sl.stop is None), "cannot adjust input slices! (TODO)"
    _parts = [parts.hierarchical.Hierarchical(k.parts)]
-    return kern(k.input_dim+1,_parts)
+    return kern(k.input_dim+len(k.parts),_parts)
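The corrected dimensionality reflects that a hierarchical kernel presumably expects one extra index column per part: _sort_slices() in the hunk below strips the last len(parts) columns of X and uses them to route blocks of data to the individual parts. A sketch of how such an input matrix could be assembled, assuming two levels:

    import numpy as np
    X_base = np.random.randn(10, 3)               # real inputs, input_dim = 3
    level1 = np.random.randint(0, 2, (10, 1))     # index column for part 1
    level2 = np.random.randint(0, 3, (10, 1))     # index column for part 2
    X = np.hstack([X_base, level1, level2])       # shape (10, 3 + len(parts))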
@@ -20,3 +20,4 @@ import spline
import symmetric
import white
import hierarchical
+import rbf_inv
@@ -24,26 +24,26 @@ class Hierarchical(Kernpart):
        return np.hstack([k._get_params() for k in self.parts])

    def _set_params(self,x):
-        [k._set_params(x[start:stop]) for start, stop in zip(self.param_starts, self.param_stops)]
+        [k._set_params(x[start:stop]) for k, start, stop in zip(self.parts, self.param_starts, self.param_stops)]

    def _get_param_names(self):
-        return self.k._get_param_names()
+        return sum([[str(i)+'_'+k.name+'_'+n for n in k._get_param_names()] for i,k in enumerate(self.parts)],[])

    def _sort_slices(self,X,X2):
-        slices = [index_to_slices(x) for x in X[-self.levels:].T]
-        X = X[:-self.levels]
+        slices = [index_to_slices(x) for x in X[:,-self.levels:].T]
+        X = X[:,:-self.levels]
        if X2 is None:
            slices2 = slices
            X2 = X
        else:
-            slices2 = [index_to_slices(x) for x in X2[-self.levels:].T]
-            X2 = X2[:-self.levels]
+            slices2 = [index_to_slices(x) for x in X2[:,-self.levels:].T]
+            X2 = X2[:,:-self.levels]
        return X, X2, slices, slices2

    def K(self,X,X2,target):
        X, X2, slices, slices2 = self._sort_slices(X,X2)

-        [[[k.K(X[s],X2[s2],target[s,s2]) for s in slices_i] for s2 in slices_j] for k,slices_i,slices_j in zip(self.parts,slices,slices2)]
+        [[[[k.K(X[s],X2[s2],target[s,s2]) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices_,slices2_)] for k, slices_, slices2_ in zip(self.parts,slices,slices2)]

    def Kdiag(self,X,target):
        raise NotImplementedError
@@ -51,7 +51,8 @@ class Hierarchical(Kernpart):
        #[[self.k.Kdiag(X[s],target[s]) for s in slices_i] for slices_i in slices]

    def dK_dtheta(self,dL_dK,X,X2,target):
-        [[[k.dK_dtheta(dL_dK[s,s2],X[s],X2[s2],target[p_start:p_stop]) for s in slices_i] for s2 in slices_j] for k,slices_i,slices_j, p_start, p_stop in zip(self.parts, slices, slices2, self.param_starts, self.param_stops)]
+        X, X2, slices, slices2 = self._sort_slices(X,X2)
+        [[[[k.dK_dtheta(dL_dK[s,s2],X[s],X2[s2],target[p_start:p_stop]) for s in slices_i] for s2 in slices_j] for slices_i,slices_j in zip(slices_, slices2_)] for k, p_start, p_stop, slices_, slices2_ in zip(self.parts, self.param_starts, self.param_stops, slices, slices2)]


    def dK_dX(self,dL_dK,X,X2,target):
GPy/kern/parts/rbf_inv.py (new file, 335 lines)
@@ -0,0 +1,335 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)


from kernpart import Kernpart
import numpy as np
import hashlib
from scipy import weave
from ...util.linalg import tdot

class RBFInv(Kernpart):
    """
    Radial Basis Function kernel, aka squared-exponential, exponentiated quadratic or Gaussian kernel:

    .. math::

       k(r) = \sigma^2 \exp \\bigg(- \\frac{1}{2} r^2 \\bigg) \ \ \ \ \  \\text{ where  } r^2 = \sum_{i=1}^d \\frac{ (x_i-x^\prime_i)^2}{\ell_i^2}

    where \ell_i is the lengthscale, \sigma^2 the variance and d the dimensionality of the input.

    :param input_dim: the number of input dimensions
    :type input_dim: int
    :param variance: the variance of the kernel
    :type variance: float
    :param lengthscale: the vector of lengthscale of the kernel
    :type lengthscale: array or list of the appropriate size (or float if there is only one lengthscale parameter)
    :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one single lengthscale parameter \ell), otherwise there is one lengthscale parameter per dimension.
    :type ARD: Boolean
    :rtype: kernel object

    .. Note: this object implements both the ARD and 'spherical' version of the function
    """

    def __init__(self, input_dim, variance=1., inv_lengthscale=None, ARD=False):
        self.input_dim = input_dim
        self.name = 'rbf'
        self.ARD = ARD
        if not ARD:
            self.num_params = 2
            if inv_lengthscale is not None:
                inv_lengthscale = np.asarray(inv_lengthscale)
                assert inv_lengthscale.size == 1, "Only one lengthscale needed for non-ARD kernel"
            else:
                inv_lengthscale = np.ones(1)
        else:
            self.num_params = self.input_dim + 1
            if inv_lengthscale is not None:
                inv_lengthscale = np.asarray(inv_lengthscale)
                assert inv_lengthscale.size == self.input_dim, "bad number of lengthscales"
            else:
                inv_lengthscale = np.ones(self.input_dim)

        self._set_params(np.hstack((variance, inv_lengthscale.flatten())))

        # initialize cache
        self._Z, self._mu, self._S = np.empty(shape=(3, 1))
        self._X, self._X2, self._params = np.empty(shape=(3, 1))

        # a set of optional args to pass to weave
        self.weave_options = {'headers'           : ['<omp.h>'],
                              'extra_compile_args': ['-fopenmp -O3'], # -march=native'],
                              'extra_link_args'   : ['-lgomp']}



    def _get_params(self):
        return np.hstack((self.variance, self.inv_lengthscale))

    def _set_params(self, x):
        assert x.size == (self.num_params)
        self.variance = x[0]
        self.inv_lengthscale = x[1:]
        self.lengthscale = 1./self.inv_lengthscale
        self.lengthscale2 = np.square(self.lengthscale)
        # reset cached results
        self._X, self._X2, self._params = np.empty(shape=(3, 1))
        self._Z, self._mu, self._S = np.empty(shape=(3, 1)) # cached versions of Z,mu,S

    def _get_param_names(self):
        if self.num_params == 2:
            return ['variance', 'inv_lengthscale']
        else:
            return ['variance'] + ['inv_lengthscale_%i' % i for i in range(self.inv_lengthscale.size)]

    def K(self, X, X2, target):
        self._K_computations(X, X2)
        target += self.variance * self._K_dvar

    def Kdiag(self, X, target):
        np.add(target, self.variance, target)

    def dK_dtheta(self, dL_dK, X, X2, target):
        self._K_computations(X, X2)
        target[0] += np.sum(self._K_dvar * dL_dK)
        if self.ARD:
            dvardLdK = self._K_dvar * dL_dK
            var_len3 = self.variance / np.power(self.lengthscale, 3)
            len2 = self.lengthscale2
            if X2 is None:
                # save computation for the symmetrical case
                dvardLdK = dvardLdK + dvardLdK.T
                code = """
                int q,i,j;
                double tmp;
                for(q=0; q<input_dim; q++){
                    tmp = 0;
                    for(i=0; i<num_data; i++){
                        for(j=0; j<i; j++){
                            tmp += (X(i,q)-X(j,q))*(X(i,q)-X(j,q))*dvardLdK(i,j);
                        }
                    }
                    target(q+1) += var_len3(q)*tmp*(-len2(q));
                }
                """
                num_data, num_inducing, input_dim = X.shape[0], X.shape[0], self.input_dim
                weave.inline(code, arg_names=['num_data','num_inducing','input_dim','X','X2','target','dvardLdK','var_len3', 'len2'], type_converters=weave.converters.blitz, **self.weave_options)
            else:
                code = """
                int q,i,j;
                double tmp;
                for(q=0; q<input_dim; q++){
                    tmp = 0;
                    for(i=0; i<num_data; i++){
                        for(j=0; j<num_inducing; j++){
                            tmp += (X(i,q)-X2(j,q))*(X(i,q)-X2(j,q))*dvardLdK(i,j);
                        }
                    }
                    target(q+1) += var_len3(q)*tmp*(-len2(q));
                }
                """
                num_data, num_inducing, input_dim = X.shape[0], X2.shape[0], self.input_dim
                #[np.add(target[1+q:2+q],var_len3[q]*np.sum(dvardLdK*np.square(X[:,q][:,None]-X2[:,q][None,:])),target[1+q:2+q]) for q in range(self.input_dim)]
                weave.inline(code, arg_names=['num_data','num_inducing','input_dim','X','X2','target','dvardLdK','var_len3', 'len2'], type_converters=weave.converters.blitz, **self.weave_options)
        else:
            target[1] += (self.variance / self.lengthscale) * np.sum(self._K_dvar * self._K_dist2 * dL_dK)*(-self.lengthscale2)


    def dKdiag_dtheta(self, dL_dKdiag, X, target):
        # NB: derivative of diagonal elements wrt lengthscale is 0
        target[0] += np.sum(dL_dKdiag)

    def dK_dX(self, dL_dK, X, X2, target):
        self._K_computations(X, X2)
        _K_dist = X[:, None, :] - X2[None, :, :] # don't cache this in _K_computations because it is high memory. If this function is being called, chances are we're not in the high memory arena.
        dK_dX = (-self.variance / self.lengthscale2) * np.transpose(self._K_dvar[:, :, np.newaxis] * _K_dist, (1, 0, 2))
        target += np.sum(dK_dX * dL_dK.T[:, :, None], 0)

    def dKdiag_dX(self, dL_dKdiag, X, target):
        pass


    #---------------------------------------#
    #             PSI statistics            #
    #---------------------------------------#

    def psi0(self, Z, mu, S, target):
        target += self.variance

    def dpsi0_dtheta(self, dL_dpsi0, Z, mu, S, target):
        target[0] += np.sum(dL_dpsi0)

    def dpsi0_dmuS(self, dL_dpsi0, Z, mu, S, target_mu, target_S):
        pass

    def psi1(self, Z, mu, S, target):
        self._psi_computations(Z, mu, S)
        target += self._psi1

    def dpsi1_dtheta(self, dL_dpsi1, Z, mu, S, target):
        self._psi_computations(Z, mu, S)
        denom_deriv = S[:, None, :] / (self.lengthscale ** 3 + self.lengthscale * S[:, None, :])
        d_length = self._psi1[:, :, None] * (self.lengthscale * np.square(self._psi1_dist / (self.lengthscale2 + S[:, None, :])) + denom_deriv)
        target[0] += np.sum(dL_dpsi1 * self._psi1 / self.variance)
        dpsi1_dlength = d_length * dL_dpsi1[:, :, None]
        if not self.ARD:
            target[1] += dpsi1_dlength.sum()*(-self.lengthscale2)
        else:
            target[1:] += dpsi1_dlength.sum(0).sum(0)*(-self.lengthscale2)
        #target[1:] = target[1:]*(-self.lengthscale2)

    def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, target):
        self._psi_computations(Z, mu, S)
        denominator = (self.lengthscale2 * (self._psi1_denom))
        dpsi1_dZ = -self._psi1[:, :, None] * ((self._psi1_dist / denominator))
        target += np.sum(dL_dpsi1[:, :, None] * dpsi1_dZ, 0)

    def dpsi1_dmuS(self, dL_dpsi1, Z, mu, S, target_mu, target_S):
        self._psi_computations(Z, mu, S)
        tmp = self._psi1[:, :, None] / self.lengthscale2 / self._psi1_denom
        target_mu += np.sum(dL_dpsi1[:, :, None] * tmp * self._psi1_dist, 1)
        target_S += np.sum(dL_dpsi1[:, :, None] * 0.5 * tmp * (self._psi1_dist_sq - 1), 1)

    def psi2(self, Z, mu, S, target):
        self._psi_computations(Z, mu, S)
        target += self._psi2

    def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S, target):
        """Shape N,num_inducing,num_inducing,Ntheta"""
        self._psi_computations(Z, mu, S)
        d_var = 2.*self._psi2 / self.variance
        d_length = 2.*self._psi2[:, :, :, None] * (self._psi2_Zdist_sq * self._psi2_denom + self._psi2_mudist_sq + S[:, None, None, :] / self.lengthscale2) / (self.lengthscale * self._psi2_denom)

        target[0] += np.sum(dL_dpsi2 * d_var)
        dpsi2_dlength = d_length * dL_dpsi2[:, :, :, None]
        if not self.ARD:
            target[1] += dpsi2_dlength.sum()*(-self.lengthscale2)
        else:
            target[1:] += dpsi2_dlength.sum(0).sum(0).sum(0)*(-self.lengthscale2)
        #target[1:] = target[1:]*(-self.lengthscale2)

    def dpsi2_dZ(self, dL_dpsi2, Z, mu, S, target):
        self._psi_computations(Z, mu, S)
        term1 = self._psi2_Zdist / self.lengthscale2 # num_inducing, num_inducing, input_dim
        term2 = self._psi2_mudist / self._psi2_denom / self.lengthscale2 # N, num_inducing, num_inducing, input_dim
        dZ = self._psi2[:, :, :, None] * (term1[None] + term2)
        target += (dL_dpsi2[:, :, :, None] * dZ).sum(0).sum(0)

    def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, target_mu, target_S):
        """Think N,num_inducing,num_inducing,input_dim """
        self._psi_computations(Z, mu, S)
        tmp = self._psi2[:, :, :, None] / self.lengthscale2 / self._psi2_denom
        target_mu += -2.*(dL_dpsi2[:, :, :, None] * tmp * self._psi2_mudist).sum(1).sum(1)
        target_S += (dL_dpsi2[:, :, :, None] * tmp * (2.*self._psi2_mudist_sq - 1)).sum(1).sum(1)

    #---------------------------------------#
    #            Precomputations            #
    #---------------------------------------#

    def _K_computations(self, X, X2):
        if not (np.array_equal(X, self._X) and np.array_equal(X2, self._X2) and np.array_equal(self._params , self._get_params())):
            self._X = X.copy()
            self._params == self._get_params().copy()
            if X2 is None:
                self._X2 = None
                X = X / self.lengthscale
                Xsquare = np.sum(np.square(X), 1)
                self._K_dist2 = -2.*tdot(X) + (Xsquare[:, None] + Xsquare[None, :])
            else:
                self._X2 = X2.copy()
                X = X / self.lengthscale
                X2 = X2 / self.lengthscale
                self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :])
            self._K_dvar = np.exp(-0.5 * self._K_dist2)

    def _psi_computations(self, Z, mu, S):
        # here are the "statistics" for psi1 and psi2
        if not np.array_equal(Z, self._Z):
            #Z has changed, compute Z specific stuff
            self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # M,M,Q
            self._psi2_Zdist = 0.5*(Z[:,None,:]-Z[None,:,:]) # M,M,Q
            self._psi2_Zdist_sq = np.square(self._psi2_Zdist/self.lengthscale) # M,M,Q
            self._Z = Z

        if not (np.array_equal(Z, self._Z) and np.array_equal(mu, self._mu) and np.array_equal(S, self._S)):
            #something's changed. recompute EVERYTHING

            #psi1
            self._psi1_denom = S[:,None,:]/self.lengthscale2 + 1.
            self._psi1_dist = Z[None,:,:]-mu[:,None,:]
            self._psi1_dist_sq = np.square(self._psi1_dist)/self.lengthscale2/self._psi1_denom
            self._psi1_exponent = -0.5*np.sum(self._psi1_dist_sq+np.log(self._psi1_denom),-1)
            self._psi1 = self.variance*np.exp(self._psi1_exponent)

            #psi2
            self._psi2_denom = 2.*S[:,None,None,:]/self.lengthscale2+1. # N,M,M,Q
            self._psi2_mudist, self._psi2_mudist_sq, self._psi2_exponent, _ = self.weave_psi2(mu,self._psi2_Zhat)
            #self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,M,M,Q
            #self._psi2_mudist_sq = np.square(self._psi2_mudist)/(self.lengthscale2*self._psi2_denom)
            #self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,M,M,Q
            self._psi2 = np.square(self.variance)*np.exp(self._psi2_exponent) # N,M,M,Q

            #store matrices for caching
            self._Z, self._mu, self._S = Z, mu,S

    def weave_psi2(self,mu,Zhat):
        N,input_dim = mu.shape
        num_inducing = Zhat.shape[0]

        mudist = np.empty((N,num_inducing,num_inducing,input_dim))
        mudist_sq = np.empty((N,num_inducing,num_inducing,input_dim))
        psi2_exponent = np.zeros((N,num_inducing,num_inducing))
        psi2 = np.empty((N,num_inducing,num_inducing))

        psi2_Zdist_sq = self._psi2_Zdist_sq
        _psi2_denom = self._psi2_denom.squeeze().reshape(N, self.input_dim)
        half_log_psi2_denom = 0.5 * np.log(self._psi2_denom).squeeze().reshape(N, self.input_dim)
        variance_sq = float(np.square(self.variance))
        if self.ARD:
            lengthscale2 = self.lengthscale2
        else:
            lengthscale2 = np.ones(input_dim) * self.lengthscale2
        code = """
        double tmp;

        #pragma omp parallel for private(tmp)
        for (int n=0; n<N; n++){
            for (int m=0; m<num_inducing; m++){
                for (int mm=0; mm<(m+1); mm++){
                    for (int q=0; q<input_dim; q++){
                        //compute mudist
                        tmp = mu(n,q) - Zhat(m,mm,q);
                        mudist(n,m,mm,q) = tmp;
                        mudist(n,mm,m,q) = tmp;

                        //now mudist_sq
                        tmp = tmp*tmp/lengthscale2(q)/_psi2_denom(n,q);
                        mudist_sq(n,m,mm,q) = tmp;
                        mudist_sq(n,mm,m,q) = tmp;

                        //now psi2_exponent
                        tmp = -psi2_Zdist_sq(m,mm,q) - tmp - half_log_psi2_denom(n,q);
                        psi2_exponent(n,mm,m) += tmp;
                        if (m !=mm){
                            psi2_exponent(n,m,mm) += tmp;
                        }
                        //psi2 would be computed like this, but np is faster
                        //tmp = variance_sq*exp(psi2_exponent(n,m,mm));
                        //psi2(n,m,mm) = tmp;
                        //psi2(n,mm,m) = tmp;
                    }
                }
            }
        }

        """

        support_code = """
        #include <omp.h>
        #include <math.h>
        """
        weave.inline(code, support_code=support_code, libraries=['gomp'],
                     arg_names=['N','num_inducing','input_dim','mu','Zhat','mudist_sq','mudist','lengthscale2','_psi2_denom','psi2_Zdist_sq','psi2_exponent','half_log_psi2_denom','psi2','variance_sq'],
                     type_converters=weave.converters.blitz, **self.weave_options)

        return mudist, mudist_sq, psi2_exponent, psi2
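A small sketch of the new part in isolation, showing the inverse-lengthscale parameterisation (lengthscale is derived as 1/inv_lengthscale in _set_params above) and the in-place Kernpart API where results are accumulated into a preallocated target:

    import numpy as np
    from GPy.kern.parts.rbf_inv import RBFInv

    part = RBFInv(input_dim=2, variance=1.5, inv_lengthscale=[2.0, 0.5], ARD=True)
    print(part._get_param_names())   # ['variance', 'inv_lengthscale_0', 'inv_lengthscale_1']
    print(part.lengthscale)          # [0.5, 2.0], the reciprocal of inv_lengthscale

    X = np.random.randn(5, 2)
    K = np.zeros((5, 5))
    part.K(X, None, K)               # fills K with variance * exp(-0.5 * r^2)

Note that the module imports scipy.weave, so this only runs in the Python 2 / scipy-with-weave environments GPy targeted at the time.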
@@ -8,6 +8,7 @@ from svigp_regression import SVIGPRegression
from sparse_gp_classification import SparseGPClassification
from fitc_classification import FITCClassification
from gplvm import GPLVM
+from sparse_gplvm import SparseGPLVM
from warped_gp import WarpedGP
from bayesian_gplvm import BayesianGPLVM
from mrd import MRD
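With this export in place, the sparse GPLVM used by the new oil demo is reachable directly from the models namespace; a sketch (constructor arguments follow the demo above):

    import numpy as np, GPy
    Y = np.random.randn(40, 5)
    m = GPy.models.SparseGPLVM(Y, 2, num_inducing=10)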
@@ -41,6 +41,12 @@ class GPLVM(GP):
        else:
            return np.random.randn(Y.shape[0], input_dim)

+    def getstate(self):
+        return GP.getstate(self)
+
+    def setstate(self, state):
+        GP.setstate(self, state)
+
    def _get_param_names(self):
        return sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], []) + GP._get_param_names(self)

@@ -18,29 +18,25 @@ class MRD(Model):
    All Ys in likelihood_list are in [N x Dn], where Dn can be different per Yn,
    N must be shared across datasets though.

-    :param likelihood_list...: likelihoods of observed datasets
-    :type likelihood_list: [GPy.likelihood] | [Y1..Yy]
+    :param likelihood_list: list of observed datasets (:py:class:`~GPy.likelihoods.gaussian.Gaussian` if not supplied directly)
+    :type likelihood_list: [:py:class:`~GPy.likelihoods.likelihood.likelihood` | :py:class:`ndarray`]
    :param names: names for different gplvm models
    :type names: [str]
-    :param input_dim: latent dimensionality (will raise
+    :param input_dim: latent dimensionality
    :type input_dim: int
-    :param initx: initialisation method for the latent space
-    :type initx: 'PCA'|'random'
+    :param initx: initialisation method for the latent space :
+
+        * 'concat' - PCA on concatenation of all datasets
+        * 'single' - Concatenation of PCA on datasets, respectively
+        * 'random' - Random draw from a normal
+
+    :type initx: ['concat'|'single'|'random']
    :param initz: initialisation method for inducing inputs
    :type initz: 'permute'|'random'
-    :param X:
-        Initial latent space
-    :param X_variance:
-        Initial latent space variance
-    :param init: [cooncat|single|random]
-        initialization method to use:
-        *concat: PCA on concatenated outputs
-        *single: PCA on each output
-        *random: random
-    :param num_inducing:
-        number of inducing inputs to use
-    :param Z:
-        initial inducing inputs
+    :param X: Initial latent space
+    :param X_variance: Initial latent space variance
+    :param Z: initial inducing inputs
+    :param num_inducing: number of inducing inputs to use
    :param kernels: list of kernels or kernel shared for all BGPLVMS
    :type kernels: [GPy.kern.kern] | GPy.kern.kern | None (default)
    """
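A construction sketch following the revised docstring (the exact constructor signature is an assumption; argument names are taken from the parameter list above):

    import numpy as np, GPy
    Y1 = np.random.randn(60, 6)
    Y2 = np.random.randn(60, 9)        # different Dn, shared N
    m = GPy.models.MRD([Y1, Y2], input_dim=4, num_inducing=10,
                       initx='concat', initz='permute')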