Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-02 08:12:39 +02:00
Merge branch 'devel' into mrd
Commit: 68dd416640
5 changed files with 333 additions and 277 deletions
GPy/kern/kern.py (488 changes)
@@ -11,7 +11,7 @@ from prod_orthogonal import prod_orthogonal
 from prod import prod

 class kern(parameterised):
-    def __init__(self,D,parts=[], input_slices=None):
+    def __init__(self, D, parts=[], input_slices=None):
         """
         This kernel does 'compound' structures.


@@ -37,15 +37,15 @@ class kern(parameterised):

         self.D = D

-        #deal with input_slices
+        # deal with input_slices
         if input_slices is None:
             self.input_slices = [slice(None) for p in self.parts]
         else:
-            assert len(input_slices)==len(self.parts)
+            assert len(input_slices) == len(self.parts)
             self.input_slices = [sl if type(sl) is slice else slice(None) for sl in input_slices]

         for p in self.parts:
-            assert isinstance(p,kernpart), "bad kernel part"
+            assert isinstance(p, kernpart), "bad kernel part"

         self.compute_param_slices()

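Note on the `input_slices` convention normalised above (a standalone sketch, not code from this commit; the dimensions are made up): with `input_slices=None` every part sees all D input columns, while an explicit slice restricts a part to a subset of columns.

```python
import numpy as np

D = 3
X = np.random.randn(10, D)
input_slices = [slice(None), slice(0, 2)]   # part 0: all columns, part 1: first two
assert X[:, input_slices[0]].shape == (10, 3)
assert X[:, input_slices[1]].shape == (10, 2)
```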

@@ -67,22 +67,22 @@ class kern(parameterised):
         if p.name == 'linear':
             ard_params = p.variances
         else:
-            ard_params = 1./p.lengthscale
+            ard_params = 1. / p.lengthscale

         ax.bar(np.arange(len(ard_params)) - 0.4, ard_params)
         ax.set_xticks(np.arange(len(ard_params)))
         ax.set_xticklabels([r"${}$".format(i + 1) for i in range(len(ard_params))])
         return ax

-    def _transform_gradients(self,g):
+    def _transform_gradients(self, g):
         x = self._get_params()
-        g[self.constrained_positive_indices] = g[self.constrained_positive_indices]*x[self.constrained_positive_indices]
-        g[self.constrained_negative_indices] = g[self.constrained_negative_indices]*x[self.constrained_negative_indices]
-        [np.put(g,i,g[i]*(x[i]-l)*(h-x[i])/(h-l)) for i,l,h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
-        [np.put(g,i,v) for i,v in [(t[0],np.sum(g[t])) for t in self.tied_indices]]
+        g[self.constrained_positive_indices] = g[self.constrained_positive_indices] * x[self.constrained_positive_indices]
+        g[self.constrained_negative_indices] = g[self.constrained_negative_indices] * x[self.constrained_negative_indices]
+        [np.put(g, i, g[i] * (x[i] - l) * (h - x[i]) / (h - l)) for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
+        [np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]]
         if len(self.tied_indices) or len(self.constrained_fixed_indices):
-            to_remove = np.hstack((self.constrained_fixed_indices+[t[1:] for t in self.tied_indices]))
-            return np.delete(g,to_remove)
+            to_remove = np.hstack((self.constrained_fixed_indices + [t[1:] for t in self.tied_indices]))
+            return np.delete(g, to_remove)
         else:
             return g

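The `g * x` lines in `_transform_gradients` apply a chain rule for constrained parameters. A minimal sketch, assuming the positivity constraint uses an exponential reparameterisation theta = exp(phi) (an assumption consistent with `g <- g * x`; the transform itself is defined elsewhere in the library):

```python
import numpy as np

def chain_positive(grad_theta, theta):
    # if theta = exp(phi), then dL/dphi = dL/dtheta * dtheta/dphi = dL/dtheta * theta
    return grad_theta * theta

theta = np.array([0.5, 2.0])
grad_theta = np.array([1.0, -3.0])
print(chain_positive(grad_theta, theta))   # [ 0.5 -6. ]
```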

@@ -91,10 +91,10 @@ class kern(parameterised):
         self.param_slices = []
         count = 0
         for p in self.parts:
-            self.param_slices.append(slice(count,count+p.Nparam))
+            self.param_slices.append(slice(count, count + p.Nparam))
             count += p.Nparam

-    def _process_slices(self,slices1=None,slices2=None):
+    def _process_slices(self, slices1=None, slices2=None):
         """
         Format the slices so that they can easily be used.
         Both slices can be any of three things:

@@ -107,13 +107,13 @@ class kern(parameterised):
         returns actual lists of slice objects
         """
         if slices1 is None:
-            slices1 = [slice(None)]*self.Nparts
+            slices1 = [slice(None)] * self.Nparts
         elif all([type(s_i) is bool for s_i in slices1]):
             slices1 = [slice(None) if s_i else slice(0) for s_i in slices1]
         else:
             assert all([type(s_i) is slice for s_i in slices1]), "invalid slice objects"
         if slices2 is None:
-            slices2 = [slice(None)]*self.Nparts
+            slices2 = [slice(None)] * self.Nparts
         elif slices2 is False:
             return slices1
         elif all([type(s_i) is bool for s_i in slices2]):
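The boolean convention handled above, in isolation: `True` keeps every row for that part, `False` selects nothing (`slice(0)` is empty). A quick standalone check:

```python
import numpy as np

X = np.arange(12).reshape(6, 2)
keep, drop = slice(None), slice(0)
assert X[keep].shape == (6, 2)   # True -> slice(None): all rows
assert X[drop].shape == (0, 2)   # False -> slice(0): no rows
```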

@@ -122,10 +122,10 @@ class kern(parameterised):
             assert all([type(s_i) is slice for s_i in slices2]), "invalid slice objects"
         return slices1, slices2

-    def __add__(self,other):
+    def __add__(self, other):
         assert self.D == other.D
-        newkern = kern(self.D,self.parts+other.parts, self.input_slices + other.input_slices)
-        #transfer constraints:
+        newkern = kern(self.D, self.parts + other.parts, self.input_slices + other.input_slices)
+        # transfer constraints:
         newkern.constrained_positive_indices = np.hstack((self.constrained_positive_indices, self.Nparam + other.constrained_positive_indices))
         newkern.constrained_negative_indices = np.hstack((self.constrained_negative_indices, self.Nparam + other.constrained_negative_indices))
         newkern.constrained_bounded_indices = self.constrained_bounded_indices + [self.Nparam + x for x in other.constrained_bounded_indices]
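The constraint bookkeeping in `__add__`, reduced to its core: indices referring to the second kernel's parameters are shifted by the first kernel's parameter count before concatenation. Standalone sketch with made-up numbers:

```python
import numpy as np

Nparam_k1 = 3
pos_k1 = np.array([0, 2])   # constrained-positive indices in the first kernel
pos_k2 = np.array([1])      # ... and in the second
print(np.hstack((pos_k1, Nparam_k1 + pos_k2)))   # [0 2 4]
```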

@@ -136,29 +136,29 @@ class kern(parameterised):
         newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices]
         return newkern

-    def add(self,other):
+    def add(self, other):
         """
         Add another kernel to this one. Both kernels are defined on the same _space_
         :param other: the other kernel to be added
         :type other: GPy.kern
         """
-        return self + other
+        return self +other

-    def add_orthogonal(self,other):
+    def add_orthogonal(self, other):
         """
         Add another kernel to this one. Both kernels are defined on separate spaces
         :param other: the other kernel to be added
         :type other: GPy.kern
         """
-        #deal with input slices
+        # deal with input slices
         D = self.D + other.D
         self_input_slices = [slice(*sl.indices(self.D)) for sl in self.input_slices]
         other_input_indices = [sl.indices(other.D) for sl in other.input_slices]
-        other_input_slices = [slice(i[0]+self.D,i[1]+self.D,i[2]) for i in other_input_indices]
+        other_input_slices = [slice(i[0] + self.D, i[1] + self.D, i[2]) for i in other_input_indices]

         newkern = kern(D, self.parts + other.parts, self_input_slices + other_input_slices)

-        #transfer constraints:
+        # transfer constraints:
         newkern.constrained_positive_indices = np.hstack((self.constrained_positive_indices, self.Nparam + other.constrained_positive_indices))
         newkern.constrained_negative_indices = np.hstack((self.constrained_negative_indices, self.Nparam + other.constrained_negative_indices))
         newkern.constrained_bounded_indices = self.constrained_bounded_indices + [self.Nparam + x for x in other.constrained_bounded_indices]

@@ -169,13 +169,13 @@ class kern(parameterised):
         newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices]
         return newkern

-    def __mul__(self,other):
+    def __mul__(self, other):
         """
         Shortcut for `prod_orthogonal`. Note that `+` assumes that we sum 2 kernels defines on the same space whereas `*` assumes that the kernels are defined on different subspaces.
         """
         return self.prod(other)

-    def prod(self,other):
+    def prod(self, other):
         """
         multiply two kernels defined on the same spaces.
         :param other: the other kernel to be added

@@ -184,20 +184,20 @@ class kern(parameterised):
         K1 = self.copy()
         K2 = other.copy()

-        newkernparts = [prod(k1,k2) for k1, k2 in itertools.product(K1.parts,K2.parts)]
+        newkernparts = [prod(k1, k2) for k1, k2 in itertools.product(K1.parts, K2.parts)]

         slices = []
-        for sl1, sl2 in itertools.product(K1.input_slices,K2.input_slices):
-            s1, s2 = [False]*K1.D, [False]*K2.D
+        for sl1, sl2 in itertools.product(K1.input_slices, K2.input_slices):
+            s1, s2 = [False] * K1.D, [False] * K2.D
             s1[sl1], s2[sl2] = [True], [True]
-            slices += [s1+s2]
+            slices += [s1 + s2]

         newkern = kern(K1.D, newkernparts, slices)
-        newkern._follow_constrains(K1,K2)
+        newkern._follow_constrains(K1, K2)

         return newkern

-    def prod_orthogonal(self,other):
+    def prod_orthogonal(self, other):
         """
         multiply two kernels. Both kernels are defined on separate spaces.
         :param other: the other kernel to be added
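`prod` (like `prod_orthogonal` below) builds one product part per pair of parts, so part counts multiply. Standalone illustration:

```python
import itertools

parts1 = ['rbf', 'linear']
parts2 = ['rbf', 'bias', 'white']
pairs = list(itertools.product(parts1, parts2))
print(len(pairs))   # 6 product parts from a 2-part and a 3-part kernel
```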

@@ -206,31 +206,31 @@ class kern(parameterised):
         K1 = self.copy()
         K2 = other.copy()

-        newkernparts = [prod_orthogonal(k1,k2) for k1, k2 in itertools.product(K1.parts,K2.parts)]
+        newkernparts = [prod_orthogonal(k1, k2) for k1, k2 in itertools.product(K1.parts, K2.parts)]

         slices = []
-        for sl1, sl2 in itertools.product(K1.input_slices,K2.input_slices):
-            s1, s2 = [False]*K1.D, [False]*K2.D
+        for sl1, sl2 in itertools.product(K1.input_slices, K2.input_slices):
+            s1, s2 = [False] * K1.D, [False] * K2.D
             s1[sl1], s2[sl2] = [True], [True]
-            slices += [s1+s2]
+            slices += [s1 + s2]

         newkern = kern(K1.D + K2.D, newkernparts, slices)
-        newkern._follow_constrains(K1,K2)
+        newkern._follow_constrains(K1, K2)

         return newkern

-    def _follow_constrains(self,K1,K2):
+    def _follow_constrains(self, K1, K2):

         # Build the array that allows to go from the initial indices of the param to the new ones
         K1_param = []
         n = 0
         for k1 in K1.parts:
-            K1_param += [range(n,n+k1.Nparam)]
+            K1_param += [range(n, n + k1.Nparam)]
             n += k1.Nparam
         n = 0
         K2_param = []
         for k2 in K2.parts:
-            K2_param += [range(K1.Nparam+n,K1.Nparam+n+k2.Nparam)]
+            K2_param += [range(K1.Nparam + n, K1.Nparam + n + k2.Nparam)]
             n += k2.Nparam
         index_param = []
         for p1 in K1_param:
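The first half of `_follow_constrains` just records where each part's parameters land in the concatenated vector: K1's parts fill `[0, K1.Nparam)` and K2's parts continue from there. A standalone sketch with made-up parameter counts:

```python
# two parts with 2 and 3 parameters, followed by one part with 1 parameter
nparams_k1, nparams_k2 = [2, 3], [1]

K1_param, n = [], 0
for m in nparams_k1:
    K1_param.append(range(n, n + m))
    n += m

offset = sum(nparams_k1)
K2_param, n = [], 0
for m in nparams_k2:
    K2_param.append(range(offset + n, offset + n + m))
    n += m

print([list(r) for r in K1_param])   # [[0, 1], [2, 3, 4]]
print([list(r) for r in K2_param])   # [[5]]
```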

@@ -254,47 +254,47 @@ class kern(parameterised):
         # follow the previous ties
         for arr in prev_ties:
             for j in arr:
-                index_param[np.where(index_param==j)[0]] = arr[0]
+                index_param[np.where(index_param == j)[0]] = arr[0]

         # ties and constrains
         for i in range(K1.Nparam + K2.Nparam):
-            index = np.where(index_param==i)[0]
+            index = np.where(index_param == i)[0]
             if index.size > 1:
                 self.tie_params(index)
         for i in prev_constr_pos:
-            self.constrain_positive(np.where(index_param==i)[0])
+            self.constrain_positive(np.where(index_param == i)[0])
         for i in prev_constr_neg:
-            self.constrain_neg(np.where(index_param==i)[0])
+            self.constrain_neg(np.where(index_param == i)[0])
         for j, i in enumerate(prev_constr_fix):
-            self.constrain_fixed(np.where(index_param==i)[0],prev_constr_fix_values[j])
+            self.constrain_fixed(np.where(index_param == i)[0], prev_constr_fix_values[j])
         for j, i in enumerate(prev_constr_bou):
-            self.constrain_bounded(np.where(index_param==i)[0],prev_constr_bou_low[j],prev_constr_bou_upp[j])
+            self.constrain_bounded(np.where(index_param == i)[0], prev_constr_bou_low[j], prev_constr_bou_upp[j])

     def _get_params(self):
         return np.hstack([p._get_params() for p in self.parts])

-    def _set_params(self,x):
+    def _set_params(self, x):
         [p._set_params(x[s]) for p, s in zip(self.parts, self.param_slices)]

     def _get_param_names(self):
-        #this is a bit nasty: we wat to distinguish between parts with the same name by appending a count
-        part_names = np.array([k.name for k in self.parts],dtype=np.str)
-        counts = [np.sum(part_names==ni) for i, ni in enumerate(part_names)]
-        cum_counts = [np.sum(part_names[i:]==ni) for i, ni in enumerate(part_names)]
-        names = [name+'_'+str(cum_count) if count>1 else name for name,count,cum_count in zip(part_names,counts,cum_counts)]
+        # this is a bit nasty: we wat to distinguish between parts with the same name by appending a count
+        part_names = np.array([k.name for k in self.parts], dtype=np.str)
+        counts = [np.sum(part_names == ni) for i, ni in enumerate(part_names)]
+        cum_counts = [np.sum(part_names[i:] == ni) for i, ni in enumerate(part_names)]
+        names = [name + '_' + str(cum_count) if count > 1 else name for name, count, cum_count in zip(part_names, counts, cum_counts)]

-        return sum([[name+'_'+n for n in k._get_param_names()] for name,k in zip(names,self.parts)],[])
+        return sum([[name + '_' + n for n in k._get_param_names()] for name, k in zip(names, self.parts)], [])

-    def K(self,X,X2=None,slices1=None,slices2=None):
-        assert X.shape[1]==self.D
-        slices1, slices2 = self._process_slices(slices1,slices2)
+    def K(self, X, X2=None, slices1=None, slices2=None):
+        assert X.shape[1] == self.D
+        slices1, slices2 = self._process_slices(slices1, slices2)
         if X2 is None:
             X2 = X
-        target = np.zeros((X.shape[0],X2.shape[0]))
-        [p.K(X[s1,i_s],X2[s2,i_s],target=target[s1,s2]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
+        target = np.zeros((X.shape[0], X2.shape[0]))
+        [p.K(X[s1, i_s], X2[s2, i_s], target=target[s1, s2]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
         return target

-    def dK_dtheta(self,dL_dK,X,X2=None,slices1=None,slices2=None):
+    def dK_dtheta(self, dL_dK, X, X2=None, slices1=None, slices2=None):
         """
         :param dL_dK: An array of dL_dK derivaties, dL_dK
         :type dL_dK: Np.ndarray (N x M)
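The naming trick in `_get_param_names`, run standalone (made-up part names; the `dtype=np.str` detail is dropped since it adds nothing here): duplicate part names get a decreasing count suffix so parameter names stay distinguishable.

```python
import numpy as np

part_names = np.array(['rbf', 'rbf', 'bias'])
counts = [np.sum(part_names == ni) for ni in part_names]
cum_counts = [np.sum(part_names[i:] == ni) for i, ni in enumerate(part_names)]
names = [n + '_' + str(c) if count > 1 else n
         for n, count, c in zip(part_names, counts, cum_counts)]
print(names)   # ['rbf_2', 'rbf_1', 'bias']
```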

@@ -306,282 +306,288 @@ class kern(parameterised):
         :type slices1: list of slice objects, or list of booleans
         :param slices2: slices for X2
         """
-        assert X.shape[1]==self.D
-        slices1, slices2 = self._process_slices(slices1,slices2)
+        assert X.shape[1] == self.D
+        slices1, slices2 = self._process_slices(slices1, slices2)
         if X2 is None:
             X2 = X
         target = np.zeros(self.Nparam)
-        [p.dK_dtheta(dL_dK[s1,s2],X[s1,i_s],X2[s2,i_s],target[ps]) for p,i_s,ps,s1,s2 in zip(self.parts, self.input_slices, self.param_slices, slices1, slices2)]
+        [p.dK_dtheta(dL_dK[s1, s2], X[s1, i_s], X2[s2, i_s], target[ps]) for p, i_s, ps, s1, s2 in zip(self.parts, self.input_slices, self.param_slices, slices1, slices2)]

         return self._transform_gradients(target)

-    def dK_dX(self,dL_dK,X,X2=None,slices1=None,slices2=None):
+    def dK_dX(self, dL_dK, X, X2=None, slices1=None, slices2=None):
         if X2 is None:
             X2 = X
-        slices1, slices2 = self._process_slices(slices1,slices2)
+        slices1, slices2 = self._process_slices(slices1, slices2)
         target = np.zeros_like(X)
-        [p.dK_dX(dL_dK[s1,s2],X[s1,i_s],X2[s2,i_s],target[s1,i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        [p.dK_dX(dL_dK[s1, s2], X[s1, i_s], X2[s2, i_s], target[s1, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
         return target

-    def Kdiag(self,X,slices=None):
-        assert X.shape[1]==self.D
-        slices = self._process_slices(slices,False)
+    def Kdiag(self, X, slices=None):
+        assert X.shape[1] == self.D
+        slices = self._process_slices(slices, False)
         target = np.zeros(X.shape[0])
-        [p.Kdiag(X[s,i_s],target=target[s]) for p,i_s,s in zip(self.parts,self.input_slices,slices)]
+        [p.Kdiag(X[s, i_s], target=target[s]) for p, i_s, s in zip(self.parts, self.input_slices, slices)]
         return target

-    def dKdiag_dtheta(self,dL_dKdiag,X,slices=None):
-        assert X.shape[1]==self.D
-        assert len(dL_dKdiag.shape)==1
-        assert dL_dKdiag.size==X.shape[0]
-        slices = self._process_slices(slices,False)
+    def dKdiag_dtheta(self, dL_dKdiag, X, slices=None):
+        assert X.shape[1] == self.D
+        assert len(dL_dKdiag.shape) == 1
+        assert dL_dKdiag.size == X.shape[0]
+        slices = self._process_slices(slices, False)
         target = np.zeros(self.Nparam)
-        [p.dKdiag_dtheta(dL_dKdiag[s],X[s,i_s],target[ps]) for p,i_s,s,ps in zip(self.parts,self.input_slices,slices,self.param_slices)]
+        [p.dKdiag_dtheta(dL_dKdiag[s], X[s, i_s], target[ps]) for p, i_s, s, ps in zip(self.parts, self.input_slices, slices, self.param_slices)]
         return self._transform_gradients(target)

     def dKdiag_dX(self, dL_dKdiag, X, slices=None):
-        assert X.shape[1]==self.D
-        slices = self._process_slices(slices,False)
+        assert X.shape[1] == self.D
+        slices = self._process_slices(slices, False)
         target = np.zeros_like(X)
-        [p.dKdiag_dX(dL_dKdiag[s],X[s,i_s],target[s,i_s]) for p,i_s,s in zip(self.parts,self.input_slices,slices)]
+        [p.dKdiag_dX(dL_dKdiag[s], X[s, i_s], target[s, i_s]) for p, i_s, s in zip(self.parts, self.input_slices, slices)]
         return target

-    def psi0(self,Z,mu,S,slices=None):
-        slices = self._process_slices(slices,False)
+    def psi0(self, Z, mu, S, slices=None):
+        slices = self._process_slices(slices, False)
         target = np.zeros(mu.shape[0])
-        [p.psi0(Z,mu[s],S[s],target[s]) for p,s in zip(self.parts,slices)]
+        [p.psi0(Z, mu[s], S[s], target[s]) for p, s in zip(self.parts, slices)]
         return target

-    def dpsi0_dtheta(self,dL_dpsi0,Z,mu,S,slices=None):
-        slices = self._process_slices(slices,False)
+    def dpsi0_dtheta(self, dL_dpsi0, Z, mu, S, slices=None):
+        slices = self._process_slices(slices, False)
         target = np.zeros(self.Nparam)
-        [p.dpsi0_dtheta(dL_dpsi0[s],Z,mu[s],S[s],target[ps]) for p,ps,s in zip(self.parts, self.param_slices,slices)]
+        [p.dpsi0_dtheta(dL_dpsi0[s], Z, mu[s], S[s], target[ps]) for p, ps, s in zip(self.parts, self.param_slices, slices)]
         return self._transform_gradients(target)

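For orientation (standard sparse-GP notation, not text from this commit): the psi statistics computed by the methods below are expectations of the kernel under a Gaussian q(X) with means mu and variances S, taken against the inducing inputs Z:

```latex
(\psi_0)_n = \mathbb{E}_{q(x_n)}\!\left[k(x_n, x_n)\right], \qquad
(\psi_1)_{nm} = \mathbb{E}_{q(x_n)}\!\left[k(x_n, z_m)\right], \qquad
(\psi_2)_{nmm'} = \mathbb{E}_{q(x_n)}\!\left[k(x_n, z_m)\,k(x_n, z_{m'})\right]
```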
-    def dpsi0_dmuS(self,dL_dpsi0,Z,mu,S,slices=None):
-        slices = self._process_slices(slices,False)
-        target_mu,target_S = np.zeros_like(mu),np.zeros_like(S)
-        [p.dpsi0_dmuS(dL_dpsi0,Z,mu[s],S[s],target_mu[s],target_S[s]) for p,s in zip(self.parts,slices)]
-        return target_mu,target_S
-
-    def psi1(self,Z,mu,S,slices1=None,slices2=None):
-        """Think N,M,Q """
-        slices1, slices2 = self._process_slices(slices1,slices2)
-        target = np.zeros((mu.shape[0],Z.shape[0]))
-        [p.psi1(Z[s2],mu[s1],S[s1],target[s1,s2]) for p,s1,s2 in zip(self.parts,slices1,slices2)]
-        return target
-
-    def dpsi1_dtheta(self,dL_dpsi1,Z,mu,S,slices1=None,slices2=None):
-        """N,M,(Ntheta)"""
-        slices1, slices2 = self._process_slices(slices1,slices2)
-        target = np.zeros((self.Nparam))
-        [p.dpsi1_dtheta(dL_dpsi1[s2,s1],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[ps]) for p,ps,s1,s2,i_s in zip(self.parts, self.param_slices,slices1,slices2,self.input_slices)]
-        return self._transform_gradients(target)
-
-    def dpsi1_dZ(self,dL_dpsi1,Z,mu,S,slices1=None,slices2=None):
-        """N,M,Q"""
-        slices1, slices2 = self._process_slices(slices1,slices2)
-        target = np.zeros_like(Z)
-        [p.dpsi1_dZ(dL_dpsi1[s2,s1],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[s2,i_s]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
-        return target
-
-    def dpsi1_dmuS(self,dL_dpsi1,Z,mu,S,slices1=None,slices2=None):
-        """return shapes are N,M,Q"""
-        slices1, slices2 = self._process_slices(slices1,slices2)
-        target_mu, target_S = np.zeros((2,mu.shape[0],mu.shape[1]))
-        [p.dpsi1_dmuS(dL_dpsi1[s2,s1],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target_mu[s1,i_s],target_S[s1,i_s]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
+    def dpsi0_dmuS(self, dL_dpsi0, Z, mu, S, slices=None):
+        slices = self._process_slices(slices, False)
+        target_mu, target_S = np.zeros_like(mu), np.zeros_like(S)
+        [p.dpsi0_dmuS(dL_dpsi0, Z, mu[s], S[s], target_mu[s], target_S[s]) for p, s in zip(self.parts, slices)]
+        return target_mu, target_S

-    def psi2(self,Z,mu,S,slices1=None,slices2=None):
+    def psi1(self, Z, mu, S, slices1=None, slices2=None):
+        """Think N,M,Q """
+        slices1, slices2 = self._process_slices(slices1, slices2)
+        target = np.zeros((mu.shape[0], Z.shape[0]))
+        [p.psi1(Z[s2], mu[s1], S[s1], target[s1, s2]) for p, s1, s2 in zip(self.parts, slices1, slices2)]
+        return target
+
+    def dpsi1_dtheta(self, dL_dpsi1, Z, mu, S, slices1=None, slices2=None):
+        """N,M,(Ntheta)"""
+        slices1, slices2 = self._process_slices(slices1, slices2)
+        target = np.zeros((self.Nparam))
+        [p.dpsi1_dtheta(dL_dpsi1[s2, s1], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[ps]) for p, ps, s1, s2, i_s in zip(self.parts, self.param_slices, slices1, slices2, self.input_slices)]
+        return self._transform_gradients(target)
+
+    def dpsi1_dZ(self, dL_dpsi1, Z, mu, S, slices1=None, slices2=None):
+        """N,M,Q"""
+        slices1, slices2 = self._process_slices(slices1, slices2)
+        target = np.zeros_like(Z)
+        [p.dpsi1_dZ(dL_dpsi1[s2, s1], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[s2, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        return target
+
+    def dpsi1_dmuS(self, dL_dpsi1, Z, mu, S, slices1=None, slices2=None):
+        """return shapes are N,M,Q"""
+        slices1, slices2 = self._process_slices(slices1, slices2)
+        target_mu, target_S = np.zeros((2, mu.shape[0], mu.shape[1]))
+        [p.dpsi1_dmuS(dL_dpsi1[s2, s1], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target_mu[s1, i_s], target_S[s1, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]
+        return target_mu, target_S
+
+    def psi2(self, Z, mu, S, slices1=None, slices2=None):
         """
         :param Z: np.ndarray of inducing inputs (M x Q)
         :param mu, S: np.ndarrays of means and variances (each N x Q)
         :returns psi2: np.ndarray (N,M,M)
         """
-        target = np.zeros((mu.shape[0],Z.shape[0],Z.shape[0]))
-        slices1, slices2 = self._process_slices(slices1,slices2)
-        [p.psi2(Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[s1,s2,s2]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
+        target = np.zeros((mu.shape[0], Z.shape[0], Z.shape[0]))
+        slices1, slices2 = self._process_slices(slices1, slices2)
+        [p.psi2(Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[s1, s2, s2]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]

-        #compute the "cross" terms
-        for p1, p2 in itertools.combinations(self.parts,2):
-            #white doesn;t combine with anything
-            if p1.name=='white' or p2.name=='white':
+        # compute the "cross" terms
+        for p1, p2 in itertools.combinations(self.parts, 2):
+            # white doesn;t combine with anything
+            if p1.name == 'white' or p2.name == 'white':
                 pass
-            #rbf X bias
-            elif p1.name=='bias' and p2.name=='rbf':
-                target += p1.variance*(p2._psi1[:,:,None]+p2._psi1[:,None,:])
-            elif p2.name=='bias' and p1.name=='rbf':
-                target += p2.variance*(p1._psi1[:,:,None]+p1._psi1[:,None,:])
-            #linear X bias
-            elif p1.name=='bias' and p2.name=='linear':
-                tmp = np.zeros((mu.shape[0],Z.shape[0]))
-                p2.psi1(Z,mu,S,tmp)
-                target += p1.variance*(tmp[:,:,None] + tmp[:,None,:])
-            elif p2.name=='bias' and p1.name=='linear':
-                tmp = np.zeros((mu.shape[0],Z.shape[0]))
-                p1.psi1(Z,mu,S,tmp)
-                target += p2.variance*(tmp[:,:,None] + tmp[:,None,:])
-            #rbf X linear
-            elif p1.name=='linear' and p2.name=='rbf':
-                raise NotImplementedError #TODO
-            elif p2.name=='linear' and p1.name=='rbf':
-                raise NotImplementedError #TODO
+            # rbf X bias
+            elif p1.name == 'bias' and p2.name == 'rbf':
+                target += p1.variance * (p2._psi1[:, :, None] + p2._psi1[:, None, :])
+            elif p2.name == 'bias' and p1.name == 'rbf':
+                target += p2.variance * (p1._psi1[:, :, None] + p1._psi1[:, None, :])
+            # linear X bias
+            elif p1.name == 'bias' and p2.name == 'linear':
+                tmp = np.zeros((mu.shape[0], Z.shape[0]))
+                p2.psi1(Z, mu, S, tmp)
+                target += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
+            elif p2.name == 'bias' and p1.name == 'linear':
+                tmp = np.zeros((mu.shape[0], Z.shape[0]))
+                p1.psi1(Z, mu, S, tmp)
+                target += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
+            # rbf X linear
+            elif p1.name == 'linear' and p2.name == 'rbf':
+                raise NotImplementedError # TODO
+            elif p2.name == 'linear' and p1.name == 'rbf':
+                raise NotImplementedError # TODO
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return target

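The bias cross terms above add `variance * (psi1[:, :, None] + psi1[:, None, :])` into psi2. A standalone shape check (random stand-in values, not GPy code):

```python
import numpy as np

N, M = 4, 3
psi1 = np.random.randn(N, M)      # stand-in for a part's psi1 statistic
variance = 0.7                    # stand-in for the bias variance
cross = variance * (psi1[:, :, None] + psi1[:, None, :])
assert cross.shape == (N, M, M)   # matches psi2's (N, M, M) layout
```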
-    def dpsi2_dtheta(self,dL_dpsi2,Z,mu,S,slices1=None,slices2=None):
+    def dpsi2_dtheta(self, dL_dpsi2, Z, mu, S, slices1=None, slices2=None):
         """Returns shape (N,M,M,Ntheta)"""
-        slices1, slices2 = self._process_slices(slices1,slices2)
+        slices1, slices2 = self._process_slices(slices1, slices2)
         target = np.zeros(self.Nparam)
-        [p.dpsi2_dtheta(dL_dpsi2[s1,s2,s2],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[ps]) for p,i_s,s1,s2,ps in zip(self.parts,self.input_slices,slices1,slices2,self.param_slices)]
+        [p.dpsi2_dtheta(dL_dpsi2[s1, s2, s2], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[ps]) for p, i_s, s1, s2, ps in zip(self.parts, self.input_slices, slices1, slices2, self.param_slices)]

-        #compute the "cross" terms
-        #TODO: better looping
-        for i1, i2 in itertools.combinations(range(len(self.parts)),2):
-            p1,p2 = self.parts[i1], self.parts[i2]
-            ipsl1, ipsl2 = self.input_slices[i1], self.input_slices[i2]
+        # compute the "cross" terms
+        # TODO: better looping
+        for i1, i2 in itertools.combinations(range(len(self.parts)), 2):
+            p1, p2 = self.parts[i1], self.parts[i2]
+            # ipsl1, ipsl2 = self.input_slices[i1], self.input_slices[i2]
             ps1, ps2 = self.param_slices[i1], self.param_slices[i2]

-            #white doesn;t combine with anything
-            if p1.name=='white' or p2.name=='white':
+            # white doesn;t combine with anything
+            if p1.name == 'white' or p2.name == 'white':
                 pass
-            #rbf X bias
-            elif p1.name=='bias' and p2.name=='rbf':
-                p2.dpsi1_dtheta(dL_dpsi2.sum(1)*p1.variance*2.,Z,mu,S,target[ps2])
-                p1.dpsi1_dtheta(dL_dpsi2.sum(1)*p2._psi1*2.,Z,mu,S,target[ps1])
-            elif p2.name=='bias' and p1.name=='rbf':
-                p1.dpsi1_dtheta(dL_dpsi2.sum(1)*p2.variance*2.,Z,mu,S,target[ps1])
-                p2.dpsi1_dtheta(dL_dpsi2.sum(1)*p1._psi1*2.,Z,mu,S,target[ps2])
-            #linear X bias
-            elif p1.name=='bias' and p2.name=='linear':
-                p2.dpsi1_dtheta(dL_dpsi2.sum(1)*p1.variance*2., Z, mu, S, target[ps1])
-            elif p2.name=='bias' and p1.name=='linear':
-                p1.dpsi1_dtheta(dL_dpsi2.sum(1)*p2.variance*2., Z, mu, S, target[ps1])
-            #rbf X linear
-            elif p1.name=='linear' and p2.name=='rbf':
-                raise NotImplementedError #TODO
-            elif p2.name=='linear' and p1.name=='rbf':
-                raise NotImplementedError #TODO
+            # rbf X bias
+            elif p1.name == 'bias' and p2.name == 'rbf':
+                p2.dpsi1_dtheta(dL_dpsi2.sum(1) * p1.variance * 2., Z, mu, S, target[ps2])
+                p1.dpsi1_dtheta(dL_dpsi2.sum(1) * p2._psi1 * 2., Z, mu, S, target[ps1])
+            elif p2.name == 'bias' and p1.name == 'rbf':
+                p1.dpsi1_dtheta(dL_dpsi2.sum(1) * p2.variance * 2., Z, mu, S, target[ps1])
+                p2.dpsi1_dtheta(dL_dpsi2.sum(1) * p1._psi1 * 2., Z, mu, S, target[ps2])
+            # linear X bias
+            elif p1.name == 'bias' and p2.name == 'linear':
+                p2.dpsi1_dtheta(dL_dpsi2.sum(1) * p1.variance * 2., Z, mu, S, target[ps2]) # [ps1])
+                psi1 = np.zeros((mu.shape[0], Z.shape[0]))
+                p2.psi1(Z, mu, S, psi1)
+                p1.dpsi1_dtheta(dL_dpsi2.sum(1) * psi1 * 2., Z, mu, S, target[ps1])
+            elif p2.name == 'bias' and p1.name == 'linear':
+                p1.dpsi1_dtheta(dL_dpsi2.sum(1) * p2.variance * 2., Z, mu, S, target[ps1])
+                psi1 = np.zeros((mu.shape[0], Z.shape[0]))
+                p1.psi1(Z, mu, S, psi1)
+                p2.dpsi1_dtheta(dL_dpsi2.sum(1) * psi1 * 2., Z, mu, S, target[ps2])
+            # rbf X linear
+            elif p1.name == 'linear' and p2.name == 'rbf':
+                raise NotImplementedError # TODO
+            elif p2.name == 'linear' and p1.name == 'rbf':
+                raise NotImplementedError # TODO
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"

         return self._transform_gradients(target)

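Shape bookkeeping for those cross terms (standalone check; per the psi2 docstring `dL_dpsi2` is N x M x M, and the `dpsi1_*` routines consume N x M arrays):

```python
import numpy as np

N, M = 4, 3
dL_dpsi2 = np.random.randn(N, M, M)
assert dL_dpsi2.sum(1).shape == (N, M)   # one inducing-point axis collapsed
```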
-    def dpsi2_dZ(self,dL_dpsi2,Z,mu,S,slices1=None,slices2=None):
-        slices1, slices2 = self._process_slices(slices1,slices2)
+    def dpsi2_dZ(self, dL_dpsi2, Z, mu, S, slices1=None, slices2=None):
+        slices1, slices2 = self._process_slices(slices1, slices2)
         target = np.zeros_like(Z)
-        [p.dpsi2_dZ(dL_dpsi2[s1,s2,s2],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target[s2,i_s]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
+        [p.dpsi2_dZ(dL_dpsi2[s1, s2, s2], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target[s2, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]

-        #compute the "cross" terms
-        for p1, p2 in itertools.combinations(self.parts,2):
-            #white doesn;t combine with anything
-            if p1.name=='white' or p2.name=='white':
+        # compute the "cross" terms
+        for p1, p2 in itertools.combinations(self.parts, 2):
+            # white doesn;t combine with anything
+            if p1.name == 'white' or p2.name == 'white':
                 pass
-            #rbf X bias
-            elif p1.name=='bias' and p2.name=='rbf':
-                p2.dpsi1_dX(dL_dpsi2.sum(1).T*p1.variance,Z,mu,S,target)
-            elif p2.name=='bias' and p1.name=='rbf':
-                p1.dpsi1_dZ(dL_dpsi2.sum(1).T*p2.variance,Z,mu,S,target)
-            #linear X bias
-            elif p1.name=='bias' and p2.name=='linear':
-                p2.dpsi1_dZ(dL_dpsi2.sum(1).T*p1.variance, Z, mu, S, target)
-            elif p2.name=='bias' and p1.name=='linear':
-                p1.dpsi1_dZ(dL_dpsi2.sum(1).T*p2.variance, Z, mu, S, target)
-            #rbf X linear
-            elif p1.name=='linear' and p2.name=='rbf':
-                raise NotImplementedError #TODO
-            elif p2.name=='linear' and p1.name=='rbf':
-                raise NotImplementedError #TODO
+            # rbf X bias
+            elif p1.name == 'bias' and p2.name == 'rbf':
+                p2.dpsi1_dX(dL_dpsi2.sum(1).T * p1.variance, Z, mu, S, target)
+            elif p2.name == 'bias' and p1.name == 'rbf':
+                p1.dpsi1_dZ(dL_dpsi2.sum(1).T * p2.variance, Z, mu, S, target)
+            # linear X bias
+            elif p1.name == 'bias' and p2.name == 'linear':
+                p2.dpsi1_dZ(dL_dpsi2.sum(1).T * p1.variance, Z, mu, S, target)
+            elif p2.name == 'bias' and p1.name == 'linear':
+                p1.dpsi1_dZ(dL_dpsi2.sum(1).T * p2.variance, Z, mu, S, target)
+            # rbf X linear
+            elif p1.name == 'linear' and p2.name == 'rbf':
+                raise NotImplementedError # TODO
+            elif p2.name == 'linear' and p1.name == 'rbf':
+                raise NotImplementedError # TODO
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"


-        return target
+        return target * 2.

-    def dpsi2_dmuS(self,dL_dpsi2,Z,mu,S,slices1=None,slices2=None):
+    def dpsi2_dmuS(self, dL_dpsi2, Z, mu, S, slices1=None, slices2=None):
         """return shapes are N,M,M,Q"""
-        slices1, slices2 = self._process_slices(slices1,slices2)
-        target_mu, target_S = np.zeros((2,mu.shape[0],mu.shape[1]))
-        [p.dpsi2_dmuS(dL_dpsi2[s1,s2,s2],Z[s2,i_s],mu[s1,i_s],S[s1,i_s],target_mu[s1,i_s],target_S[s1,i_s]) for p,i_s,s1,s2 in zip(self.parts,self.input_slices,slices1,slices2)]
+        slices1, slices2 = self._process_slices(slices1, slices2)
+        target_mu, target_S = np.zeros((2, mu.shape[0], mu.shape[1]))
+        [p.dpsi2_dmuS(dL_dpsi2[s1, s2, s2], Z[s2, i_s], mu[s1, i_s], S[s1, i_s], target_mu[s1, i_s], target_S[s1, i_s]) for p, i_s, s1, s2 in zip(self.parts, self.input_slices, slices1, slices2)]

-        #compute the "cross" terms
-        for p1, p2 in itertools.combinations(self.parts,2):
-            #white doesn;t combine with anything
-            if p1.name=='white' or p2.name=='white':
+        # compute the "cross" terms
+        for p1, p2 in itertools.combinations(self.parts, 2):
+            # white doesn;t combine with anything
+            if p1.name == 'white' or p2.name == 'white':
                 pass
-            #rbf X bias
-            elif p1.name=='bias' and p2.name=='rbf':
-                p2.dpsi1_dmuS(dL_dpsi2.sum(1).T*p1.variance*2.,Z,mu,S,target_mu,target_S)
-            elif p2.name=='bias' and p1.name=='rbf':
-                p1.dpsi1_dmuS(dL_dpsi2.sum(1).T*p2.variance*2.,Z,mu,S,target_mu,target_S)
-            #linear X bias
-            elif p1.name=='bias' and p2.name=='linear':
-                p2.dpsi1_dmuS(dL_dpsi2.sum(1).T*p1.variance*2., Z, mu, S, target_mu, target_S)
-            elif p2.name=='bias' and p1.name=='linear':
-                p1.dpsi1_dmuS(dL_dpsi2.sum(1).T*p2.variance*2., Z, mu, S, target_mu, target_S)
-            #rbf X linear
-            elif p1.name=='linear' and p2.name=='rbf':
-                raise NotImplementedError #TODO
-            elif p2.name=='linear' and p1.name=='rbf':
-                raise NotImplementedError #TODO
+            # rbf X bias
+            elif p1.name == 'bias' and p2.name == 'rbf':
+                p2.dpsi1_dmuS(dL_dpsi2.sum(1).T * p1.variance * 2., Z, mu, S, target_mu, target_S)
+            elif p2.name == 'bias' and p1.name == 'rbf':
+                p1.dpsi1_dmuS(dL_dpsi2.sum(1).T * p2.variance * 2., Z, mu, S, target_mu, target_S)
+            # linear X bias
+            elif p1.name == 'bias' and p2.name == 'linear':
+                p2.dpsi1_dmuS(dL_dpsi2.sum(1).T * p1.variance * 2., Z, mu, S, target_mu, target_S)
+            elif p2.name == 'bias' and p1.name == 'linear':
+                p1.dpsi1_dmuS(dL_dpsi2.sum(1).T * p2.variance * 2., Z, mu, S, target_mu, target_S)
+            # rbf X linear
+            elif p1.name == 'linear' and p2.name == 'rbf':
+                raise NotImplementedError # TODO
+            elif p2.name == 'linear' and p1.name == 'rbf':
+                raise NotImplementedError # TODO
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"

         return target_mu, target_S

-    def plot(self, x = None, plot_limits=None,which_functions='all',resolution=None,*args,**kwargs):
-        if which_functions=='all':
-            which_functions = [True]*self.Nparts
+    def plot(self, x=None, plot_limits=None, which_functions='all', resolution=None, *args, **kwargs):
+        if which_functions == 'all':
+            which_functions = [True] * self.Nparts
         if self.D == 1:
             if x is None:
-                x = np.zeros((1,1))
+                x = np.zeros((1, 1))
             else:
                 x = np.asarray(x)
                 assert x.size == 1, "The size of the fixed variable x is not 1"
-                x = x.reshape((1,1))
+                x = x.reshape((1, 1))

             if plot_limits == None:
-                xmin, xmax = (x-5).flatten(), (x+5).flatten()
+                xmin, xmax = (x - 5).flatten(), (x + 5).flatten()
             elif len(plot_limits) == 2:
                 xmin, xmax = plot_limits
             else:
                 raise ValueError, "Bad limits for plotting"

-            Xnew = np.linspace(xmin,xmax,resolution or 201)[:,None]
-            Kx = self.K(Xnew,x,slices2=which_functions)
-            pb.plot(Xnew,Kx,*args,**kwargs)
-            pb.xlim(xmin,xmax)
+            Xnew = np.linspace(xmin, xmax, resolution or 201)[:, None]
+            Kx = self.K(Xnew, x, slices2=which_functions)
+            pb.plot(Xnew, Kx, *args, **kwargs)
+            pb.xlim(xmin, xmax)
             pb.xlabel("x")
-            pb.ylabel("k(x,%0.1f)" %x)
+            pb.ylabel("k(x,%0.1f)" % x)

         elif self.D == 2:
             if x is None:
-                x = np.zeros((1,2))
+                x = np.zeros((1, 2))
             else:
                 x = np.asarray(x)
                 assert x.size == 2, "The size of the fixed variable x is not 2"
-                x = x.reshape((1,2))
+                x = x.reshape((1, 2))

             if plot_limits == None:
-                xmin, xmax = (x-5).flatten(), (x+5).flatten()
+                xmin, xmax = (x - 5).flatten(), (x + 5).flatten()
             elif len(plot_limits) == 2:
                 xmin, xmax = plot_limits
             else:
                 raise ValueError, "Bad limits for plotting"

             resolution = resolution or 51
-            xx,yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
-            xg = np.linspace(xmin[0],xmax[0],resolution)
-            yg = np.linspace(xmin[1],xmax[1],resolution)
-            Xnew = np.vstack((xx.flatten(),yy.flatten())).T
-            Kx = self.K(Xnew,x,slices2=which_functions)
-            Kx = Kx.reshape(resolution,resolution).T
-            pb.contour(xg,yg,Kx,vmin=Kx.min(),vmax=Kx.max(),cmap=pb.cm.jet,*args,**kwargs)
-            pb.xlim(xmin[0],xmax[0])
-            pb.ylim(xmin[1],xmax[1])
+            xx, yy = np.mgrid[xmin[0]:xmax[0]:1j * resolution, xmin[1]:xmax[1]:1j * resolution]
+            xg = np.linspace(xmin[0], xmax[0], resolution)
+            yg = np.linspace(xmin[1], xmax[1], resolution)
+            Xnew = np.vstack((xx.flatten(), yy.flatten())).T
+            Kx = self.K(Xnew, x, slices2=which_functions)
+            Kx = Kx.reshape(resolution, resolution).T
+            pb.contour(xg, yg, Kx, vmin=Kx.min(), vmax=Kx.max(), cmap=pb.cm.jet, *args, **kwargs)
+            pb.xlim(xmin[0], xmax[0])
+            pb.ylim(xmin[1], xmax[1])
             pb.xlabel("x1")
             pb.ylabel("x2")
-            pb.title("k(x1,x2 ; %0.1f,%0.1f)" %(x[0,0],x[0,1]) )
+            pb.title("k(x1,x2 ; %0.1f,%0.1f)" % (x[0, 0], x[0, 1]))
         else:
             raise NotImplementedError, "Cannot plot a kernel with more than two input dimensions"

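What the 1-D branch of `plot` evaluates, reduced to plain numpy (a sketch; `k` stands for any compound kernel exposing the `K` method above):

```python
import numpy as np

x = np.zeros((1, 1))                           # the fixed second argument to k(., x)
xmin, xmax = (x - 5).item(), (x + 5).item()    # the default plot_limits
Xnew = np.linspace(xmin, xmax, 201)[:, None]   # (201, 1) evaluation grid
# Kx = k.K(Xnew, x, slices2=which_functions) would then be line-plotted over Xnew.
```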
@@ -114,7 +114,7 @@ class linear(kernpart):

     def psi1(self,Z,mu,S,target):
         """the variance, it does nothing"""
-        self.K(mu,Z,target)
+        self._psi1 = self.K(mu, Z, target)

     def dpsi1_dtheta(self,dL_dpsi1,Z,mu,S,target):
         """the variance, it does nothing"""
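This last hunk makes the linear part keep its psi1 statistic around as `self._psi1`, mirroring the attribute the psi2 cross terms in kern.py read from the rbf part (`p._psi1`). The pattern in isolation (a sketch only; the real kernpart API accumulates into `target` in place, and whether its `K` returns the buffer is not shown in this diff):

```python
import numpy as np

class PartSketch(object):
    def K(self, mu, Z, target):
        target += mu.dot(Z.T)     # stand-in for the real covariance computation
        return target
    def psi1(self, Z, mu, S, target):
        # keep a reference so later cross-term code can reuse the statistic
        self._psi1 = self.K(mu, Z, target)
```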