mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-14 22:42:37 +02:00

last changes for mrd stability

parent f39afb9ea5
commit 18117bba81

4 changed files with 8 additions and 7 deletions
@@ -312,7 +312,7 @@ def mrd_simulation(plot_sim=False):
     # Y2 = np.random.multivariate_normal(np.zeros(N), k.K(X), D2).T
     # Y2 -= Y2.mean(0)
     # make_params = lambda ard: np.hstack([[1], ard, [1, .3]])
-    D1, D2, D3, N, M, Q = 150, 250, 300, 700, 10, 7
+    D1, D2, D3, N, M, Q = 150, 250, 300, 700, 3, 7
     slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim)

     from GPy.models import mrd
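The commented-out lines above record how a single data view used to be drawn from a GP prior: sample D2 output dimensions jointly over N points, then centre them. A minimal self-contained sketch of that recipe, with a hand-rolled RBF kernel and small stand-in sizes (the test itself uses GPy kernels and the 700-point, 250-dimensional view set just above):

    import numpy as np

    N, D2, Q = 100, 20, 7              # stand-in sizes; the test uses 700 and 250
    X = np.random.randn(N, Q)          # stand-in latent inputs

    def rbf_K(X, lengthscale=1.0, variance=1.0):
        # squared-exponential kernel matrix: K[i, j] = var * exp(-|xi - xj|^2 / (2 l^2))
        sq = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
        return variance * np.exp(-0.5 * sq / lengthscale ** 2)

    K = rbf_K(X) + 1e-6 * np.eye(N)    # jitter keeps the covariance well conditioned
    Y2 = np.random.multivariate_normal(np.zeros(N), K, D2).T  # (N, D2) view
    Y2 -= Y2.mean(0)                   # centre each output dimension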
@@ -328,7 +328,7 @@ def mrd_simulation(plot_sim=False):

     # k = kern.rbf(Q, ARD=True) + kern.bias(Q) + kern.white(Q)

-    k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
+    k = kern.linear(Q, [0.01] * Q, True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
     m = mrd.MRD(*Ylist, Q=Q, M=M, kernel=k, initx="concat", initz='permute', _debug=False)

     for i, Y in enumerate(Ylist):
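The change on the kern.linear line swaps the bare ARD=True flag for an explicit vector of per-dimension variances, [0.01] * Q. A self-contained sketch of what that buys; the kernel algebra is standard, and the positional GPy signature is taken from the diff itself:

    import numpy as np

    # ARD linear kernel: one variance w_q per latent dimension, so
    # K(X, X') = X diag(w) X'.T. Starting every w_q at 0.01 begins with all
    # dimensions nearly switched off, letting the optimiser grow only the
    # dimensions each view actually needs; a gentler start than the defaults.
    Q = 7
    w = np.full(Q, 0.01)               # the [0.01] * Q initialisation
    X = np.random.randn(5, Q)

    K = np.dot(X * w, X.T)             # equivalent to X @ np.diag(w) @ X.T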
@@ -347,7 +347,7 @@ def mrd_simulation(plot_sim=False):
     print "initializing beta"
     cstr = "noise"
     m.unconstrain(cstr); m.constrain_fixed(cstr)
-    m.optimize('scg', messages=1, max_f_eval=200, gtol=11)
+    m.optimize('scg', messages=1, max_f_eval=2e3, gtol=100)

     print "releasing beta"
     cstr = "noise"
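The schedule here is: freeze the noise precision, run SCG with a deliberately loose stopping rule (the diff raises max_f_eval to 2e3 and gtol to 100), then release the noise and refine. A generic sketch of that fix-then-release pattern, using SciPy in place of GPy's optimiser; the toy objective and all names are illustrative only:

    import numpy as np
    from scipy.optimize import minimize

    def objective(theta, noise):
        # toy stand-in for the negative log marginal likelihood
        return (theta ** 2).sum() + (noise - 0.5) ** 2

    theta0, noise0 = np.ones(2), 2.0

    # Stage 1: noise held fixed, loose gradient tolerance (cf. gtol=100).
    stage1 = minimize(lambda t: objective(t, noise0), theta0,
                      method='CG', options={'maxiter': 2000, 'gtol': 1e-1})

    # Stage 2: release the noise parameter and optimise everything jointly.
    p0 = np.append(stage1.x, noise0)
    stage2 = minimize(lambda p: objective(p[:-1], p[-1]), p0, method='CG')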
@@ -54,7 +54,7 @@ def SCG(f, gradf, x, optargs=(), maxiters=500, max_f_eval=500, display=True, xto
     success = True      # Force calculation of directional derivs.
     nsuccess = 0        # nsuccess counts number of successes.
     beta = 1.0          # Initial scale parameter.
-    betamin = 1.0e-25   # Lower bound on scale.
+    betamin = 1.0e-15   # Lower bound on scale.
     betamax = 1.0e100   # Upper bound on scale.
     status = "Not converged"

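Context for the betamin bump: in SCG, beta is a Levenberg-Marquardt-style scale that is repeatedly shrunk after successful steps and clamped to [betamin, betamax], and quantities of order 1/beta enter the step size, so a floor of 1e-25 lets them approach the limits of float64. A toy illustration of the clamp; the shrink factor and iteration count are illustrative:

    betamin, betamax = 1.0e-15, 1.0e100
    beta = 1.0

    for _ in range(60):                  # pretend 60 consecutive "successes"
        beta = max(0.5 * beta, betamin)  # halve, but respect the floor
    beta = min(beta, betamax)            # symmetric ceiling on the way up

    print(beta, 1.0 / beta)              # 1e-15 and 1e15: both still finite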
@@ -16,6 +16,7 @@ class likelihood:
     self.is_heteroscedastic : enables significant computational savings in GP
     self.precision : a scalar or vector representation of the effective target precision
     self.YYT : (optional) = np.dot(self.Y, self.Y.T) enables computational savings for D>N
+    self.V : self.precision * self.Y
     """
     def __init__(self,data):
         raise ValueError, "this class is not to be instantiated"
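A shape-level sketch of the attribute the docstring now documents: self.V caches the precision-weighted targets, so downstream code multiplies once instead of re-scaling Y at every use. Sizes here are illustrative:

    import numpy as np

    N, D = 4, 2
    Y = np.random.randn(N, D)
    precision = np.abs(np.random.randn(N, 1))  # per-point (heteroscedastic) precision

    V = precision * Y                          # broadcasts to (N, D); a scalar
                                               # precision rescales Y uniformly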
@@ -81,7 +81,7 @@ class sparse_GP(GP):
         # psi2_beta_scaled = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.N, 1, 1) / sf2)).sum(0)
         psi2_beta_scaled = (self.psi2 * (self.likelihood.precision.flatten().reshape(self.N, 1, 1))).sum(0)
         evals, evecs = linalg.eigh(psi2_beta_scaled)
-        clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable
+        clipped_evals = np.clip(evals, 0., 1e15) # TODO: make clipping configurable
         if not np.allclose(evals, clipped_evals):
             print "Warning: clipping posterior eigenvalues"
         tmp = evecs * np.sqrt(clipped_evals)
@@ -97,7 +97,7 @@ class sparse_GP(GP):
         # psi2_beta_scaled = (self.psi2 * (self.likelihood.precision / sf2)).sum(0)
         psi2_beta_scaled = (self.psi2 * (self.likelihood.precision)).sum(0)
         evals, evecs = linalg.eigh(psi2_beta_scaled)
-        clipped_evals = np.clip(evals, 0., 1e6) # TODO: make clipping configurable
+        clipped_evals = np.clip(evals, 0., 1e15) # TODO: make clipping configurable
         if not np.allclose(evals, clipped_evals):
             print "Warning: clipping posterior eigenvalues"
         tmp = evecs * np.sqrt(clipped_evals)
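Both hunks relax the same stabilisation step: eigendecompose the beta-scaled psi2 statistic, clip the spectrum, and rebuild a positive semi-definite square root. Raising the ceiling from 1e6 to 1e15 means only genuinely pathological eigenvalues get touched, so well-conditioned problems pass through unclipped. The step in isolation, with a random symmetric stand-in for psi2_beta_scaled:

    import numpy as np
    from scipy import linalg

    M = 5
    A = np.random.randn(M, M)
    psi2_beta_scaled = (A + A.T) / 2.          # symmetric, possibly indefinite

    evals, evecs = linalg.eigh(psi2_beta_scaled)
    clipped_evals = np.clip(evals, 0., 1e15)   # drop negatives, cap blow-ups
    if not np.allclose(evals, clipped_evals):
        print("Warning: clipping posterior eigenvalues")
    tmp = evecs * np.sqrt(clipped_evals)       # tmp.dot(tmp.T) is the PSD rebuild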
@@ -259,7 +259,7 @@ class sparse_GP(GP):
         Kmmi_LmiBLmi = backsub_both_sides(self.Lm, np.eye(self.M) - Bi)

         Kx = self.kern.K(self.Z, Xnew, which_parts=which_parts)
-        mu = np.dot(Kx.T, self.Cpsi1V / self.scale_factor)
+        mu = np.dot(Kx.T, self.Cpsi1V) # / self.scale_factor)
         if full_cov:
             Kxx = self.kern.K(Xnew, which_parts=which_parts)
             var = Kxx - mdot(Kx.T, Kmmi_LmiBLmi, Kx) # NOTE this won't work for plotting
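Finally, the predictive mean drops its division by scale_factor, presumably because the cached Cpsi1V term is already on the data scale and the extra division was double-scaling the output. A shape-level sketch with random stand-ins (M inducing points, Nnew test points, D outputs; values illustrative):

    import numpy as np

    M, Nnew, D = 3, 6, 2
    Kx = np.random.randn(M, Nnew)      # kern.K(Z, Xnew)
    Cpsi1V = np.random.randn(M, D)     # cached term involving psi1 and V

    mu = np.dot(Kx.T, Cpsi1V)          # (Nnew, D) predictive mean, no rescale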