Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-11 04:52:37 +02:00)
Commit: 2a39440619 ("dim reduc plotting")
Parent: 4b4af86e5b
2 changed files with 42 additions and 42 deletions
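Summary of the change: the latent-dimension argument is renamed from input_dim to Q throughout the dimensionality-reduction examples (BGPLVM, swiss_roll, BGPLVM_oil, _simulate_sincos, bgplvm_simulation, mrd_simulation, brendan_faces), and the MRD plotting methods drop their descriptive default fignum strings in favour of fignum=None, with the example code now passing figure names explicitly at the call site.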
@@ -14,20 +14,20 @@ default_seed = np.random.seed(123344)
 def BGPLVM(seed=default_seed):
     N = 10
     M = 3
-    input_dim = 2
+    Q = 2
     D = 4
     # generate GPLVM-like data
-    X = np.random.rand(N, input_dim)
+    X = np.random.rand(N, Q)
-    k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
+    k = GPy.kern.rbf(Q) + GPy.kern.white(Q, 0.00001)
     K = k.K(X)
     Y = np.random.multivariate_normal(np.zeros(N), K, D).T

-    k = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.linear(input_dim, ARD=True) + GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.white(input_dim)
+    k = GPy.kern.rbf(Q, ARD=True) + GPy.kern.linear(Q, ARD=True) + GPy.kern.rbf(Q, ARD=True) + GPy.kern.white(Q)
-    # k = GPy.kern.rbf(input_dim) + GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim)
+    # k = GPy.kern.rbf(Q) + GPy.kern.rbf(Q) + GPy.kern.white(Q)
-    # k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
+    # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
-    # k = GPy.kern.rbf(input_dim, ARD = False) + GPy.kern.white(input_dim, 0.00001)
+    # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)

-    m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel=k, M=M)
+    m = GPy.models.Bayesian_GPLVM(Y, Q, kernel=k, M=M)
     m.constrain_positive('(rbf|bias|noise|white|S)')
     # m.constrain_fixed('S', 1)

@@ -63,7 +63,7 @@ def GPLVM_oil_100(optimize=True):
     m.plot_latent(labels=m.data_labels)
     return m

-def swiss_roll(optimize=True, N=1000, M=15, input_dim=4, sigma=.2, plot=False):
+def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
     from GPy.util.datasets import swiss_roll
     from GPy.core.transformations import logexp_clipped

@@ -79,10 +79,10 @@ def swiss_roll(optimize=True, N=1000, M=15, input_dim=4, sigma=.2, plot=False):
         from sklearn.manifold.isomap import Isomap
         iso = Isomap().fit(Y)
         X = iso.embedding_
-        if input_dim > 2:
+        if Q > 2:
-            X = np.hstack((X, np.random.randn(N, input_dim - 2)))
+            X = np.hstack((X, np.random.randn(N, Q - 2)))
     except ImportError:
-        X = np.random.randn(N, input_dim)
+        X = np.random.randn(N, Q)

     if plot:
         from mpl_toolkits import mplot3d
@@ -98,14 +98,14 @@ def swiss_roll(optimize=True, N=1000, M=15, input_dim=4, sigma=.2, plot=False):


     var = .5
-    S = (var * np.ones_like(X) + np.clip(np.random.randn(N, input_dim) * var ** 2,
+    S = (var * np.ones_like(X) + np.clip(np.random.randn(N, Q) * var ** 2,
                                          - (1 - var),
                                          (1 - var))) + .001
     Z = np.random.permutation(X)[:M]

-    kernel = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.bias(input_dim, np.exp(-2)) + GPy.kern.white(input_dim, np.exp(-2))
+    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))

-    m = Bayesian_GPLVM(Y, input_dim, X=X, X_variance=S, M=M, Z=Z, kernel=kernel)
+    m = Bayesian_GPLVM(Y, Q, X=X, X_variance=S, M=M, Z=Z, kernel=kernel)
     m.data_colors = c
     m.data_t = t

@@ -118,18 +118,18 @@ def swiss_roll(optimize=True, N=1000, M=15, input_dim=4, sigma=.2, plot=False):
         m.optimize('scg', messages=1)
     return m

-def BGPLVM_oil(optimize=True, N=100, input_dim=5, M=25, max_f_eval=4e3, plot=False, **k):
+def BGPLVM_oil(optimize=True, N=100, Q=5, M=25, max_f_eval=4e3, plot=False, **k):
     np.random.seed(0)
     data = GPy.util.datasets.oil()
     from GPy.core.transformations import logexp_clipped

     # create simple GP model
-    kernel = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.bias(input_dim, np.exp(-2)) + GPy.kern.white(input_dim, np.exp(-2))
+    kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
     Y = data['X'][:N]
     Yn = Y - Y.mean(0)
     Yn /= Yn.std(0)

-    m = GPy.models.Bayesian_GPLVM(Yn, input_dim, kernel=kernel, M=M, **k)
+    m = GPy.models.Bayesian_GPLVM(Yn, Q, kernel=kernel, M=M, **k)
     m.data_labels = data['Y'][:N].argmax(axis=1)

     # m.constrain('variance|leng', logexp_clipped())
@@ -168,7 +168,7 @@ def oil_100():



-def _simulate_sincos(D1, D2, D3, N, M, input_dim, plot_sim=False):
+def _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim=False):
     x = np.linspace(0, 4 * np.pi, N)[:, None]
     s1 = np.vectorize(lambda x: np.sin(x))
     s2 = np.vectorize(lambda x: np.cos(x))
@@ -228,13 +228,13 @@ def bgplvm_simulation_matlab_compare():
     Y = sim_data['Y']
     S = sim_data['S']
     mu = sim_data['mu']
-    M, [_, input_dim] = 3, mu.shape
+    M, [_, Q] = 3, mu.shape

     from GPy.models import mrd
     from GPy import kern
     reload(mrd); reload(kern)
-    k = kern.linear(input_dim, ARD=True) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2))
+    k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
-    m = Bayesian_GPLVM(Y, input_dim, init="PCA", M=M, kernel=k,
+    m = Bayesian_GPLVM(Y, Q, init="PCA", M=M, kernel=k,
                        # X=mu,
                        # X_variance=S,
                        _debug=False)
@@ -248,8 +248,8 @@ def bgplvm_simulation(optimize='scg',
                       plot=True,
                       max_f_eval=2e4):
     # from GPy.core.transformations import logexp_clipped
-    D1, D2, D3, N, M, input_dim = 15, 8, 8, 100, 3, 5
+    D1, D2, D3, N, M, Q = 15, 8, 8, 100, 3, 5
-    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, input_dim, plot)
+    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot)

     from GPy.models import mrd
     from GPy import kern
@@ -258,8 +258,8 @@ def bgplvm_simulation(optimize='scg',

     Y = Ylist[0]

-    k = kern.linear(input_dim, ARD=True) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2)) # + kern.bias(input_dim)
+    k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) # + kern.bias(Q)
-    m = Bayesian_GPLVM(Y, input_dim, init="PCA", M=M, kernel=k, _debug=True)
+    m = Bayesian_GPLVM(Y, Q, init="PCA", M=M, kernel=k, _debug=True)
     # m.constrain('variance|noise', logexp_clipped())
     m.ensure_default_constraints()
     m['noise'] = Y.var() / 100.
@@ -276,16 +276,16 @@ def bgplvm_simulation(optimize='scg',
     return m

 def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
-    D1, D2, D3, N, M, input_dim = 150, 200, 400, 500, 3, 7
+    D1, D2, D3, N, M, Q = 150, 200, 400, 500, 3, 7
-    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, input_dim, plot_sim)
+    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim)

     from GPy.models import mrd
     from GPy import kern

     reload(mrd); reload(kern)

-    k = kern.linear(input_dim, [.05] * input_dim, ARD=True) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2))
+    k = kern.linear(Q, [.05] * Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
-    m = mrd.MRD(Ylist, input_dim=input_dim, M=M, kernels=k, initx="", initz='permute', **kw)
+    m = mrd.MRD(Ylist, Q=Q, M=M, kernels=k, initx="", initz='permute', **kw)

     for i, Y in enumerate(Ylist):
         m['{}_noise'.format(i + 1)] = Y.var() / 100.
@@ -299,21 +299,21 @@ def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
         print "Optimizing Model:"
         m.optimize('scg', messages=1, max_iters=5e4, max_f_eval=5e4)
     if plot:
-        m.plot_X_1d()
+        m.plot_X_1d("MRD Latent Space 1D")
-        m.plot_scales()
+        m.plot_scales("MRD Scales")
     return m

 def brendan_faces():
     from GPy import kern
     data = GPy.util.datasets.brendan_faces()
-    input_dim = 2
+    Q = 2
     Y = data['Y'][0:-1:10, :]
     # Y = data['Y']
     Yn = Y - Y.mean()
     Yn /= Yn.std()

-    m = GPy.models.GPLVM(Yn, input_dim)
+    m = GPy.models.GPLVM(Yn, Q)
-    # m = GPy.models.Bayesian_GPLVM(Yn, input_dim, M=100)
+    # m = GPy.models.Bayesian_GPLVM(Yn, Q, M=100)

     # optimize
     m.constrain('rbf|noise|white', GPy.core.transformations.logexp_clipped())
@@ -376,11 +376,11 @@ def cmu_mocap(subject='35', motion=['01'], in_place=True):
     # X -= X.mean(axis=0)
     # X /= X.std(axis=0)
     #
-    # input_dim = 10
+    # Q = 10
     # M = 30
     #
-    # kernel = GPy.kern.rbf(input_dim, ARD=True) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim)
+    # kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q) + GPy.kern.white(Q)
-    # m = GPy.models.Bayesian_GPLVM(X, input_dim, kernel=kernel, M=M)
+    # m = GPy.models.Bayesian_GPLVM(X, Q, kernel=kernel, M=M)
     # # m.scale_factor = 100.0
     # m.constrain_positive('(white|noise|bias|X_variance|rbf_variance|rbf_length)')
     # from sklearn import cluster
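The second changed file (the MRD model class, below) removes the descriptive default figure names from the plotting methods, so naming a figure now happens at the call site, as in the updated mrd_simulation above. A minimal sketch of the new calling convention, taken from the call sites in this diff (2012-era GPy API; not verified against a live install):

    # Hedged sketch: mirrors the updated call sites in mrd_simulation above.
    # Assumes the examples module from this diff is importable as shown.
    m = mrd_simulation(optimize=False, plot=False, plot_sim=False)
    m.plot_X_1d("MRD Latent Space 1D")   # was m.plot_X_1d()
    m.plot_scales("MRD Scales")          # default fignum="MRD Scales" now passed by the caller
    m.plot_latent("MRD Latent Spaces")   # old default was fignum="MRD Latent Spaces"
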
@@ -277,19 +277,19 @@ class MRD(model):
     def plot_X_1d(self):
         return self.gref.plot_X_1d()

-    def plot_X(self, fignum="MRD Predictions", ax=None):
+    def plot_X(self, fignum=None, ax=None):
         fig = self._handle_plotting(fignum, ax, lambda i, g, ax: ax.imshow(g.X))
         return fig

-    def plot_predict(self, fignum="MRD Predictions", ax=None, **kwargs):
+    def plot_predict(self, fignum=None, ax=None, **kwargs):
         fig = self._handle_plotting(fignum, ax, lambda i, g, ax: ax.imshow(g.predict(g.X)[0], **kwargs))
         return fig

-    def plot_scales(self, fignum="MRD Scales", ax=None, *args, **kwargs):
+    def plot_scales(self, fignum=None, ax=None, *args, **kwargs):
         fig = self._handle_plotting(fignum, ax, lambda i, g, ax: g.kern.plot_ARD(ax=ax, *args, **kwargs))
         return fig

-    def plot_latent(self, fignum="MRD Latent Spaces", ax=None, *args, **kwargs):
+    def plot_latent(self, fignum=None, ax=None, *args, **kwargs):
         fig = self._handle_plotting(fignum, ax, lambda i, g, ax: g.plot_latent(ax=ax, *args, **kwargs))
         return fig

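All four plotting methods above delegate to self._handle_plotting(fignum, ax, plot_function), which is not part of this diff. A minimal sketch of what such a helper might look like, assuming (hypothetically) that the MRD model keeps one GP per output view in a self.bgplvms list and that matplotlib is the backend; the real GPy helper may differ:

    import matplotlib.pyplot as plt

    def _handle_plotting(self, fignum, ax, plot_f):
        # Hypothetical reconstruction, not the code from this commit.
        # With fignum=None (the new default) matplotlib picks the next free
        # figure; a string reuses or creates the named figure, which is what
        # the old string defaults effectively did.
        if ax is None:
            fig = plt.figure(num=fignum)
        for i, g in enumerate(self.bgplvms):
            # one subplot per view, unless the caller supplied an axis
            plot_ax = fig.add_subplot(1, len(self.bgplvms), i + 1) if ax is None else ax
            plot_f(i, g, plot_ax)  # the per-view lambda passed by plot_X, plot_scales, ...
        plt.draw()
        return fig if ax is None else ax.figure
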