Mirror of https://github.com/SheffieldML/GPy.git, synced 2026-05-15 06:52:39 +02:00

Commit 8fd8288fb8 (parent 4cc26902e4): ensure_default_constraints is on by default

22 changed files with 16 additions and 71 deletions
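The user-facing effect of this commit: model constructors now apply the default constraints themselves, so the explicit m.ensure_default_constraints() calls scattered through the examples and tests below can be dropped. A minimal usage sketch against the GPy API of this era (the toy data is invented for illustration, not taken from the repository):

import numpy as np
import GPy

X = np.random.rand(20, 1)
Y = np.sin(X) + 0.05 * np.random.randn(20, 1)

m = GPy.models.GPRegression(X, Y)
# m.ensure_default_constraints()   # required before this commit; now done inside the constructor
m.optimize()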
@@ -25,7 +25,6 @@ def crescent_data(seed=default_seed): # FIXME
     Y[Y.flatten()==-1] = 0
     m = GPy.models.GPClassification(data['X'], Y)
-    m.ensure_default_constraints()
     #m.update_likelihood_approximation()
     #m.optimize()
     m.pseudo_EM()

@@ -75,7 +74,6 @@ def toy_linear_1d_classification(seed=default_seed):

     # Model definition
     m = GPy.models.GPClassification(data['X'], Y)
-    m.ensure_default_constraints()

     # Optimize
     #m.update_likelihood_approximation()

@@ -106,7 +104,6 @@ def sparse_toy_linear_1d_classification(num_inducing=10,seed=default_seed):
     m = GPy.models.SparseGPClassification(data['X'], Y,num_inducing=num_inducing)
     m['.*len']= 4.

-    m.ensure_default_constraints()
     # Optimize
     #m.update_likelihood_approximation()
     # Parameters optimization:

@@ -137,7 +134,6 @@ def sparse_crescent_data(num_inducing=10, seed=default_seed):
     Y[Y.flatten()==-1]=0

     m = GPy.models.SparseGPClassification(data['X'], Y,num_inducing=num_inducing)
-    m.ensure_default_constraints()
     m['.*len'] = 10.
     #m.update_likelihood_approximation()
     #m.optimize()

@@ -163,7 +159,6 @@ def FITC_crescent_data(num_inducing=10, seed=default_seed):

     m = GPy.models.FITCClassification(data['X'], Y,num_inducing=num_inducing)
     m.constrain_bounded('.*len',1.,1e3)
-    m.ensure_default_constraints()
     m['.*len'] = 3.
     #m.update_likelihood_approximation()
     #m.optimize()
@@ -37,7 +37,6 @@ def BGPLVM(seed=default_seed):
     # m.optimize(messages = 1)
     # m.plot()
     # pb.title('After optimisation')
-    m.ensure_default_constraints()
     m.randomize()
     m.checkgrad(verbose=1)

@@ -53,7 +52,6 @@ def GPLVM_oil_100(optimize=True):
     m.data_labels = data['Y'].argmax(axis=1)

     # optimize
-    m.ensure_default_constraints()
     if optimize:
         m.optimize('scg', messages=1)

@@ -108,7 +106,6 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
     m.data_colors = c
     m.data_t = t

-    m.ensure_default_constraints()
     m['rbf_lengthscale'] = 1. # X.var(0).max() / X.var(0)
     m['noise_variance'] = Y.var() / 100.
     m['bias_variance'] = 0.05

@@ -134,7 +131,6 @@ def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_f_eval=50, plot=
     m['.*lengt'] = 1. # m.X.var(0).max() / m.X.var(0)
     m['noise'] = Yn.var() / 100.

-    m.ensure_default_constraints()

     # optimize
     if optimize:

@@ -159,7 +155,6 @@ def oil_100():
     m = GPy.models.GPLVM(data['X'], 2)

     # optimize
-    m.ensure_default_constraints()
     m.optimize(messages=1, max_iters=2)

     # plot

@@ -239,7 +234,6 @@ def bgplvm_simulation_matlab_compare():
             # X=mu,
             # X_variance=S,
             _debug=False)
-    m.ensure_default_constraints()
     m.auto_scale_factor = True
     m['noise'] = Y.var() / 100.
     m['linear_variance'] = .01

@@ -262,7 +256,6 @@ def bgplvm_simulation(optimize='scg',
     k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) # + kern.bias(Q)
     m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k, _debug=True)
     # m.constrain('variance|noise', logexp_clipped())
-    m.ensure_default_constraints()
     m['noise'] = Y.var() / 100.
     m['linear_variance'] = .01

@@ -291,7 +284,6 @@ def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
     for i, Y in enumerate(Ylist):
         m['{}_noise'.format(i + 1)] = Y.var() / 100.

-    m.ensure_default_constraints()

     # DEBUG
     # np.seterr("raise")

@@ -319,7 +311,6 @@ def brendan_faces():
     # optimize
     m.constrain('rbf|noise|white', GPy.core.transformations.logexp_clipped())

-    m.ensure_default_constraints()
     m.optimize('scg', messages=1, max_f_eval=10000)

     ax = m.plot_latent(which_indices=(0, 1))

@@ -336,7 +327,6 @@ def stick():
     m = GPy.models.GPLVM(data['Y'], 2)

     # optimize
-    m.ensure_default_constraints()
     m.optimize(messages=1, max_f_eval=10000)
     m._set_params(m._get_params())

@@ -359,7 +349,6 @@ def cmu_mocap(subject='35', motion=['01'], in_place=True):
     m = GPy.models.GPLVM(data['Y'], 2, normalize_Y=True)

     # optimize
-    m.ensure_default_constraints()
     m.optimize(messages=1, max_f_eval=10000)

     ax = m.plot_latent()

@@ -391,7 +380,6 @@ def cmu_mocap(subject='35', motion=['01'], in_place=True):
     # m.set('iip', Z)
     # m.set('bias', 1e-4)
     # # optimize
-    # # m.ensure_default_constraints()
     #
     # import pdb; pdb.set_trace()
     # m.optimize('tnc', messages=1)
@@ -18,7 +18,6 @@ def toy_rbf_1d(optimizer='tnc', max_nb_eval_optim=100):
     m = GPy.models.GPRegression(data['X'],data['Y'])

     # optimize
-    m.ensure_default_constraints()
     m.optimize(optimizer, max_f_eval=max_nb_eval_optim)
     # plot
     m.plot()

@@ -36,7 +35,6 @@ def rogers_girolami_olympics(optim_iters=100):
     m['rbf_lengthscale'] = 10

     # optimize
-    m.ensure_default_constraints()
     m.optimize(max_f_eval=optim_iters)

     # plot

@@ -52,7 +50,6 @@ def toy_rbf_1d_50(optim_iters=100):
     m = GPy.models.GPRegression(data['X'],data['Y'])

     # optimize
-    m.ensure_default_constraints()
     m.optimize(max_f_eval=optim_iters)

     # plot

@@ -68,7 +65,6 @@ def silhouette(optim_iters=100):
     m = GPy.models.GPRegression(data['X'],data['Y'])

     # optimize
-    m.ensure_default_constraints()
     m.optimize(messages=True,max_f_eval=optim_iters)

     print(m)

@@ -92,7 +88,6 @@ def coregionalisation_toy2(optim_iters=100):
     m = GPy.models.GPRegression(X,Y,kernel=k)
     m.constrain_fixed('.*rbf_var',1.)
     #m.constrain_positive('.*kappa')
-    m.ensure_default_constraints()
     m.optimize('sim',messages=1,max_f_eval=optim_iters)

     pb.figure()

@@ -124,7 +119,6 @@ def coregionalisation_toy(optim_iters=100):
     m = GPy.models.GPRegression(X,Y,kernel=k)
     m.constrain_fixed('.*rbf_var',1.)
     #m.constrain_positive('kappa')
-    m.ensure_default_constraints()
     m.optimize(max_f_eval=optim_iters)

     pb.figure()

@@ -162,7 +156,6 @@ def coregionalisation_sparse(optim_iters=100):
     m.constrain_fixed('.*rbf_var',1.)
     m.constrain_fixed('iip')
     m.constrain_bounded('noise_variance',1e-3,1e-1)
-    m.ensure_default_constraints()
     m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)

     #plotting:

@@ -189,11 +182,9 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
     log_SNRs = np.linspace(-3., 4., resolution)

     data = GPy.util.datasets.della_gatta_TRP63_gene_expression(gene_number)
-    # Sub sample the data to ensure multiple optima
     #data['Y'] = data['Y'][0::2, :]
     #data['X'] = data['X'][0::2, :]

-    # Remove the mean (no bias kernel to ensure signal/noise is in RBF/white)
     data['Y'] = data['Y'] - np.mean(data['Y'])

     lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)

@@ -220,7 +211,6 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
     optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);

     # optimize
-    m.ensure_default_constraints()
     m.optimize('scg', xtol=1e-6, ftol=1e-6, max_f_eval=optim_iters)

     optim_point_x[1] = m['rbf_lengthscale']

@@ -273,7 +263,6 @@ def sparse_GP_regression_1D(N = 400, num_inducing = 5, optim_iters=100):
     # create simple GP Model
     m = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=num_inducing)

-    m.ensure_default_constraints()

     m.checkgrad(verbose=1)
     m.optimize('tnc', messages = 1, max_f_eval=optim_iters)

@@ -294,7 +283,6 @@ def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100):
     m = GPy.models.SparseGPRegression(X,Y,kernel, num_inducing = num_inducing)

     # contrain all parameters to be positive (but not inducing inputs)
-    m.ensure_default_constraints()
     m.set('.*len',2.)

     m.checkgrad()

@@ -320,7 +308,6 @@ def uncertain_inputs_sparse_regression(optim_iters=100):

     # create simple GP Model - no input uncertainty on this one
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
-    m.ensure_default_constraints()
     m.optimize('scg', messages=1, max_f_eval=optim_iters)
     m.plot(ax=axes[0])
     axes[0].set_title('no input uncertainty')

@@ -328,7 +315,6 @@ def uncertain_inputs_sparse_regression(optim_iters=100):

     #the same Model with uncertainty
     m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S)
-    m.ensure_default_constraints()
     m.optimize('scg', messages=1, max_f_eval=optim_iters)
     m.plot(ax=axes[1])
     axes[1].set_title('with input uncertainty')
@@ -15,7 +15,6 @@ def toy_1d():
     Y = np.sin(X) + np.cos(0.3*X) + np.random.randn(*X.shape)/np.sqrt(50.)

     m = GPy.models.SVIGPRegression(X,Y, batchsize=10, Z=Z)
-    m.ensure_default_constraints()
     m.constrain_bounded('noise_variance',1e-3,1e-1)

     m.param_steplength = 1e-4
@@ -24,7 +24,6 @@ def tuto_GP_regression():
     print m
     m.plot()

-    m.ensure_default_constraints()
     m.constrain_positive('')

     m.unconstrain('') # may be used to remove the previous constrains

@@ -135,7 +134,6 @@ def tuto_kernel_overview():
     pb.ylabel("+ ",rotation='horizontal',fontsize='30')
     m.plot(ax=axs, which_parts=[False,False,False,True])

-    m.ensure_default_constraints()
     return(m)

@@ -144,6 +142,5 @@ def model_interaction():
     Y = np.sin(X) + np.random.randn(*X.shape)*0.01 + 5.
     k = GPy.kern.rbf(1) + GPy.kern.bias(1)
     m = GPy.models.GPRegression(X, Y, kernel=k)
-    m.ensure_default_constraints()
     return m
@@ -241,9 +241,9 @@ class rbf(Kernpart):
         # here are the "statistics" for psi1 and psi2
         if not np.array_equal(Z, self._Z):
             #Z has changed, compute Z specific stuff
-            self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # num_inducing,num_inducing,input_dim
-            self._psi2_Zdist = 0.5*(Z[:,None,:]-Z[None,:,:]) # num_inducing,num_inducing,input_dim
-            self._psi2_Zdist_sq = np.square(self._psi2_Zdist/self.lengthscale) # num_inducing,num_inducing,input_dim
+            self._psi2_Zhat = 0.5*(Z[:,None,:] +Z[None,:,:]) # M,M,Q
+            self._psi2_Zdist = 0.5*(Z[:,None,:]-Z[None,:,:]) # M,M,Q
+            self._psi2_Zdist_sq = np.square(self._psi2_Zdist/self.lengthscale) # M,M,Q
             self._Z = Z

         if not (np.array_equal(Z, self._Z) and np.array_equal(mu, self._mu) and np.array_equal(S, self._S)):

@@ -257,12 +257,12 @@ class rbf(Kernpart):
         self._psi1 = self.variance*np.exp(self._psi1_exponent)

         #psi2
-        self._psi2_denom = 2.*S[:,None,None,:]/self.lengthscale2+1. # N,num_inducing,num_inducing,input_dim
+        self._psi2_denom = 2.*S[:,None,None,:]/self.lengthscale2+1. # N,M,M,Q
         self._psi2_mudist, self._psi2_mudist_sq, self._psi2_exponent, _ = self.weave_psi2(mu,self._psi2_Zhat)
-        #self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,num_inducing,num_inducing,input_dim
+        #self._psi2_mudist = mu[:,None,None,:]-self._psi2_Zhat #N,M,M,Q
         #self._psi2_mudist_sq = np.square(self._psi2_mudist)/(self.lengthscale2*self._psi2_denom)
-        #self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,num_inducing,num_inducing
-        self._psi2 = np.square(self.variance)*np.exp(self._psi2_exponent) # N,num_inducing,num_inducing
+        #self._psi2_exponent = np.sum(-self._psi2_Zdist_sq -self._psi2_mudist_sq -0.5*np.log(self._psi2_denom),-1) #N,M,M,Q
+        self._psi2 = np.square(self.variance)*np.exp(self._psi2_exponent) # N,M,M,Q

         #store matrices for caching
         self._Z, self._mu, self._S = Z, mu,S
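The kernel changes above only shorten the shape comments (num_inducing,num_inducing,input_dim becomes M,M,Q); the computation is untouched. For reference, a small self-contained NumPy sketch of the broadcasting that gives the psi2 midpoint and distance arrays their M,M,Q shape (M and Q here are arbitrary illustrative sizes):

import numpy as np

M, Q = 4, 2                                    # illustrative sizes: inducing points, input dimension
Z = np.random.randn(M, Q)

Zhat = 0.5 * (Z[:, None, :] + Z[None, :, :])   # pairwise midpoints between inducing points
Zdist = 0.5 * (Z[:, None, :] - Z[None, :, :])  # pairwise half-distances

print(Zhat.shape, Zdist.shape)                 # (4, 4, 2) (4, 4, 2), i.e. M, M, Q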
@@ -60,7 +60,7 @@ class BayesianGPLVM(SparseGP, GPLVM):
         self._savedABCD = []

         SparseGP.__init__(self, X, likelihood, kernel, Z=Z, X_variance=X_variance, **kwargs)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()

     @property
     def oldps(self):

@@ -44,4 +44,4 @@ class FITCClassification(FITC):
         assert Z.shape[1]==X.shape[1]

         FITC.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()

@@ -38,4 +38,4 @@ class GPClassification(GP):
             raise Warning, 'likelihood.data and Y are different.'

         GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()

@@ -32,4 +32,4 @@ class GPRegression(GP):
             likelihood = likelihoods.Gaussian(Y,normalize=normalize_Y)

         GP.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()

@@ -33,7 +33,7 @@ class GPLVM(GP):
             kernel = kern.rbf(input_dim, ARD=input_dim>1) + kern.bias(input_dim, np.exp(-2)) + kern.white(input_dim, np.exp(-2))
         likelihood = Gaussian(Y, normalize=normalize_Y)
         GP.__init__(self, X, likelihood, kernel, normalize_X=False)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()

     def initialise_latent(self, init, input_dim, Y):
         if init == 'PCA':

@@ -79,7 +79,7 @@ class MRD(Model):
         self.MQ = self.num_inducing * self.input_dim

         Model.__init__(self)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()

     @property
     def X(self):

@@ -44,4 +44,4 @@ class SparseGPClassification(SparseGP):
         assert Z.shape[1]==X.shape[1]

         SparseGP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()

@@ -42,4 +42,4 @@ class SparseGPRegression(SparseGP):
             likelihood = likelihoods.Gaussian(Y, normalize=normalize_Y)

         SparseGP.__init__(self, X, likelihood, kernel, Z=Z, normalize_X=normalize_X, X_variance=X_variance)
-        self._set_params(self._get_params())
+        self.ensure_default_constraints()

@@ -26,6 +26,7 @@ class SparseGPLVM(SparseGPRegression, GPLVM):
     def __init__(self, Y, input_dim, kernel=None, init='PCA', num_inducing=10):
         X = self.initialise_latent(init, input_dim, Y)
         SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
+        self.ensure_default_constraints()

     def _get_param_names(self):
         return (sum([['X_%i_%i' % (n, q) for q in range(self.input_dim)] for n in range(self.num_data)], [])
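The model-class hunks above are the substance of the commit: each constructor now finishes with self.ensure_default_constraints() (replacing the old self._set_params(self._get_params()) call, or added where it was missing), so the default positivity constraints are in place as soon as a model is built. A hypothetical, simplified sketch of the pattern, not GPy's actual base-class code, assuming ensure_default_constraints constrains the usual positive parameters such as variances and lengthscales:

class ToyGPModel(object):
    # Illustrative stand-in for a GPy model class after this commit.
    def __init__(self, X, Y):
        self.X, self.Y = X, Y
        self.positive_params = ['rbf_variance', 'rbf_lengthscale', 'noise_variance']
        self.constraints = {}
        # Applied automatically at construction time, so calling code and
        # tests no longer need an explicit m.ensure_default_constraints().
        self.ensure_default_constraints()

    def ensure_default_constraints(self):
        # Constrain every still-unconstrained positive parameter.
        for name in self.positive_params:
            self.constraints.setdefault(name, '+ve')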
@@ -16,7 +16,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -29,7 +28,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -42,7 +40,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -55,7 +52,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -69,7 +65,6 @@ class BGPLVMTests(unittest.TestCase):
         Y -= Y.mean(axis=0)
         k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = BayesianGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -14,7 +14,6 @@ class GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -26,7 +25,6 @@ class GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -38,7 +36,6 @@ class GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = GPy.models.GPLVM(Y, input_dim, kernel = k)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -24,7 +24,6 @@ class MRDTests(unittest.TestCase):
         likelihood_list = [GPy.likelihoods.Gaussian(Y) for Y in Ylist]

         m = GPy.models.MRD(likelihood_list, input_dim=input_dim, kernels=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()

         self.assertTrue(m.checkgrad())
@@ -14,7 +14,6 @@ class PriorTests(unittest.TestCase):
         y += 0.05*np.random.randn(len(X))
         X, y = X[:, None], y[:, None]
         m = GPy.models.GPRegression(X, y)
-        m.ensure_default_constraints()
         lognormal = GPy.priors.LogGaussian(1, 2)
         m.set_prior('rbf', lognormal)
         m.randomize()

@@ -28,7 +27,6 @@ class PriorTests(unittest.TestCase):
         y += 0.05*np.random.randn(len(X))
         X, y = X[:, None], y[:, None]
         m = GPy.models.GPRegression(X, y)
-        m.ensure_default_constraints()
         Gamma = GPy.priors.Gamma(1, 1)
         m.set_prior('rbf', Gamma)
         m.randomize()

@@ -42,7 +40,6 @@ class PriorTests(unittest.TestCase):
         y += 0.05*np.random.randn(len(X))
         X, y = X[:, None], y[:, None]
         m = GPy.models.GPRegression(X, y)
-        m.ensure_default_constraints()
         gaussian = GPy.priors.Gaussian(1, 1)
         success = False
@@ -113,7 +113,6 @@ if __name__ == "__main__":
    # Y -= Y.mean(axis=0)
    # k = GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
    # m = GPy.models.Bayesian_GPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-   # m.ensure_default_constraints()
    # m.randomize()
    # # self.assertTrue(m.checkgrad())
    numpy.random.seed(0)

@@ -146,7 +145,6 @@ if __name__ == "__main__":
    # num_inducing=num_inducing, kernel=GPy.kern.rbf(input_dim))
    m3 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
            num_inducing=num_inducing, kernel=GPy.kern.linear(input_dim, ARD=True, variances=numpy.random.rand(input_dim)))
-   m3.ensure_default_constraints()
    # + GPy.kern.bias(input_dim))
    # m4 = PsiStatModel('psi2', X=X, X_variance=X_var, Z=Z,
    # num_inducing=num_inducing, kernel=GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim))
@@ -15,7 +15,6 @@ class sparse_GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -27,7 +26,6 @@ class sparse_GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())

@@ -39,7 +37,6 @@ class sparse_GPLVMTests(unittest.TestCase):
         Y = np.random.multivariate_normal(np.zeros(N),K,input_dim).T
         k = GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001)
         m = SparseGPLVM(Y, input_dim, kernel=k, num_inducing=num_inducing)
-        m.ensure_default_constraints()
         m.randomize()
         self.assertTrue(m.checkgrad())
@@ -37,7 +37,6 @@ class GradientTests(unittest.TestCase):
         noise = GPy.kern.white(dimension)
         kern = kern + noise
         m = model_fit(X, Y, kernel=kern)
-        m.ensure_default_constraints()
         m.randomize()
         # contrain all parameters to be positive
         self.assertTrue(m.checkgrad())

@@ -150,7 +149,6 @@ class GradientTests(unittest.TestCase):
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
         m = GPy.models.GPLVM(Y, input_dim, kernel=k)
-        m.ensure_default_constraints()
         self.assertTrue(m.checkgrad())

     def test_GPLVM_rbf_linear_white_kern_2D(self):

@@ -161,7 +159,6 @@ class GradientTests(unittest.TestCase):
         K = k.K(X)
         Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
         m = GPy.models.GPLVM(Y, input_dim, init='PCA', kernel=k)
-        m.ensure_default_constraints()
         self.assertTrue(m.checkgrad())

     def test_GP_EP_probit(self):

@@ -195,7 +192,6 @@ class GradientTests(unittest.TestCase):
         k = GPy.kern.rbf(1) + GPy.kern.white(1)
         Y = np.hstack([np.ones(N/2),np.zeros(N/2)])[:,None]
         m = GPy.models.FITCClassification(X, Y=Y)
-        m.ensure_default_constraints()
         m.update_likelihood_approximation()
         self.assertTrue(m.checkgrad())
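With the constraints applied in the constructors, the gradient-check tests above reduce to build, randomize, checkgrad. A hypothetical test in the same style (the test name and toy data are invented; the GPy calls mirror those in the hunks above):

import unittest
import numpy as np
import GPy

class DefaultConstraintTests(unittest.TestCase):
    def test_gp_regression_rbf_white(self):
        np.random.seed(0)
        X = np.random.rand(30, 1)
        Y = np.sin(X) + 0.05 * np.random.randn(30, 1)
        k = GPy.kern.rbf(1) + GPy.kern.white(1)
        m = GPy.models.GPRegression(X, Y, kernel=k)
        # no m.ensure_default_constraints() needed after this commit
        m.randomize()
        self.assertTrue(m.checkgrad())

if __name__ == "__main__":
    unittest.main()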