diff --git a/GPy/examples/dimensionality_reduction.py b/GPy/examples/dimensionality_reduction.py
index 60726b1d..f7d9cda4 100644
--- a/GPy/examples/dimensionality_reduction.py
+++ b/GPy/examples/dimensionality_reduction.py
@@ -13,7 +13,7 @@ default_seed = np.random.seed(123344)

 def BGPLVM(seed=default_seed):
     N = 10
-    M = 3
+    num_inducing = 3
     Q = 2
     D = 4
     # generate GPLVM-like data
@@ -27,7 +27,7 @@ def BGPLVM(seed=default_seed):
     # k = GPy.kern.rbf(Q) + GPy.kern.bias(Q) + GPy.kern.white(Q, 0.00001)
     # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
-    m = GPy.models.BayesianGPLVM(Y, Q, kernel=k, M=M)
+    m = GPy.models.BayesianGPLVM(Y, Q, kernel=k, num_inducing=num_inducing)

     m.constrain_positive('(rbf|bias|noise|white|S)')
     # m.constrain_fixed('S', 1)
@@ -63,7 +63,7 @@ def GPLVM_oil_100(optimize=True):
     m.plot_latent(labels=m.data_labels)
     return m

-def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
+def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False):
     from GPy.util.datasets import swiss_roll
     from GPy.core.transformations import logexp_clipped

@@ -101,11 +101,11 @@ def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
     S = (var * np.ones_like(X) + np.clip(np.random.randn(N, Q) * var ** 2,
                                          - (1 - var),
                                          (1 - var))) + .001
-    Z = np.random.permutation(X)[:M]
+    Z = np.random.permutation(X)[:num_inducing]

     kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))

-    m = BayesianGPLVM(Y, Q, X=X, X_variance=S, M=M, Z=Z, kernel=kernel)
+    m = BayesianGPLVM(Y, Q, X=X, X_variance=S, num_inducing=num_inducing, Z=Z, kernel=kernel)
     m.data_colors = c
     m.data_t = t
@@ -118,7 +118,7 @@ def swiss_roll(optimize=True, N=1000, M=15, Q=4, sigma=.2, plot=False):
         m.optimize('scg', messages=1)
     return m

-def BGPLVM_oil(optimize=True, N=100, Q=5, M=25, max_f_eval=4e3, plot=False, **k):
+def BGPLVM_oil(optimize=True, N=100, Q=5, num_inducing=25, max_f_eval=4e3, plot=False, **k):
     np.random.seed(0)
     data = GPy.util.datasets.oil()
     from GPy.core.transformations import logexp_clipped
@@ -129,7 +129,7 @@ def BGPLVM_oil(optimize=True, N=100, Q=5, M=25, max_f_eval=4e3, plot=False, **k)
     Yn = Y - Y.mean(0)
     Yn /= Yn.std(0)

-    m = GPy.models.BayesianGPLVM(Yn, Q, kernel=kernel, M=M, **k)
+    m = GPy.models.BayesianGPLVM(Yn, Q, kernel=kernel, num_inducing=num_inducing, **k)
     m.data_labels = data['Y'][:N].argmax(axis=1)

     # m.constrain('variance|leng', logexp_clipped())
@@ -168,7 +168,7 @@ def oil_100():


-def _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim=False):
+def _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim=False):
     x = np.linspace(0, 4 * np.pi, N)[:, None]
     s1 = np.vectorize(lambda x: np.sin(x))
     s2 = np.vectorize(lambda x: np.cos(x))
@@ -228,13 +228,13 @@ def bgplvm_simulation_matlab_compare():
     Y = sim_data['Y']
     S = sim_data['S']
     mu = sim_data['mu']
-    M, [_, Q] = 3, mu.shape
+    num_inducing, [_, Q] = 3, mu.shape

     from GPy.models import mrd
     from GPy import kern
     reload(mrd); reload(kern)

     k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
-    m = BayesianGPLVM(Y, Q, init="PCA", M=M, kernel=k,
+    m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k,
                       # X=mu,
                       # X_variance=S,
                       _debug=False)
@@ -248,8 +248,8 @@ def bgplvm_simulation(optimize='scg',
                       plot=True, max_f_eval=2e4):
     # from GPy.core.transformations import logexp_clipped

-    D1, D2, D3, N, M, Q = 15, 8, 8, 100, 3, 5
-    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot)
+    D1, D2, D3, N, num_inducing, Q = 15, 8, 8, 100, 3, 5
+    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot)

     from GPy.models import mrd
     from GPy import kern
@@ -259,7 +259,7 @@ def bgplvm_simulation(optimize='scg',
     Y = Ylist[0]

     k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) # + kern.bias(Q)
-    m = BayesianGPLVM(Y, Q, init="PCA", M=M, kernel=k, _debug=True)
+    m = BayesianGPLVM(Y, Q, init="PCA", num_inducing=num_inducing, kernel=k, _debug=True)
     # m.constrain('variance|noise', logexp_clipped())
     m.ensure_default_constraints()
     m['noise'] = Y.var() / 100.
@@ -276,8 +276,8 @@ def bgplvm_simulation(optimize='scg',
     return m

 def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
-    D1, D2, D3, N, M, Q = 150, 200, 400, 500, 3, 7
-    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, M, Q, plot_sim)
+    D1, D2, D3, N, num_inducing, Q = 150, 200, 400, 500, 3, 7
+    slist, Slist, Ylist = _simulate_sincos(D1, D2, D3, N, num_inducing, Q, plot_sim)

     from GPy.models import mrd
     from GPy import kern
@@ -285,7 +285,7 @@ def mrd_simulation(optimize=True, plot=True, plot_sim=True, **kw):
     reload(mrd); reload(kern)

     k = kern.linear(Q, [.05] * Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
-    m = mrd.MRD(Ylist, input_dim=Q, M=M, kernels=k, initx="", initz='permute', **kw)
+    m = mrd.MRD(Ylist, input_dim=Q, num_inducing=num_inducing, kernels=k, initx="", initz='permute', **kw)

     for i, Y in enumerate(Ylist):
         m['{}_noise'.format(i + 1)] = Y.var() / 100.
@@ -313,7 +313,7 @@ def brendan_faces():
     Yn /= Yn.std()

     m = GPy.models.GPLVM(Yn, Q)
-    # m = GPy.models.BayesianGPLVM(Yn, Q, M=100)
+    # m = GPy.models.BayesianGPLVM(Yn, Q, num_inducing=100)

     # optimize
     m.constrain('rbf|noise|white', GPy.core.transformations.logexp_clipped())
@@ -377,16 +377,16 @@ def cmu_mocap(subject='35', motion=['01'], in_place=True):
 # X /= X.std(axis=0)
 #
 # Q = 10
-# M = 30
+# num_inducing = 30
 #
 # kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q) + GPy.kern.white(Q)
-# m = GPy.models.BayesianGPLVM(X, Q, kernel=kernel, M=M)
+# m = GPy.models.BayesianGPLVM(X, Q, kernel=kernel, num_inducing=num_inducing)
 #
 # m.scale_factor = 100.0
 # m.constrain_positive('(white|noise|bias|X_variance|rbf_variance|rbf_length)')
 # from sklearn import cluster
-# km = cluster.KMeans(M, verbose=10)
+# km = cluster.KMeans(num_inducing, verbose=10)
 # Z = km.fit(m.X).cluster_centers_
-# # Z = GPy.util.misc.kmm_init(m.X, M)
+# # Z = GPy.util.misc.kmm_init(m.X, num_inducing)
 # m.set('iip', Z)
 # m.set('bias', 1e-4)
 # # optimize
diff --git a/GPy/examples/regression.py b/GPy/examples/regression.py
index be3a71bd..64a2d12c 100644
--- a/GPy/examples/regression.py
+++ b/GPy/examples/regression.py
@@ -151,8 +151,8 @@ def coregionalisation_sparse(optim_iters=100):
     Y2 = -np.sin(X2) + np.random.randn(*X2.shape)*0.05
     Y = np.vstack((Y1,Y2))

-    M = 40
-    Z = np.hstack((np.random.rand(M,1)*8,np.random.randint(0,2,M)[:,None]))
+    num_inducing = 40
+    Z = np.hstack((np.random.rand(num_inducing,1)*8,np.random.randint(0,2,num_inducing)[:,None]))

     k1 = GPy.kern.rbf(1)
     k2 = GPy.kern.Coregionalise(2,2)
@@ -261,7 +261,7 @@ def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
     return np.array(lls)

-def sparse_GP_regression_1D(N = 400, M = 5, optim_iters=100):
+def sparse_GP_regression_1D(N = 400, num_inducing = 5, optim_iters=100):
     """Run a 1D example of a sparse GP regression."""
     # sample inputs and outputs
     X = np.random.uniform(-3.,3.,(N,1))
@@ -271,7 +271,7 @@ def sparse_GP_regression_1D(N = 400, M = 5, optim_iters=100):
     noise = GPy.kern.white(1)
     kernel = rbf + noise

     # create simple GP model
-    m = GPy.models.SparseGPRegression(X, Y, kernel, M=M)
+    m = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=num_inducing)

     m.ensure_default_constraints()
@@ -280,7 +280,7 @@ def sparse_GP_regression_1D(N = 400, M = 5, optim_iters=100):
     m.plot()
     return m

-def sparse_GP_regression_2D(N = 400, M = 50, optim_iters=100):
+def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100):
     """Run a 2D example of a sparse GP regression."""
     X = np.random.uniform(-3.,3.,(N,2))
     Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(N,1)*0.05
@@ -291,7 +291,7 @@ def sparse_GP_regression_2D(N = 400, M = 50, optim_iters=100):
     kernel = rbf + noise

     # create simple GP model
-    m = GPy.models.SparseGPRegression(X,Y,kernel, M = M)
+    m = GPy.models.SparseGPRegression(X,Y,kernel, num_inducing = num_inducing)

     # contrain all parameters to be positive (but not inducing inputs)
     m.ensure_default_constraints()
diff --git a/GPy/kern/coregionalise.py b/GPy/kern/coregionalise.py
index 7fe922fd..329d1015 100644
--- a/GPy/kern/coregionalise.py
+++ b/GPy/kern/coregionalise.py
@@ -69,14 +69,14 @@ class Coregionalise(kernpart):
         else:
             index2 = np.asarray(index2,dtype=np.int)
         code="""
-        for(int i=0;i
diff --git a/GPy/kern/rbf.py b/GPy/kern/rbf.py
--- a/GPy/kern/rbf.py
+++ b/GPy/kern/rbf.py
         """
         weave.inline(code, support_code=support_code, libraries=['gomp'],
-                     arg_names=['N','M','input_dim','mu','Zhat','mudist_sq','mudist','lengthscale2','_psi2_denom','psi2_Zdist_sq','psi2_exponent','half_log_psi2_denom','psi2','variance_sq'],
+                     arg_names=['N','num_inducing','input_dim','mu','Zhat','mudist_sq','mudist','lengthscale2','_psi2_denom','psi2_Zdist_sq','psi2_exponent','half_log_psi2_denom','psi2','variance_sq'],
                      type_converters=weave.converters.blitz,**self.weave_options)
         return mudist,mudist_sq, psi2_exponent, psi2
diff --git a/GPy/kern/sympykern.py b/GPy/kern/sympykern.py
index db3cc976..0ec86acf 100644
--- a/GPy/kern/sympykern.py
+++ b/GPy/kern/sympykern.py
@@ -122,12 +122,12 @@ class spkern(kernpart):
         int i;
         int j;
         int N = target_array->dimensions[0];
-        int M = target_array->dimensions[1];
+        int num_inducing = target_array->dimensions[1];
         int D = X_array->dimensions[1];
         //#pragma omp parallel for private(j)
         for (i=0;i<N;i++){

         int N = partial_array->dimensions[0];
-        int M = partial_array->dimensions[1];
+        int num_inducing = partial_array->dimensions[1];
         int D = X_array->dimensions[1];
         //#pragma omp parallel for private(j)
         for (i=0;i<N;i++){

         int N = partial_array->dimensions[0];
-        int M = partial_array->dimensions[1];
+        int num_inducing = partial_array->dimensions[1];
         int D = X_array->dimensions[1];
         //#pragma omp parallel for private(j)
         for (i=0;i<N;i++){

         int N = X_array->dimensions[0];
-        int M = 0;
+        int num_inducing = 0;
         int D = X_array->dimensions[1];
         for (i=0;i<N;i++){
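
Usage note: this changeset renames the inducing-point argument from M to num_inducing across the examples and kernels. A minimal sketch of the post-rename API, modelled on sparse_GP_regression_1D above (the toy data and kernel choices here are illustrative, not part of the diff):

    import numpy as np
    import GPy

    # toy 1D regression data, as in the example above
    X = np.random.uniform(-3., 3., (400, 1))
    Y = np.sin(X) + np.random.randn(400, 1) * 0.05

    kernel = GPy.kern.rbf(1) + GPy.kern.white(1)

    # num_inducing replaces the old M keyword
    m = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=5)
    m.ensure_default_constraints()
    m.optimize()

The same keyword applies to GPy.models.BayesianGPLVM and mrd.MRD, as the hunks above show.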