Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-11 13:02:38 +02:00)

Commit a2bc6ec1e6: Merge branch 'devel' of github.com:SheffieldML/GPy into devel

11 changed files with 123 additions and 183 deletions
@@ -28,8 +28,7 @@ class GPBase(Model):
         self._Xmean = np.zeros((1, self.input_dim))
         self._Xstd = np.ones((1, self.input_dim))
 
-        Model.__init__(self)
-
+        super(GPBase, self).__init__()
         # All leaf nodes should call self._set_params(self._get_params()) at
         # the end
 
@@ -27,7 +27,7 @@ def BGPLVM(seed=default_seed):
     # k = GPy.kern.rbf(Q, ARD = False) + GPy.kern.white(Q, 0.00001)
 
     m = GPy.models.BayesianGPLVM(Y, Q, kernel=k, num_inducing=num_inducing)
-    m.constrain_positive('(rbf|bias|noise|white|S)')
+    # m.constrain_positive('(rbf|bias|noise|white|S)')
     # m.constrain_fixed('S', 1)
 
     # pb.figure()
@@ -117,10 +117,9 @@ def swiss_roll(optimize=True, N=1000, num_inducing=15, Q=4, sigma=.2, plot=False
     m.optimize('scg', messages=1)
     return m
 
-
-def BGPLVM_oil(optimize=True, N=100, Q=5, num_inducing=25, max_f_eval=4e3, plot=False, **k):
+def BGPLVM_oil(optimize=True, N=200, Q=10, num_inducing=15, max_f_eval=4e3, plot=False, **k):
     np.random.seed(0)
     data = GPy.util.datasets.oil()
     from GPy.core.transformations import logexp_clipped
 
     # create simple GP model
     kernel = GPy.kern.rbf(Q, ARD=True) + GPy.kern.bias(Q, np.exp(-2)) + GPy.kern.white(Q, np.exp(-2))
@@ -132,14 +131,14 @@ def BGPLVM_oil(optimize=True, N=100, Q=5, num_inducing=25, max_f_eval=4e3, plot=
     m.data_labels = data['Y'][:N].argmax(axis=1)
 
     # m.constrain('variance|leng', logexp_clipped())
-    m['lengt'] = m.X.var(0).max() / m.X.var(0)
+    m['.*lengt'] = 1. # m.X.var(0).max() / m.X.var(0)
     m['noise'] = Yn.var() / 100.
 
     m.ensure_default_constraints()
 
     # optimize
     if optimize:
-        m.optimize('scg', messages=1, max_f_eval=max_f_eval)
+        m.optimize('scg', messages=1, max_f_eval=max_f_eval, gtol=.05)
 
     if plot:
         y = m.likelihood.Y[0, :]
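The two initialisation lines in the hunk above use GPy's regular-expression indexing: assigning to m['pattern'] sets every parameter whose name matches the pattern. As a sketch of the idiom (Yn is the normalised data array defined earlier in this demo):

    m['.*lengt'] = 1.              # set every lengthscale parameter to 1
    m['noise'] = Yn.var() / 100.   # start the noise variance at 1% of the data variance
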
@@ -266,9 +265,9 @@ def bgplvm_simulation(optimize='scg',
 
     if optimize:
         print "Optimizing model:"
-        m.optimize('scg', max_iters=max_f_eval,
+        m.optimize(optimize, max_iters=max_f_eval,
                    max_f_eval=max_f_eval,
-                   messages=True, gtol=1e-6)
+                   messages=True, gtol=.05)
     if plot:
         m.plot_X_1d("BGPLVM Latent Space 1D")
         m.kern.plot_ARD('BGPLVM Simulation ARD Parameters')
@@ -231,7 +231,7 @@ def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000
 
     ax.set_xlim(xlim)
     ax.set_ylim(ylim)
-    return (models, lls)
+    return m # (models, lls)
 
 def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
     """Evaluate the GP objective function for a given data set for a range of signal-to-noise ratios and a range of lengthscales.
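The _contour_data docstring describes a grid evaluation whose body is not shown in this diff. A minimal sketch of such an evaluation, assuming the GPy 0.x API used elsewhere in this commit (GPy.kern.rbf, GPy.models.GPRegression, regex parameter assignment, m.log_likelihood()); the helper name and the SNR-to-variance split are illustrative, not the repository's actual code:

    import numpy as np
    import GPy

    def contour_data_sketch(X, Y, length_scales, log_SNRs):
        # log-likelihood surface over a lengthscale x signal-to-noise grid
        total_var = Y.var()
        lls = np.zeros((len(log_SNRs), len(length_scales)))
        for i, log_SNR in enumerate(log_SNRs):
            SNR = 10. ** log_SNR
            noise_var = total_var / (1. + SNR)  # share variance between signal and noise
            for j, ls in enumerate(length_scales):
                kernel = GPy.kern.rbf(1, variance=total_var - noise_var, lengthscale=ls)
                m = GPy.models.GPRegression(X, Y, kernel)
                m['noise'] = noise_var          # regex assignment, as in the demos above
                lls[i, j] = m.log_likelihood()
        return lls
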
@@ -17,28 +17,27 @@ def tuto_GP_regression():
     X = np.random.uniform(-3.,3.,(20,1))
     Y = np.sin(X) + np.random.randn(20,1)*0.05
 
-    kernel = GPy.kern.rbf(D=1, variance=1., lengthscale=1.)
+    kernel = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)
 
     m = GPy.models.GPRegression(X, Y, kernel)
 
     print m
     m.plot()
 
-    m.ensure_default_constraints()
+    m.constrain_positive('')
 
-    m.unconstrain('') # Required to remove the previous constraints
-    m.constrain_positive('rbf_variance')
-    m.constrain_bounded('lengthscale',1.,10. )
-    m.constrain_fixed('noise',0.0025)
+    m.unconstrain('') # may be used to remove the previous constraints
+    m.constrain_positive('.*rbf_variance')
+    m.constrain_bounded('.*lengthscale',1.,10. )
+    m.constrain_fixed('.*noise',0.0025)
 
     m.optimize()
 
     m.optimize_restarts(num_restarts = 10)
 
     ###########################
     # 2-dimensional example #
     ###########################
 
-    #######################################################
     #######################################################
     # sample inputs and outputs
     X = np.random.uniform(-3.,3.,(50,2))
     Y = np.sin(X[:,0:1]) * np.sin(X[:,1:2])+np.random.randn(50,1)*0.05
@@ -53,22 +52,19 @@ def tuto_GP_regression():
     m.constrain_positive('')
 
     # optimize and plot
     pb.figure()
     m.optimize('tnc', max_f_eval = 1000)
 
     m.plot()
     print(m)
 
     return(m)
 
 def tuto_kernel_overview():
     """The detailed explanations of the commands used in this file can be found in the tutorial section"""
     pb.ion()
 
-    ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(D=1, variance=1., lengthscale=1.)
-    ker2 = GPy.kern.rbf(D=1, variance = .75, lengthscale=2.)
+    ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)
+    ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.)
     ker3 = GPy.kern.rbf(1, .5, .5)
 
     print ker2
 
     ker1.plot()
     ker2.plot()
     ker3.plot()
@@ -77,27 +73,12 @@ def tuto_kernel_overview():
     k2 = GPy.kern.Matern32(1, 0.5, 0.2)
 
     # Product of kernels
-    k_prod = k1.prod(k2)
-    k_prodorth = k1.prod_orthogonal(k2)
+    k_prod = k1.prod(k2) # By default, tensor=False
+    k_prodtens = k1.prod(k2,tensor=True)
 
     # Sum of kernels
-    k_add = k1.add(k2)
-    k_addorth = k1.add_orthogonal(k2)
-
-    pb.figure(figsize=(8,8))
-    pb.subplot(2,2,1)
-    k_prod.plot()
-    pb.title('prod')
-    pb.subplot(2,2,2)
-    k_prodorth.plot()
-    pb.title('prod_orthogonal')
-    pb.subplot(2,2,3)
-    k_add.plot()
-    pb.title('add')
-    pb.subplot(2,2,4)
-    k_addorth.plot()
-    pb.title('add_orthogonal')
-    pb.subplots_adjust(wspace=0.3, hspace=0.3)
+    k_add = k1.add(k2) # By default, tensor=False
+    k_addtens = k1.add(k2,tensor=True)
 
     k1 = GPy.kern.rbf(1,1.,2)
     k2 = GPy.kern.periodic_Matern52(1,variance=1e3, lengthscale=1, period = 1.5, lower=-5., upper = 5)
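The change above folds prod_orthogonal and add_orthogonal into a tensor keyword. The semantics, sketched with the tutorial's own kernels: with tensor=False both factors act on the same input, with tensor=True each factor gets its own copy of the input space, so the result lives on a higher-dimensional input:

    k1 = GPy.kern.rbf(1)
    k2 = GPy.kern.Matern32(1)

    k_same = k1.prod(k2)               # 1-D kernel: k1(x) * k2(x)
    k_tens = k1.prod(k2, tensor=True)  # 2-D kernel: k1(x1) * k2(x2)
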
@@ -109,18 +90,6 @@ def tuto_kernel_overview():
     X = np.linspace(-5,5,501)[:,None]
     Y = np.random.multivariate_normal(np.zeros(501),k.K(X),1)
 
-    # plot
-    pb.figure(figsize=(10,4))
-    pb.subplot(1,2,1)
-    k.plot()
-    pb.subplot(1,2,2)
-    pb.plot(X,Y.T)
-    pb.ylabel("Sample path")
-    pb.subplots_adjust(wspace=0.3)
-
-    k = (k1+k2)*(k1+k2)
-    print k.parts[0].name, '\n', k.parts[1].name, '\n', k.parts[2].name, '\n', k.parts[3].name
-
     k1 = GPy.kern.rbf(1)
     k2 = GPy.kern.Matern32(1)
     k3 = GPy.kern.white(1)
@@ -128,16 +97,16 @@ def tuto_kernel_overview():
     k = k1 + k2 + k3
     print k
 
-    k.constrain_positive('var')
+    k.constrain_positive('.*var')
     k.constrain_fixed(np.array([1]),1.75)
-    k.tie_params('len')
+    k.tie_params('.*len')
     k.unconstrain('white')
     k.constrain_bounded('white',lower=1e-5,upper=.5)
     print k
 
     k_cst = GPy.kern.bias(1,variance=1.)
     k_mat = GPy.kern.Matern52(1,variance=1., lengthscale=3)
-    Kanova = (k_cst + k_mat).prod_orthogonal(k_cst + k_mat)
+    Kanova = (k_cst + k_mat).prod(k_cst + k_mat,tensor=True)
     print Kanova
 
     # sample inputs and outputs
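The Kanova line above applies the same tensor-product idea to an ANOVA decomposition: expanding (bias + Matern) x (bias + Matern) over two separate inputs yields four additive parts (a constant, two univariate main effects, and an interaction), which is why the which_parts masks in the next hunk have length four. Sketch:

    k_cst = GPy.kern.bias(1, variance=1.)
    k_mat = GPy.kern.Matern52(1, variance=1., lengthscale=3)
    Kanova = (k_cst + k_mat).prod(k_cst + k_mat, tensor=True)
    print Kanova  # four parts: bias*bias, bias*Mat52, Mat52*bias, Mat52*Mat52
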
@@ -156,41 +125,17 @@ def tuto_kernel_overview():
     pb.subplot(1,5,2)
     pb.ylabel("= ",rotation='horizontal',fontsize='30')
     pb.subplot(1,5,3)
-    m.plot(which_functions=[False,True,False,False])
+    m.plot(which_parts=[False,True,False,False])
     pb.ylabel("cst +",rotation='horizontal',fontsize='30')
     pb.subplot(1,5,4)
-    m.plot(which_functions=[False,False,True,False])
+    m.plot(which_parts=[False,False,True,False])
     pb.ylabel("+ ",rotation='horizontal',fontsize='30')
     pb.subplot(1,5,5)
     pb.ylabel("+ ",rotation='horizontal',fontsize='30')
-    m.plot(which_functions=[False,False,False,True])
+    m.plot(which_parts=[False,False,False,True])
 
-    ker1 = GPy.kern.rbf(D=1) # Equivalent to ker1 = GPy.kern.rbf(D=1, variance=1., lengthscale=1.)
-    ker2 = GPy.kern.rbf(D=1, variance = .75, lengthscale=3.)
-    ker3 = GPy.kern.rbf(1, .5, .25)
+    return(m)
 
-    ker1.plot()
-    ker2.plot()
-    ker3.plot()
-    #pb.savefig("Figures/tuto_kern_overview_basicdef.png")
-
-    kernels = [GPy.kern.rbf(1), GPy.kern.exponential(1), GPy.kern.Matern32(1), GPy.kern.Matern52(1), GPy.kern.Brownian(1), GPy.kern.bias(1), GPy.kern.linear(1), GPy.kern.spline(1), GPy.kern.periodic_exponential(1), GPy.kern.periodic_Matern32(1), GPy.kern.periodic_Matern52(1), GPy.kern.white(1)]
-    kernel_names = ["GPy.kern.rbf", "GPy.kern.exponential", "GPy.kern.Matern32", "GPy.kern.Matern52", "GPy.kern.Brownian", "GPy.kern.bias", "GPy.kern.linear", "GPy.kern.spline", "GPy.kern.periodic_exponential", "GPy.kern.periodic_Matern32", "GPy.kern.periodic_Matern52", "GPy.kern.white"]
-
-    pb.figure(figsize=(16,12))
-    pb.subplots_adjust(wspace=.5, hspace=.5)
-    for i, kern in enumerate(kernels):
-        pb.subplot(3,4,i+1)
-        kern.plot(x=7.5,plot_limits=[0.00001,15.])
-        pb.title(kernel_names[i]+ '\n')
-
-    # actual plot for the noise
-    i = 11
-    X = np.linspace(0.,15.,201)
-    WN = 0*X
-    WN[100] = 1.
-    pb.subplot(3,4,i+1)
-    pb.plot(X,WN,'b')
 
 def model_interaction():
     X = np.random.randn(20,1)
@@ -18,10 +18,10 @@ class opt_SGD(Optimizer):
 
     """
 
-    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, Model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs):
+    def __init__(self, start, iterations = 10, learning_rate = 1e-4, momentum = 0.9, model = None, messages = False, batch_size = 1, self_paced = False, center = True, iteration_file = None, learning_rate_adaptation=None, actual_iter=None, schedule=None, **kwargs):
         self.opt_name = "Stochastic Gradient Descent"
 
-        self.Model = Model
+        self.Model = model
         self.iterations = iterations
         self.momentum = momentum
         self.learning_rate = learning_rate
@@ -42,11 +42,11 @@ class opt_SGD(Optimizer):
         self.learning_rate_0 = self.learning_rate.mean()
 
         self.schedule = schedule
-        # if len([p for p in self.Model.kern.parts if p.name == 'bias']) == 1:
+        # if len([p for p in self.model.kern.parts if p.name == 'bias']) == 1:
         #     self.param_traces.append(('bias',[]))
-        # if len([p for p in self.Model.kern.parts if p.name == 'linear']) == 1:
+        # if len([p for p in self.model.kern.parts if p.name == 'linear']) == 1:
         #     self.param_traces.append(('linear',[]))
-        # if len([p for p in self.Model.kern.parts if p.name == 'rbf']) == 1:
+        # if len([p for p in self.model.kern.parts if p.name == 'rbf']) == 1:
         #     self.param_traces.append(('rbf_var',[]))
 
         self.param_traces = dict(self.param_traces)
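With the keyword renamed, callers pass the model in lowercase. A hypothetical construction, assuming an existing GPy model m and the defaults from the signature above:

    opt = opt_SGD(m._get_params(), iterations=100, learning_rate=1e-4,
                  momentum=0.9, model=m, messages=True, batch_size=10)
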
@@ -29,7 +29,7 @@ from independent_outputs import IndependentOutputs as independent_output_part
 # using meta-classes to make the objects construct properly without them.
 
 
-def rbf(D,variance=1., lengthscale=None,ARD=False):
+def rbf(input_dim,variance=1., lengthscale=None,ARD=False):
     """
     Construct an RBF kernel
 
@@ -42,10 +42,10 @@ def rbf(D,variance=1., lengthscale=None,ARD=False):
     :param ARD: Auto Relevance Determination (one lengthscale per dimension)
     :type ARD: Boolean
     """
-    part = rbfpart(D,variance,lengthscale,ARD)
-    return kern(D, [part])
+    part = rbfpart(input_dim,variance,lengthscale,ARD)
+    return kern(input_dim, [part])
 
-def linear(D,variances=None,ARD=False):
+def linear(input_dim,variances=None,ARD=False):
     """
     Construct a linear kernel.
 
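Every constructor in this module follows the pattern above: build a single part and wrap it in a kern. After the rename the first argument is input_dim throughout, e.g. (sketch; ARD gives one lengthscale per input dimension, per the docstring):

    k_iso = GPy.kern.rbf(input_dim=2)            # one shared lengthscale
    k_ard = GPy.kern.rbf(input_dim=2, ARD=True)  # one lengthscale per dimension
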
@@ -55,10 +55,10 @@ def linear(D,variances=None,ARD=False):
     variances (np.ndarray)
     ARD (boolean)
     """
-    part = linearpart(D,variances,ARD)
-    return kern(D, [part])
+    part = linearpart(input_dim,variances,ARD)
+    return kern(input_dim, [part])
 
-def white(D,variance=1.):
+def white(input_dim,variance=1.):
     """
     Construct a white kernel.
 
@@ -67,10 +67,10 @@ def white(D,variance=1.):
     input_dim (int), obligatory
     variance (float)
     """
-    part = whitepart(D,variance)
-    return kern(D, [part])
+    part = whitepart(input_dim,variance)
+    return kern(input_dim, [part])
 
-def exponential(D,variance=1., lengthscale=None, ARD=False):
+def exponential(input_dim,variance=1., lengthscale=None, ARD=False):
     """
     Construct an exponential kernel
 
@@ -83,10 +83,10 @@ def exponential(D,variance=1., lengthscale=None, ARD=False):
     :param ARD: Auto Relevance Determination (one lengthscale per dimension)
     :type ARD: Boolean
     """
-    part = exponentialpart(D,variance, lengthscale, ARD)
-    return kern(D, [part])
+    part = exponentialpart(input_dim,variance, lengthscale, ARD)
+    return kern(input_dim, [part])
 
-def Matern32(D,variance=1., lengthscale=None, ARD=False):
+def Matern32(input_dim,variance=1., lengthscale=None, ARD=False):
     """
     Construct a Matern 3/2 kernel.
 
@@ -99,10 +99,10 @@ def Matern32(D,variance=1., lengthscale=None, ARD=False):
     :param ARD: Auto Relevance Determination (one lengthscale per dimension)
     :type ARD: Boolean
     """
-    part = Matern32part(D,variance, lengthscale, ARD)
-    return kern(D, [part])
+    part = Matern32part(input_dim,variance, lengthscale, ARD)
+    return kern(input_dim, [part])
 
-def Matern52(D,variance=1., lengthscale=None, ARD=False):
+def Matern52(input_dim, variance=1., lengthscale=None, ARD=False):
     """
     Construct a Matern 5/2 kernel.
 
@@ -115,10 +115,10 @@ def Matern52(D,variance=1., lengthscale=None, ARD=False):
     :param ARD: Auto Relevance Determination (one lengthscale per dimension)
     :type ARD: Boolean
     """
-    part = Matern52part(D,variance, lengthscale, ARD)
-    return kern(D, [part])
+    part = Matern52part(input_dim, variance, lengthscale, ARD)
+    return kern(input_dim, [part])
 
-def bias(D,variance=1.):
+def bias(input_dim, variance=1.):
     """
     Construct a bias kernel.
 
@@ -127,10 +127,10 @@ def bias(D,variance=1.):
     input_dim (int), obligatory
     variance (float)
     """
-    part = biaspart(D,variance)
-    return kern(D, [part])
+    part = biaspart(input_dim, variance)
+    return kern(input_dim, [part])
 
-def finite_dimensional(D,F,G,variances=1.,weights=None):
+def finite_dimensional(input_dim, F, G, variances=1., weights=None):
     """
     Construct a finite dimensional kernel.
     input_dim: int - the number of input dimensions
 
@@ -138,10 +138,10 @@ def finite_dimensional(D,F,G,variances=1.,weights=None):
     G: np.array with shape (n,n) - the Gram matrix associated to F
     variances : np.ndarray with shape (n,)
     """
-    part = finite_dimensionalpart(D,F,G,variances,weights)
-    return kern(D, [part])
+    part = finite_dimensionalpart(input_dim, F, G, variances, weights)
+    return kern(input_dim, [part])
 
-def spline(D,variance=1.):
+def spline(input_dim, variance=1.):
     """
     Construct a spline kernel.
 
@@ -150,10 +150,10 @@ def spline(D,variance=1.):
     :param variance: the variance of the kernel
     :type variance: float
     """
-    part = splinepart(D,variance)
-    return kern(D, [part])
+    part = splinepart(input_dim, variance)
+    return kern(input_dim, [part])
 
-def Brownian(D,variance=1.):
+def Brownian(input_dim, variance=1.):
     """
     Construct a Brownian motion kernel.
 
@@ -162,8 +162,8 @@ def Brownian(D,variance=1.):
     :param variance: the variance of the kernel
     :type variance: float
     """
-    part = Brownianpart(D,variance)
-    return kern(D, [part])
+    part = Brownianpart(input_dim, variance)
+    return kern(input_dim, [part])
 
 try:
     import sympy as sp
@@ -174,33 +174,33 @@ except ImportError:
     sympy_available = False
 
 if sympy_available:
-    def rbf_sympy(D,ARD=False,variance=1., lengthscale=1.):
+    def rbf_sympy(input_dim, ARD=False, variance=1., lengthscale=1.):
         """
         Radial Basis Function covariance.
         """
-        X = [sp.var('x%i'%i) for i in range(D)]
-        Z = [sp.var('z%i'%i) for i in range(D)]
+        X = [sp.var('x%i' % i) for i in range(input_dim)]
+        Z = [sp.var('z%i' % i) for i in range(input_dim)]
         rbf_variance = sp.var('rbf_variance',positive=True)
         if ARD:
-            rbf_lengthscales = [sp.var('rbf_lengthscale_%i'%i,positive=True) for i in range(D)]
-            dist_string = ' + '.join(['(x%i-z%i)**2/rbf_lengthscale_%i**2'%(i,i,i) for i in range(D)])
+            rbf_lengthscales = [sp.var('rbf_lengthscale_%i' % i, positive=True) for i in range(input_dim)]
+            dist_string = ' + '.join(['(x%i-z%i)**2/rbf_lengthscale_%i**2' % (i, i, i) for i in range(input_dim)])
             dist = parse_expr(dist_string)
             f = rbf_variance*sp.exp(-dist/2.)
         else:
             rbf_lengthscale = sp.var('rbf_lengthscale',positive=True)
-            dist_string = ' + '.join(['(x%i-z%i)**2'%(i,i) for i in range(D)])
+            dist_string = ' + '.join(['(x%i-z%i)**2' % (i, i) for i in range(input_dim)])
             dist = parse_expr(dist_string)
             f = rbf_variance*sp.exp(-dist/(2*rbf_lengthscale**2))
-        return kern(D,[spkern(D,f)])
+        return kern(input_dim, [spkern(input_dim, f)])
 
-    def sympykern(D,k):
+    def sympykern(input_dim, k):
         """
         A kernel from a symbolic sympy representation
         """
-        return kern(D,[spkern(D,k)])
+        return kern(input_dim, [spkern(input_dim, k)])
 del sympy_available
 
-def periodic_exponential(D=1,variance=1., lengthscale=None, period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
+def periodic_exponential(input_dim=1, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi):
     """
     Construct a periodic exponential kernel
 
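sympykern wraps any sympy expression in x%i/z%i variables as a covariance part. A usage sketch, assuming sympy is installed; the squared-exponential form here is only illustrative:

    import sympy as sp

    x0, z0 = sp.var('x0'), sp.var('z0')
    f = sp.exp(-(x0 - z0)**2)  # hand-written 1-D covariance expression
    k = sympykern(1, f)        # wrapped as a kern via spkern
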
@@ -215,10 +215,10 @@ def periodic_exponential(D=1,variance=1., lengthscale=None, period=2*np.pi,n_fre
     :param n_freq: the number of frequencies considered for the periodic subspace
     :type n_freq: int
     """
-    part = periodic_exponentialpart(D,variance, lengthscale, period, n_freq, lower, upper)
-    return kern(D, [part])
+    part = periodic_exponentialpart(input_dim, variance, lengthscale, period, n_freq, lower, upper)
+    return kern(input_dim, [part])
 
-def periodic_Matern32(D,variance=1., lengthscale=None, period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
+def periodic_Matern32(input_dim, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi):
     """
     Construct a periodic Matern 3/2 kernel.
 
@@ -233,10 +233,10 @@ def periodic_Matern32(D,variance=1., lengthscale=None, period=2*np.pi,n_freq=10,
     :param n_freq: the number of frequencies considered for the periodic subspace
     :type n_freq: int
     """
-    part = periodic_Matern32part(D,variance, lengthscale, period, n_freq, lower, upper)
-    return kern(D, [part])
+    part = periodic_Matern32part(input_dim, variance, lengthscale, period, n_freq, lower, upper)
+    return kern(input_dim, [part])
 
-def periodic_Matern52(D,variance=1., lengthscale=None, period=2*np.pi,n_freq=10,lower=0.,upper=4*np.pi):
+def periodic_Matern52(input_dim, variance=1., lengthscale=None, period=2 * np.pi, n_freq=10, lower=0., upper=4 * np.pi):
     """
     Construct a periodic Matern 5/2 kernel.
 
@@ -251,8 +251,8 @@ def periodic_Matern52(D,variance=1., lengthscale=None, period=2*np.pi,n_freq=10,
     :param n_freq: the number of frequencies considered for the periodic subspace
     :type n_freq: int
     """
-    part = periodic_Matern52part(D,variance, lengthscale, period, n_freq, lower, upper)
-    return kern(D, [part])
+    part = periodic_Matern52part(input_dim, variance, lengthscale, period, n_freq, lower, upper)
+    return kern(input_dim, [part])
 
 def prod(k1,k2,tensor=False):
     """
@@ -278,7 +278,7 @@ def Coregionalise(Nout,R=1, W=None, kappa=None):
     return kern(1,[p])
 
 
-def rational_quadratic(D,variance=1., lengthscale=1., power=1.):
+def rational_quadratic(input_dim, variance=1., lengthscale=1., power=1.):
     """
     Construct a rational quadratic kernel.
 
@@ -291,10 +291,10 @@ def rational_quadratic(D,variance=1., lengthscale=1., power=1.):
     :rtype: kern object
 
     """
-    part = rational_quadraticpart(D,variance, lengthscale, power)
-    return kern(D, [part])
+    part = rational_quadraticpart(input_dim, variance, lengthscale, power)
+    return kern(input_dim, [part])
 
-def Fixed(D, K, variance=1.):
+def Fixed(input_dim, K, variance=1.):
     """
     Construct a Fixed effect kernel.
 
@@ -304,15 +304,15 @@ def Fixed(D, K, variance=1.):
     K (np.array), obligatory
     variance (float)
     """
-    part = fixedpart(D, K, variance)
-    return kern(D, [part])
+    part = fixedpart(input_dim, K, variance)
+    return kern(input_dim, [part])
 
-def rbfcos(D,variance=1.,frequencies=None,bandwidths=None,ARD=False):
+def rbfcos(input_dim, variance=1., frequencies=None, bandwidths=None, ARD=False):
     """
     construct an rbfcos kernel
     """
-    part = rbfcospart(D,variance,frequencies,bandwidths,ARD)
-    return kern(D,[part])
+    part = rbfcospart(input_dim, variance, frequencies, bandwidths, ARD)
+    return kern(input_dim, [part])
 
 def IndependentOutputs(k):
     """
@@ -78,7 +78,7 @@ class MRD(Model):
         self.NQ = self.num_data * self.input_dim
         self.MQ = self.num_inducing * self.input_dim
 
-        Model.__init__(self) # @UndefinedVariable
+        model.__init__(self) # @UndefinedVariable
         self._set_params(self._get_params())
 
     @property
@@ -64,12 +64,9 @@ class DPsiStatTest(unittest.TestCase):
 
     def testPsi0(self):
         for k in self.kernels:
-            m = PsiStatModel('psi1', X=self.X, X_variance=self.X_var, Z=self.Z,
+            m = PsiStatModel('psi0', X=self.X, X_variance=self.X_var, Z=self.Z,
                              num_inducing=self.num_inducing, kernel=k)
-            try:
-                assert m.checkgrad(), "{} x psi0".format("+".join(map(lambda x: x.name, k.parts)))
-            except:
-                import ipdb;ipdb.set_trace()
+            assert m.checkgrad(), "{} x psi0".format("+".join(map(lambda x: x.name, k.parts)))
 
 #     def testPsi1(self):
 #         for k in self.kernels:
@@ -2,9 +2,9 @@ import pylab as pb
 import numpy as np
 from .. import util
 
-def plot_latent(Model, labels=None, which_indices=None, resolution=50, ax=None, marker='o', s=40):
+def plot_latent(model, labels=None, which_indices=None, resolution=50, ax=None, marker='o', s=40):
     """
-    :param labels: a np.array of size Model.N containing labels for the points (can be numbers, strings, etc.)
+    :param labels: a np.array of size model.num_data containing labels for the points (can be numbers, strings, etc.)
     :param resolution: the resolution of the grid on which to evaluate the predictive variance
     """
     if ax is None:
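A typical call after the rename, as a sketch (m is any GPy latent-variable model; data_labels follows the BGPLVM_oil demo above):

    plot_latent(m, labels=m.data_labels, which_indices=(0, 1), resolution=50)
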
@@ -12,26 +12,26 @@ def plot_latent(Model, labels=None, which_indices=None, resolution=50, ax=None,
     util.plot.Tango.reset()
 
     if labels is None:
-        labels = np.ones(Model.N)
+        labels = np.ones(model.num_data)
     if which_indices is None:
-        if Model.input_dim==1:
+        if model.input_dim==1:
             input_1 = 0
             input_2 = None
-        if Model.input_dim==2:
+        if model.input_dim==2:
             input_1, input_2 = 0,1
         else:
             try:
-                input_1, input_2 = np.argsort(Model.input_sensitivity())[:2]
+                input_1, input_2 = np.argsort(model.input_sensitivity())[:2]
             except:
                 raise ValueError, "cannot automatically determine which dimensions to plot, please pass 'which_indices'"
     else:
         input_1, input_2 = which_indices
 
     # first, plot the output variance as a function of the latent space
-    Xtest, xx,yy,xmin,xmax = util.plot.x_frame2D(Model.X[:,[input_1, input_2]],resolution=resolution)
-    Xtest_full = np.zeros((Xtest.shape[0], Model.X.shape[1]))
+    Xtest, xx,yy,xmin,xmax = util.plot.x_frame2D(model.X[:,[input_1, input_2]],resolution=resolution)
+    Xtest_full = np.zeros((Xtest.shape[0], model.X.shape[1]))
     Xtest_full[:, :2] = Xtest
-    mu, var, low, up = Model.predict(Xtest_full)
+    mu, var, low, up = model.predict(Xtest_full)
     var = var[:, :1]
     ax.imshow(var.reshape(resolution, resolution).T,
               extent=[xmin[0], xmax[0], xmin[1], xmax[1]], cmap=pb.cm.binary,interpolation='bilinear',origin='lower')
@@ -55,12 +55,12 @@ def plot_latent(Model, labels=None, which_indices=None, resolution=50, ax=None,
             m = marker
 
         index = np.nonzero(labels==ul)[0]
-        if Model.input_dim==1:
-            x = Model.X[index,input_1]
+        if model.input_dim==1:
+            x = model.X[index,input_1]
             y = np.zeros(index.size)
         else:
-            x = Model.X[index,input_1]
-            y = Model.X[index,input_2]
+            x = model.X[index,input_1]
+            y = model.X[index,input_2]
         ax.scatter(x, y, marker=m, s=s, color=util.plot.Tango.nextMedium(), label=this_label)
 
     ax.set_xlabel('latent dimension %i'%input_1)
@@ -43,16 +43,16 @@ class vector_show(data_show):
 
 
 class lvm(data_show):
-    def __init__(self, vals, Model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]):
-        """Visualize a latent variable Model
+    def __init__(self, vals, model, data_visualize, latent_axes=None, sense_axes=None, latent_index=[0,1]):
+        """Visualize a latent variable model
 
-        :param Model: the latent variable Model to visualize.
+        :param model: the latent variable model to visualize.
         :param data_visualize: the object used to visualize the data which has been modelled.
        :type data_visualize: visualize.data_show type.
         :param latent_axes: the axes where the latent visualization should be plotted.
         """
         if vals == None:
-            vals = Model.X[0]
+            vals = model.X[0]
 
         data_show.__init__(self, vals, axes=latent_axes)
 
@@ -68,13 +68,13 @@ class lvm(data_show):
         self.cid = latent_axes[0].figure.canvas.mpl_connect('axes_enter_event', self.on_enter)
 
         self.data_visualize = data_visualize
-        self.Model = Model
+        self.Model = model
         self.latent_axes = latent_axes
         self.sense_axes = sense_axes
         self.called = False
         self.move_on = False
         self.latent_index = latent_index
-        self.latent_dim = Model.input_dim
+        self.latent_dim = model.input_dim
 
         # The red cross which shows current latent point.
         self.latent_values = vals
@@ -25,7 +25,7 @@ The first step is to define the covariance kernel we want to use for the model.
 
     kernel = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)
 
-The parameter ``D`` stands for the dimension of the input space. The parameters ``variance`` and ``lengthscale`` are optional. Many other kernels are implemented such as:
+The parameter ``input_dim`` stands for the dimension of the input space. The parameters ``variance`` and ``lengthscale`` are optional. Many other kernels are implemented such as:
 
 * linear (``GPy.kern.linear``)
 * exponential kernel (``GPy.kern.exponential``)
@@ -69,7 +69,7 @@ There are various ways to constrain the parameters of the kernel. The most basic
 
 but it is also possible to set a range or to constrain a parameter to be fixed. The argument of ``m.constrain_positive`` is a regular expression that matches the names of the parameters to be constrained (as seen in ``print m``). For example, if we want the variance to be positive, the lengthscale to be in [1,10] and the noise variance to be fixed, we can write::
 
-    m.unconstrain('') # Required to remove the previous constraints
+    m.unconstrain('') # may be used to remove the previous constraints
     m.constrain_positive('.*rbf_variance')
     m.constrain_bounded('.*lengthscale',1.,10. )
     m.constrain_fixed('.*noise',0.0025)
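Taken together with the regression tutorial above, the documented workflow reads as follows (a sketch in the Python 2 era GPy API, with toy data as in the tutorial)::

    import numpy as np
    import GPy

    X = np.random.uniform(-3., 3., (20, 1))
    Y = np.sin(X) + np.random.randn(20, 1) * 0.05

    kernel = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)
    m = GPy.models.GPRegression(X, Y, kernel)

    m.unconstrain('')                              # clear any existing constraints
    m.constrain_positive('.*rbf_variance')         # keep the signal variance positive
    m.constrain_bounded('.*lengthscale', 1., 10.)  # bound the lengthscale
    m.constrain_fixed('.*noise', 0.0025)           # fix the noise variance
    m.optimize()
    print m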