diff --git a/GPy/kern/_src/psi_comp/rbf_psi_gpucomp.py b/GPy/kern/_src/psi_comp/rbf_psi_gpucomp.py
index c159a0b6..73c2c33b 100644
--- a/GPy/kern/_src/psi_comp/rbf_psi_gpucomp.py
+++ b/GPy/kern/_src/psi_comp/rbf_psi_gpucomp.py
@@ -258,6 +258,11 @@ class PSICOMP_RBF_GPU(PSICOMP_RBF):
         self.g_psi2compDer.prepare('PPPPPPPdPPPPiii')
         self.g_compDenom = module.get_function('compDenom')
         self.g_compDenom.prepare('PPPPii')
+
+    def __deepcopy__(self, memo):
+        s = PSICOMP_RBF_GPU(threadnum=self.threadnum, blocknum=self.blocknum, GPU_direct=self.GPU_direct)
+        memo[id(self)] = s
+        return s
 
     def _initGPUCache(self, N, M, Q):
         if self.gpuCache == None:
diff --git a/GPy/kern/_src/psi_comp/ssrbf_psi_gpucomp.py b/GPy/kern/_src/psi_comp/ssrbf_psi_gpucomp.py
index c430e217..1a9d2058 100644
--- a/GPy/kern/_src/psi_comp/ssrbf_psi_gpucomp.py
+++ b/GPy/kern/_src/psi_comp/ssrbf_psi_gpucomp.py
@@ -308,7 +308,12 @@ class PSICOMP_SSRBF_GPU(PSICOMP_RBF):
         self.g_psi2compDer.prepare('PPPPPPPPPPPdPPPPPiii')
         self.g_compDenom = module.get_function('compDenom')
         self.g_compDenom.prepare('PPPPPPPii')
-
+
+    def __deepcopy__(self, memo):
+        s = PSICOMP_SSRBF_GPU(threadnum=self.threadnum, blocknum=self.blocknum, GPU_direct=self.GPU_direct)
+        memo[id(self)] = s
+        return s
+
     def _initGPUCache(self, N, M, Q):
         if self.gpuCache == None:
             self.gpuCache = {
diff --git a/GPy/kern/_src/rbf.py b/GPy/kern/_src/rbf.py
index 90c9100b..471c3305 100644
--- a/GPy/kern/_src/rbf.py
+++ b/GPy/kern/_src/rbf.py
@@ -43,6 +43,15 @@ class RBF(Stationary):
     def __setstate__(self, state):
         return super(RBF, self).__setstate__(state)
 
+#     def copy(self):
+#         k = super(RBF, self).copy()
+#         # Make sure the copy of the kernel instance has a different instance of psicomp
+#         if k.useGPU:
+#             k.psicomp = PSICOMP_RBF_GPU()
+#         else:
+#             k.psicomp = PSICOMP_RBF()
+#         return k
+
     #---------------------------------------#
     #             PSI statistics            #
     #---------------------------------------#
diff --git a/GPy/models/__init__.py b/GPy/models/__init__.py
index 299d5e65..1fb781d5 100644
--- a/GPy/models/__init__.py
+++ b/GPy/models/__init__.py
@@ -17,3 +17,4 @@ from ss_gplvm import SSGPLVM
 from gp_coregionalized_regression import GPCoregionalizedRegression
 from sparse_gp_coregionalized_regression import SparseGPCoregionalizedRegression
 from gp_heteroscedastic_regression import GPHeteroscedasticRegression
+from ss_mrd import SSMRD
diff --git a/GPy/models/ss_gplvm.py b/GPy/models/ss_gplvm.py
index 2fa3cb2a..df35d549 100644
--- a/GPy/models/ss_gplvm.py
+++ b/GPy/models/ss_gplvm.py
@@ -24,7 +24,7 @@ class SSGPLVM(SparseGP):
     """
 
     def __init__(self, Y, input_dim, X=None, X_variance=None, init='PCA', num_inducing=10,
-                 Z=None, kernel=None, inference_method=None, likelihood=None, name='Spike-and-Slab GPLVM', group_spike=False, mpi_comm=None, **kwargs):
+                 Z=None, kernel=None, inference_method=None, likelihood=None, name='Spike_and_Slab GPLVM', group_spike=False, mpi_comm=None, **kwargs):
         self.mpi_comm = mpi_comm
         self.__IN_OPTIMIZATION__ = False
 
diff --git a/GPy/models/ss_mrd.py b/GPy/models/ss_mrd.py
new file mode 100644
index 00000000..f959eb59
--- /dev/null
+++ b/GPy/models/ss_mrd.py
@@ -0,0 +1,22 @@
+"""
+The Manifold Relevance Determination model with the spike-and-slab prior
+"""
+
+from ..core import Model
+from .ss_gplvm import SSGPLVM
+
+class SSMRD(Model):
+
+    def __init__(self, Ylist, input_dim, X=None, X_variance=None,
+                 initx = 'PCA', initz = 'permute',
+                 num_inducing=10, Z=None, kernel=None,
+                 inference_method=None, likelihoods=None, name='ss_mrd', Ynames=None):
+        super(SSMRD, self).__init__(name)
+
+        self.updates = False
+        self.models = [SSGPLVM(y, input_dim, X=X, X_variance=X_variance, num_inducing=num_inducing,Z=Z,init=initx,
+                               kernel=kernel if kernel else None,inference_method=inference_method,likelihood=likelihoods,
+                               name='model_'+str(i)) for i,y in enumerate(Ylist)]
+        self.add_parameters(*(self.models))
+        self.updates = True
+
diff --git a/GPy/util/gpu_init.py b/GPy/util/gpu_init.py
index 03d07d77..845d38a1 100644
--- a/GPy/util/gpu_init.py
+++ b/GPy/util/gpu_init.py
@@ -17,7 +17,7 @@ except:
     pass
 
 try:
-    if MPI_enabled: #and MPI.COMM_WORLD.size>1:
+    if MPI_enabled and MPI.COMM_WORLD.size>1:
         from .parallel import get_id_within_node
         gpuid = get_id_within_node()
         import pycuda.driver
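The __deepcopy__ overrides added above exist because the GPU psi-statistics objects hold device-side state (prepared CUDA kernels, GPU buffers) that cannot be copied byte for byte; deep-copying a model therefore receives a freshly constructed computation object with the same configuration, registered in the memo dict so every reference to the original resolves to the same replacement. Below is a minimal, self-contained sketch of that pattern in plain Python (no PyCUDA); the FakeGPUComp class and its attributes are placeholders, not GPy API.

    import copy

    class FakeGPUComp(object):
        """Stand-in for a GPU-backed psi-computation object (hypothetical)."""
        def __init__(self, threadnum=256, blocknum=30, GPU_direct=False):
            self.threadnum = threadnum
            self.blocknum = blocknum
            self.GPU_direct = GPU_direct
            self.gpuCache = None  # device buffers would live here and must not be deep-copied

        def __deepcopy__(self, memo):
            # Build a new instance with the same configuration but no device state.
            s = FakeGPUComp(threadnum=self.threadnum, blocknum=self.blocknum,
                            GPU_direct=self.GPU_direct)
            memo[id(self)] = s  # repeated references to the original map to this one copy
            return s

    state = {'psicomp': FakeGPUComp(), 'alias': None}
    state['alias'] = state['psicomp']               # two references, one object
    clone = copy.deepcopy(state)
    assert clone['psicomp'] is clone['alias']       # memo keeps the references consistent
    assert clone['psicomp'] is not state['psicomp'] # but distinct from the original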
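The new GPy/models/ss_mrd.py wraps one SSGPLVM per output view and, via the __init__.py change, exposes it as GPy.models.SSMRD. A hedged usage sketch of the constructor signature added here, with random placeholder data; whether optimization runs end to end depends on the rest of the spike-and-slab machinery, so treat it as an illustration of the call signature only.

    import numpy as np
    import GPy

    np.random.seed(0)
    Y1 = np.random.randn(40, 8)    # first view: 40 observations, 8 output dimensions
    Y2 = np.random.randn(40, 12)   # second view over the same 40 observations

    # One SSGPLVM per view, sharing input_dim and num_inducing, as the diff wires up.
    m = GPy.models.SSMRD([Y1, Y2], input_dim=3, num_inducing=10)
    print(m)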