mirror of https://github.com/SheffieldML/GPy.git
synced 2026-05-02 00:02:38 +02:00
fix the problem of multiple ties on the same param array object
This commit is contained in:
parent 567612b3a9
commit 3f36a245d1

5 changed files with 18 additions and 24 deletions
@@ -500,8 +500,9 @@ class Indexable(Nameable, Observable):
     #===========================================================================

     def tie(self, name):
+        from ties_and_remappings import Tie
         #remove any constraints
-        old_const = self.constraints.properties()[:]
+        old_const = [c for c in self.constraints.properties() if not isinstance(c,Tie)]
         self.unconstrain()

         #see if a tie exists with that name
@@ -510,14 +511,14 @@ class Indexable(Nameable, Observable):
         else:
             #create a tie object
             value = np.atleast_1d(self.param_array)[0]*1
-            from ties_and_remappings import Tie
             t = Tie(value=value, name=name)

             #add the new tie object to the global index
             self._highest_parent_.ties[name] = t
             self._highest_parent_.add_parameter(t)

             #constrain the tie as we were constrained
-            if len(old_const)==1:
+            if len(old_const)>0:
                 t.constrain(old_const[0])

+        self.constraints.add(t, self._raveled_index())
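The two hunks above make re-tying idempotent: a Tie left behind by an earlier .tie() call on the same param array no longer masquerades as an ordinary constraint to re-apply. A minimal standalone sketch of that filtering logic, using stand-in classes rather than GPy's real Tie and constraint types:

class Constraint(object):
    """Stand-in for a genuine transformation, e.g. a positivity constraint."""
    pass

class Tie(Constraint):
    """Stand-in for GPy's Tie remapping; it is stored alongside ordinary
    constraints on the tied parameter, which is what caused the bug."""
    pass

def constraints_to_reapply(properties):
    # Old behaviour: copy everything, including any Tie from an earlier
    # .tie() call on the same param array.
    old_behaviour = properties[:]
    # Fixed behaviour: a previous Tie is not a real constraint to re-apply.
    fixed_behaviour = [c for c in properties if not isinstance(c, Tie)]
    return old_behaviour, fixed_behaviour

positivity, earlier_tie = Constraint(), Tie()
old, fixed = constraints_to_reapply([positivity, earlier_tie])
assert len(old) == 2          # old code: len(old_const)==1 fails, constraint lost
assert fixed == [positivity]  # fixed code: len(old_const)>0 re-applies it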
@@ -46,7 +46,7 @@ class Tie(Remapping):
     def callback(self, param=None, which=None):
         """
         This gets called whenever any of the tied parameters changes. we spend
-        considerable effort working out whhat has changed ant to what value.
+        considerable effort working out what has changed and to what value.
         Then we store that value in self.value, and broadcast it everywhere
         with parameters_changed.
         """
@@ -54,11 +54,13 @@ class Tie(Remapping):
         index = self._highest_parent_.constraints[self]
         if len(index)==0:
             return # nothing to tie together, this tie exists without any tied parameters
-        self.value.gradient[:] = self._highest_parent_.gradient[index].sum()
+        self.collate_gradient()
         vals = self._highest_parent_.param_array[index]
         uvals = np.unique(vals)
         if len(uvals)==1:
             #all of the tied things are at the same value
+            if (self.value==uvals[0]).all():
+                return # DO NOT DO ANY CHANGES IF THE TIED PART IS NOT CHANGED!
             self.value[...] = uvals[0]
         elif len(uvals)==2:
             #only *one* of the tied things has changed. it must be different to self.value
@@ -69,7 +71,7 @@ class Tie(Remapping):
            raise ValueError, "something is wrong with the tieing"
     def parameters_changed(self):
         super(Tie,self).parameters_changed()
-        self.value.gradient[:] = self._highest_parent_.gradient[self._highest_parent_.constraints[self]].sum()
+        self.collate_gradient()

     def mapping(self):
         return self.value
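The callback's broadcast rule is compact enough to state on its own. A standalone numpy sketch, where resolve_tied_value, tie_value and values are illustrative names, not GPy API; the body of the two-value branch is elided in the diff, so its logic here is an inference from the surrounding comments and the final raise:

import numpy as np

def resolve_tied_value(tie_value, values):
    # values plays the role of self._highest_parent_.param_array[index]
    uvals = np.unique(values)
    if len(uvals) == 1:
        # all tied parameters agree; adopt that value (a no-op if unchanged)
        return uvals[0]
    elif len(uvals) == 2:
        # exactly one tied parameter moved; the new value is whichever of
        # the two candidates differs from the tie's current value
        return uvals[uvals != tie_value][0]
    raise ValueError("something is wrong with the tieing")

assert resolve_tied_value(1.0, np.array([1.0, 1.0, 1.0])) == 1.0
assert resolve_tied_value(1.0, np.array([1.0, 2.5, 1.0])) == 2.5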
@@ -40,7 +40,6 @@ class SpikeAndSlabPrior(VariationalPrior):
         self.pi = Param('pi', pi, Logistic(1e-10,1.-1e-10))
-        self.variance = Param('variance',variance)
         self.add_parameters(self.pi)
         self.group_spike_prob = False

     def KL_divergence(self, variational_posterior):
         mu = variational_posterior.mean
@@ -56,11 +55,7 @@ class SpikeAndSlabPrior(VariationalPrior):
         S = variational_posterior.variance
         gamma = variational_posterior.binary_prob

-        if self.group_spike_prob:
-            gamma_grad = np.log((1-self.pi)/self.pi*gamma/(1.-gamma))+(np.square(mu)+S-np.log(S)-1.)/2.
-            gamma.gradient -= gamma_grad.mean(axis=0)
-        else:
-            gamma.gradient -= np.log((1-self.pi)/self.pi*gamma/(1.-gamma))+(np.square(mu)+S-np.log(S)-1.)/2.
+        gamma.gradient -= np.log((1-self.pi)/self.pi*gamma/(1.-gamma))+(np.square(mu)+S-np.log(S)-1.)/2.
         mu.gradient -= gamma*mu
         S.gradient -= (1. - (1. / (S))) * gamma /2.
         self.pi.gradient = (gamma/self.pi - (1.-gamma)/(1.-self.pi)).sum(axis=0)
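With the group_spike_prob branch gone, the gamma gradient is the single expression above; the column-sharing it used to approximate by averaging is now handled by ties instead. A numerical sketch of that expression with plain arrays standing in for GPy's Param objects (dKL_dgamma and all values are illustrative):

import numpy as np

def dKL_dgamma(mu, S, gamma, pi):
    # the KL derivative w.r.t. the binary probabilities gamma, exactly the
    # expression subtracted from gamma.gradient above
    return (np.log((1. - pi) / pi * gamma / (1. - gamma))
            + (np.square(mu) + S - np.log(S) - 1.) / 2.)

rng = np.random.RandomState(0)
mu = rng.randn(4, 3)
S = rng.uniform(0.5, 1.5, (4, 3))
gamma = np.full((4, 3), 0.5)
print(dKL_dgamma(mu, S, gamma, pi=0.2))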
@@ -242,14 +242,14 @@ gpu_code = """

 class PSICOMP_RBF_GPU(PSICOMP_RBF):

-    def __init__(self, GPU_direct=False):
+    def __init__(self, threadnum=128, blocknum=15, GPU_direct=False):
         assert gpu_init.initSuccess, "GPU initialization failed!"
         self.GPU_direct = GPU_direct
         self.cublas_handle = gpu_init.cublas_handle
         self.gpuCache = None

-        self.threadnum = 128
-        self.blocknum = 15
+        self.threadnum = threadnum
+        self.blocknum = blocknum
         module = SourceModule("#define THREADNUM "+str(self.threadnum)+"\n"+gpu_code)
         self.g_psi1computations = module.get_function('psi1computations')
         self.g_psi1computations.prepare('PPdPPPPiii')
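The constructor change above lifts the hard-coded CUDA launch geometry into arguments, with the old values kept as defaults. A hypothetical call, assuming a CUDA-capable machine where GPy's GPU modules import successfully; the values are illustrative only:

# defaults reproduce the old hard-coded behaviour
psicomp = PSICOMP_RBF_GPU(threadnum=256, blocknum=30)
psicomp_direct = PSICOMP_RBF_GPU(GPU_direct=True)  # threadnum=128, blocknum=15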
@@ -44,15 +44,12 @@ class SSGPLVM(SparseGP):
         if X_variance is None: # The variance of the variational approximation (S)
             X_variance = np.random.uniform(0,.1,X.shape)

-        gamma = np.empty_like(X, order='F') # The posterior probabilities of the binary variable in the variational approximation
-        gamma[:] = 0.5 + 0.1 * np.random.randn(X.shape[0], input_dim)
-        gamma[gamma>1.-1e-9] = 1.-1e-9
-        gamma[gamma<1e-9] = 1e-9
+        gamma = np.empty_like(X) # The posterior probabilities of the binary variable in the variational approximation
+        gamma[:] = 0.5

         if group_spike:
             gamma[:] = gamma[:,0]
-

         if Z is None:
             Z = np.random.permutation(X.copy())[:num_inducing]
             assert Z.shape[1] == X.shape[1]
@@ -73,14 +70,13 @@ class SSGPLVM(SparseGP):

         X = SpikeAndSlabPosterior(X, X_variance, gamma)

-        if group_spike:
-            kernel.group_spike_prob = True
-            self.variational_prior.group_spike_prob = True
-
         SparseGP.__init__(self, X, Y, Z, kernel, likelihood, inference_method, name, **kwargs)
         self.add_parameter(self.X, index=0)
         self.add_parameter(self.variational_prior)

+        if self.group_spike:
+            [self.X.gamma[:,i].tie('tieGamma'+str(i)) for i in xrange(self.X.gamma.shape[1])] # Tie columns together
+
         if mpi_comm != None:
             from ..util.mpi import divide_data
             Y_start, Y_end, Y_list = divide_data(Y.shape[0], mpi_comm)
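These per-column ties are what the Indexable.tie fix at the top of the commit enables: gamma is a single param array, and one Tie per column means multiple ties on the same object. A standalone numpy sketch of the invariant each 'tieGamma<i>' maintains, where set_tied is an illustrative helper rather than GPy API:

import numpy as np

gamma = np.full((20, 3), 0.5)  # mirrors gamma[:] = 0.5 above

def set_tied(gamma, col, value):
    # what the Tie named 'tieGamma<col>' enforces: every entry of that
    # column shares one value, so moving any entry moves the whole column
    gamma[:, col] = value

set_tied(gamma, 0, 0.9)
assert (gamma[:, 0] == 0.9).all() and (gamma[:, 1] == 0.5).all()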