diff --git a/GPy/core/parameterization/parameter_core.py b/GPy/core/parameterization/parameter_core.py
index cdf3f534..bf5b74ac 100644
--- a/GPy/core/parameterization/parameter_core.py
+++ b/GPy/core/parameterization/parameter_core.py
@@ -520,7 +520,7 @@ class Indexable(Nameable, Observable):

         self.constraints.add(t, self._raveled_index())
         t.add_tied_parameter(self)
-        self._highest_parent_._connect_fixes()
+        self._highest_parent_._connect_fixes()

     def constrain(self, transform, warning=True, trigger_parent=True):
         """
diff --git a/GPy/kern/_src/add.py b/GPy/kern/_src/add.py
index 12f5d444..d63c9ab8 100644
--- a/GPy/kern/_src/add.py
+++ b/GPy/kern/_src/add.py
@@ -88,17 +88,18 @@ class Add(CombinationKernel):
             # rbf X bias
             #elif isinstance(p1, (Bias, Fixed)) and isinstance(p2, (RBF, RBFInv)):
             elif isinstance(p1, Bias) and isinstance(p2, (RBF, Linear)):
-                tmp = p2.psi1(Z, variational_posterior)
-                psi2 += p1.variance * (tmp[:, :, None] + tmp[:, None, :])
+                tmp = p2.psi1(Z, variational_posterior).sum(axis=0)
+                psi2 += p1.variance * (tmp[:,None]+tmp[None,:]) #(tmp[:, :, None] + tmp[:, None, :])
             #elif isinstance(p2, (Bias, Fixed)) and isinstance(p1, (RBF, RBFInv)):
             elif isinstance(p2, Bias) and isinstance(p1, (RBF, Linear)):
-                tmp = p1.psi1(Z, variational_posterior)
-                psi2 += p2.variance * (tmp[:, :, None] + tmp[:, None, :])
+                tmp = p1.psi1(Z, variational_posterior).sum(axis=0)
+                psi2 += p2.variance * (tmp[:,None]+tmp[None,:]) #(tmp[:, :, None] + tmp[:, None, :])
             elif isinstance(p2, (RBF, Linear)) and isinstance(p1, (RBF, Linear)):
                 assert np.intersect1d(p1.active_dims, p2.active_dims).size == 0, "only non overlapping kernel dimensions allowed so far"
                 tmp1 = p1.psi1(Z, variational_posterior)
                 tmp2 = p2.psi1(Z, variational_posterior)
-                psi2 += (tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
+                psi2 += np.einsum('nm,no->mo',tmp1,tmp2)+np.einsum('nm,no->mo',tmp2,tmp1)
+                #(tmp1[:, :, None] * tmp2[:, None, :]) + (tmp2[:, :, None] * tmp1[:, None, :])
             else:
                 raise NotImplementedError, "psi2 cannot be computed for this kernel"
         return psi2
diff --git a/GPy/kern/_src/static.py b/GPy/kern/_src/static.py
index 68884937..f8ce7fd1 100644
--- a/GPy/kern/_src/static.py
+++ b/GPy/kern/_src/static.py
@@ -39,7 +39,7 @@ class Static(Kern):

     def psi2(self, Z, variational_posterior):
         K = self.K(variational_posterior.mean, Z)
-        return K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inherriting classes
+        return np.einsum('ij,ik->jk',K,K) #K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inherriting classes


 class White(Static):
@@ -53,7 +53,7 @@ class White(Static):
         return np.zeros((X.shape[0], X2.shape[0]))

     def psi2(self, Z, variational_posterior):
-        return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
+        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)

     def update_gradients_full(self, dL_dK, X, X2=None):
         self.variance.gradient = np.trace(dL_dK)
@@ -82,12 +82,12 @@ class Bias(Static):
         self.variance.gradient = dL_dKdiag.sum()

     def psi2(self, Z, variational_posterior):
-        ret = np.empty((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
-        ret[:] = self.variance**2
+        ret = np.empty((Z.shape[0], Z.shape[0]), dtype=np.float64)
+        ret[:] = self.variance*self.variance*variational_posterior.shape[0]
         return ret

     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
-        self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()
+        self.variance.gradient = dL_dpsi0.sum() + dL_dpsi1.sum() + 2.*self.variance*dL_dpsi2.sum()*variational_posterior.shape[0]

 class Fixed(Static):
     def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
@@ -97,7 +97,7 @@ class Fixed(Static):
         :param variance: the variance of the kernel
         :type variance: float
         """
-        super(Bias, self).__init__(input_dim, variance, active_dims, name)
+        super(Fixed, self).__init__(input_dim, variance, active_dims, name)
         self.fixed_K = covariance_matrix
     def K(self, X, X2):
         return self.variance * self.fixed_K
@@ -112,7 +112,7 @@ class Fixed(Static):
         self.variance.gradient = np.einsum('i,i', dL_dKdiag, self.fixed_K)

     def psi2(self, Z, variational_posterior):
-        return np.zeros((variational_posterior.shape[0], Z.shape[0], Z.shape[0]), dtype=np.float64)
+        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)

     def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
         self.variance.gradient = dL_dpsi0.sum()
diff --git a/GPy/models/ss_mrd.py b/GPy/models/ss_mrd.py
index 612ea350..036ac095 100644
--- a/GPy/models/ss_mrd.py
+++ b/GPy/models/ss_mrd.py
@@ -15,14 +15,17 @@ class SSMRD(Model):
         self.updates = False

         self.models = [SSGPLVM(y, input_dim, X=X, X_variance=X_variance, num_inducing=num_inducing,Z=Z,init=initx,
-                               kernel=kernel if kernel else None,inference_method=inference_method,likelihood=likelihoods,
+                               kernel=kernel.copy() if kernel else None,inference_method=inference_method,likelihood=likelihoods,
                                name='model_'+str(i)) for i,y in enumerate(Ylist)]
         self.add_parameters(*(self.models))
+
+        [[[self.models[m].X.mean[i,j:j+1].tie('mean_'+str(i)+'_'+str(j)) for m in xrange(len(self.models))] for j in xrange(self.models[0].X.mean.shape[1])]
+         for i in xrange(self.models[0].X.mean.shape[0])]
+        [[[self.models[m].X.variance[i,j:j+1].tie('var_'+str(i)+'_'+str(j)) for m in xrange(len(self.models))] for j in xrange(self.models[0].X.variance.shape[1])]
+         for i in xrange(self.models[0].X.variance.shape[0])]
+
         self.updates = True
-        [[self.models[j].X.mean.flat[i:i+1].tie('mean_'+str(i)) for j in xrange(len(self.models))] for i in xrange(self.models[0].X.mean.size)]
-        [[self.models[j].X.variance.flat[i:i+1].tie('var_'+str(i)) for j in xrange(len(self.models))] for i in xrange(self.models[0].X.variance.size)]
-
     def parameters_changed(self):
         super(SSMRD, self).parameters_changed()
         self._log_marginal_likelihood = sum([m._log_marginal_likelihood for m in self.models])
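
Note on the psi2 change running through this patch: every `psi2` now returns the `M x M` matrix summed over the `N` data points, instead of the old per-point `N x M x M` array; the `np.einsum` contractions and the extra `variational_posterior.shape[0]` factors in `Bias.psi2` and its gradient are exactly that sum over `n` pulled inside. A minimal NumPy sketch of the equivalence (illustrative only, not part of the patch; `N`, `M`, the seed, and the random `tmp` arrays are made up):

```python
import numpy as np

# Shapes as in the patch: psi1-like arrays are (N, M), with N data
# points and M inducing points. Sizes here are arbitrary.
N, M = 5, 3
rng = np.random.RandomState(0)
tmp1, tmp2 = rng.randn(N, M), rng.randn(N, M)

# Old Add.psi2 cross term: per-point (N, M, M) outer products,
# here summed over the data axis n.
old = ((tmp1[:, :, None] * tmp2[:, None, :]) +
       (tmp2[:, :, None] * tmp1[:, None, :])).sum(axis=0)

# New form from the patch: the same sum as two einsum contractions,
# computed without ever building the (N, M, M) intermediate.
new = np.einsum('nm,no->mo', tmp1, tmp2) + np.einsum('nm,no->mo', tmp2, tmp1)
assert np.allclose(old, new)

# Bias.psi2 under the same reduction: the old (N, M, M) array filled
# with variance**2 collapses to variance**2 * N once summed over n,
# hence the factor variational_posterior.shape[0] in both the value
# and the variance gradient.
variance = 2.0
old_bias = np.full((N, M, M), variance**2).sum(axis=0)
assert np.allclose(old_bias, variance * variance * N)
```

Besides matching the old summed quantity, the einsum form avoids materialising the `N x M x M` intermediate, which is presumably the motivation for the change.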