mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-09 20:12:38 +02:00
[core] updating system, security branching
This commit is contained in:
parent
938cc49aed
commit
70a9a26d7e
15 changed files with 366 additions and 65 deletions
|
|
@ -137,6 +137,20 @@ class BayesianGPLVM(SparseGP_MPI):
|
|||
fignum, plot_inducing, legend,
|
||||
plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
|
||||
|
||||
def plot_magnification(self, labels=None, which_indices=None,
                       resolution=50, ax=None, marker='o', s=40,
                       fignum=None, legend=True,
                       plot_limits=None,
                       aspect='auto', updates=False, **kwargs):
    """Plot the magnification factor over the latent space.

    Thin delegator to ``dim_reduction_plots.plot_magnification``.
    Requires matplotlib to have been imported by the caller already
    (plotting support is optional in this package).
    """
    import sys
    # Fail loudly rather than import matplotlib ourselves.
    assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
    from ..plotting.matplot_dep import dim_reduction_plots

    # Bundle the positional arguments once, then delegate.
    plot_args = (labels, which_indices,
                 resolution, ax, marker, s,
                 fignum, False, legend,
                 plot_limits, aspect, updates)
    return dim_reduction_plots.plot_magnification(self, *plot_args, **kwargs)
|
||||
|
||||
def do_test_latents(self, Y):
|
||||
"""
|
||||
Compute the latent representation for a set of new points Y
|
||||
|
|
|
|||
|
|
@ -43,19 +43,19 @@ class GPLVM(GP):
|
|||
super(GPLVM, self).parameters_changed()
|
||||
self.X.gradient = self.kern.gradients_X(self.grad_dict['dL_dK'], self.X, None)
|
||||
|
||||
def jacobian(self, X):
    """Jacobian of the posterior mean mapping, evaluated at the points X.

    :param X: input locations, np.ndarray of shape (num_points, input_dim)
    :returns: J, np.ndarray of shape (num_points, input_dim, output_dim);
        J[n, :, d] is the gradient of output dimension d with respect to
        the inputs at X[n].
    """
    J = np.zeros((X.shape[0], X.shape[1], self.output_dim))
    for d in range(self.output_dim):
        # gradients_X with the posterior woodbury vector for column d
        # yields the derivative of the posterior mean for that output.
        J[:, :, d] = self.kern.gradients_X(
            self.posterior.woodbury_vector[:, d:d + 1], X, self.X)
    return J
||||
def magnification(self, X):
    """Magnification factor of the latent-to-data mapping at points X.

    The magnification at a point is sqrt(det(J J^T)), where J is the
    Jacobian of the mean mapping at that point; it measures how much the
    mapping locally stretches the latent space.

    :param X: input locations, np.ndarray of shape (num_points, input_dim)
    :returns: np.ndarray of shape (num_points,) of magnification factors
    """
    target = np.zeros(X.shape[0])
    J = self.jacobian(X)
    for n in range(X.shape[0]):
        # Gram determinant of the Jacobian row: sqrt(det(J_n J_n^T)).
        target[n] = np.sqrt(np.linalg.det(
            np.dot(J[n, :, :], np.transpose(J[n, :, :]))))
    return target
||||
def plot(self):
|
||||
assert self.Y.shape[1] == 2, "too high dimensional to plot. Try plot_latent"
|
||||
|
|
@ -82,5 +82,17 @@ class GPLVM(GP):
|
|||
fignum, False, legend,
|
||||
plot_limits, aspect, updates, **kwargs)
|
||||
|
||||
def plot_magnification(self, labels=None, which_indices=None,
                       resolution=50, ax=None, marker='o', s=40,
                       fignum=None, legend=True,
                       plot_limits=None,
                       aspect='auto', updates=False, **kwargs):
    """Plot the magnification factor over the latent space.

    Delegates to ``dim_reduction_plots.plot_magnification``. Requires
    matplotlib to have been imported by the caller already (plotting
    support is optional in this package).

    NOTE(review): a second ``plot_magnification`` definition delegating
    to ``util.plot_latent`` immediately preceded this one and was dead
    code (shadowed by this definition); it has been removed.
    """
    import sys
    # Fail loudly rather than import matplotlib ourselves.
    assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
    from ..plotting.matplot_dep import dim_reduction_plots

    return dim_reduction_plots.plot_magnification(self, labels, which_indices,
                                                  resolution, ax, marker, s,
                                                  fignum, False, legend,
                                                  plot_limits, aspect, updates, **kwargs)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue