mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-08 11:32:39 +02:00
[newest patch updates, cleaned interfaces and mean_function additions]
This commit is contained in:
parent
75ccd468ef
commit
71bc90bf98
6 changed files with 28 additions and 18 deletions
|
|
@ -34,18 +34,24 @@ except:
|
|||
def tests(verbose=10):
|
||||
Tester(testing).test(verbose=verbose)
|
||||
|
||||
def load(file_path):
|
||||
def load(file_or_path):
|
||||
"""
|
||||
Load a previously pickled model, using `m.pickle('path/to/file.pickle)'
|
||||
|
||||
:param file_name: path/to/file.pickle
|
||||
"""
|
||||
import cPickle as pickle
|
||||
try:
|
||||
with open(file_path, 'rb') as f:
|
||||
m = pickle.load(f)
|
||||
import cPickle as pickle
|
||||
if isinstance(file_or_path, basestring):
|
||||
with open(file_or_path, 'rb') as f:
|
||||
m = pickle.load(f)
|
||||
else:
|
||||
m = pickle.load(file_or_path)
|
||||
except:
|
||||
import pickle as pickle
|
||||
with open(file_path, 'rb') as f:
|
||||
m = pickle.load(f)
|
||||
import pickle
|
||||
if isinstance(file_or_path, basestring):
|
||||
with open(file_or_path, 'rb') as f:
|
||||
m = pickle.load(f)
|
||||
else:
|
||||
m = pickle.load(file_or_path)
|
||||
return m
|
||||
|
|
|
|||
|
|
@ -227,8 +227,8 @@ class Nameable(Gradcheckable):
|
|||
Make an object nameable inside the hierarchy.
|
||||
"""
|
||||
def __init__(self, name, *a, **kw):
|
||||
super(Nameable, self).__init__(*a, **kw)
|
||||
self._name = name or self.__class__.__name__
|
||||
super(Nameable, self).__init__(*a, **kw)
|
||||
|
||||
@property
|
||||
def name(self):
|
||||
|
|
|
|||
|
|
@ -43,12 +43,10 @@ class SparseGP(GP):
|
|||
def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, X_variance=None, inference_method=None,
|
||||
name='sparse gp', Y_metadata=None, normalizer=False):
|
||||
|
||||
self.missing_data = np.isnan(Y).any()
|
||||
|
||||
#pick a sensible inference method
|
||||
if inference_method is None:
|
||||
if isinstance(likelihood, likelihoods.Gaussian):
|
||||
inference_method = var_dtc.VarDTC(limit=1 if not self.missing_data else Y.shape[1])
|
||||
inference_method = var_dtc.VarDTC(limit=1)
|
||||
else:
|
||||
#inference_method = ??
|
||||
raise NotImplementedError("what to do what to do?")
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ class Linear(Mapping):
|
|||
"""
|
||||
|
||||
def __init__(self, input_dim, output_dim, name='linmap'):
|
||||
Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name)
|
||||
super(Linear, self).__init__(input_dim=input_dim, output_dim=output_dim, name=name)
|
||||
self.A = Param('A', np.random.randn(self.input_dim, self.output_dim))
|
||||
self.link_parameter(self.A)
|
||||
|
||||
|
|
|
|||
|
|
@ -20,10 +20,10 @@ class GPClassification(GP):
|
|||
|
||||
"""
|
||||
|
||||
def __init__(self, X, Y, kernel=None,Y_metadata=None):
|
||||
def __init__(self, X, Y, kernel=None,Y_metadata=None, mean_function=None):
|
||||
if kernel is None:
|
||||
kernel = kern.RBF(X.shape[1])
|
||||
|
||||
likelihood = likelihoods.Bernoulli()
|
||||
|
||||
GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, inference_method=EP(), name='gp_classification')
|
||||
GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, inference_method=EP(), mean_function=mean_function, name='gp_classification')
|
||||
|
|
|
|||
|
|
@ -230,10 +230,16 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
|
|||
ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
|
||||
|
||||
#set the limits of the plot to some sensible values
|
||||
ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper))
|
||||
ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
|
||||
ax.set_xlim(xmin, xmax)
|
||||
ax.set_ylim(ymin, ymax)
|
||||
try:
|
||||
ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper))
|
||||
if ymin != ymax:
|
||||
ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
|
||||
ax.set_xlim(xmin, xmax)
|
||||
ax.set_ylim(ymin, ymax)
|
||||
except:
|
||||
# do nothing
|
||||
# No training data on model
|
||||
pass
|
||||
|
||||
#add inducing inputs (if a sparse model is used)
|
||||
if hasattr(model,"Z"):
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue