[newest patch updates, cleaned interfaces and mean_function additions]

This commit is contained in:
mzwiessele 2015-09-30 14:43:04 +01:00
parent 75ccd468ef
commit 71bc90bf98
6 changed files with 28 additions and 18 deletions

View file

@ -34,18 +34,24 @@ except:
def tests(verbose=10): def tests(verbose=10):
Tester(testing).test(verbose=verbose) Tester(testing).test(verbose=verbose)
def load(file_path): def load(file_or_path):
""" """
Load a previously pickled model, using `m.pickle('path/to/file.pickle)' Load a previously pickled model, using `m.pickle('path/to/file.pickle)'
:param file_name: path/to/file.pickle :param file_name: path/to/file.pickle
""" """
import cPickle as pickle
try: try:
with open(file_path, 'rb') as f: import cPickle as pickle
m = pickle.load(f) if isinstance(file_or_path, basestring):
with open(file_or_path, 'rb') as f:
m = pickle.load(f)
else:
m = pickle.load(file_or_path)
except: except:
import pickle as pickle import pickle
with open(file_path, 'rb') as f: if isinstance(file_or_path, basestring):
m = pickle.load(f) with open(file_or_path, 'rb') as f:
m = pickle.load(f)
else:
m = pickle.load(file_or_path)
return m return m

View file

@ -227,8 +227,8 @@ class Nameable(Gradcheckable):
Make an object nameable inside the hierarchy. Make an object nameable inside the hierarchy.
""" """
def __init__(self, name, *a, **kw): def __init__(self, name, *a, **kw):
super(Nameable, self).__init__(*a, **kw)
self._name = name or self.__class__.__name__ self._name = name or self.__class__.__name__
super(Nameable, self).__init__(*a, **kw)
@property @property
def name(self): def name(self):

View file

@ -43,12 +43,10 @@ class SparseGP(GP):
def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, X_variance=None, inference_method=None, def __init__(self, X, Y, Z, kernel, likelihood, mean_function=None, X_variance=None, inference_method=None,
name='sparse gp', Y_metadata=None, normalizer=False): name='sparse gp', Y_metadata=None, normalizer=False):
self.missing_data = np.isnan(Y).any()
#pick a sensible inference method #pick a sensible inference method
if inference_method is None: if inference_method is None:
if isinstance(likelihood, likelihoods.Gaussian): if isinstance(likelihood, likelihoods.Gaussian):
inference_method = var_dtc.VarDTC(limit=1 if not self.missing_data else Y.shape[1]) inference_method = var_dtc.VarDTC(limit=1)
else: else:
#inference_method = ?? #inference_method = ??
raise NotImplementedError("what to do what to do?") raise NotImplementedError("what to do what to do?")

View file

@ -25,7 +25,7 @@ class Linear(Mapping):
""" """
def __init__(self, input_dim, output_dim, name='linmap'): def __init__(self, input_dim, output_dim, name='linmap'):
Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name) super(Linear, self).__init__(input_dim=input_dim, output_dim=output_dim, name=name)
self.A = Param('A', np.random.randn(self.input_dim, self.output_dim)) self.A = Param('A', np.random.randn(self.input_dim, self.output_dim))
self.link_parameter(self.A) self.link_parameter(self.A)

View file

@ -20,10 +20,10 @@ class GPClassification(GP):
""" """
def __init__(self, X, Y, kernel=None,Y_metadata=None): def __init__(self, X, Y, kernel=None,Y_metadata=None, mean_function=None):
if kernel is None: if kernel is None:
kernel = kern.RBF(X.shape[1]) kernel = kern.RBF(X.shape[1])
likelihood = likelihoods.Bernoulli() likelihood = likelihoods.Bernoulli()
GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, inference_method=EP(), name='gp_classification') GP.__init__(self, X=X, Y=Y, kernel=kernel, likelihood=likelihood, inference_method=EP(), mean_function=mean_function, name='gp_classification')

View file

@ -230,10 +230,16 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
ecolor='k', fmt=None, elinewidth=.5, alpha=.5) ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
#set the limits of the plot to some sensible values #set the limits of the plot to some sensible values
ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper)) try:
ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin) ymin, ymax = min(np.append(Y[which_data_rows, which_data_ycols].flatten(), lower)), max(np.append(Y[which_data_rows, which_data_ycols].flatten(), upper))
ax.set_xlim(xmin, xmax) if ymin != ymax:
ax.set_ylim(ymin, ymax) ymin, ymax = ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
except:
# do nothing
# No training data on model
pass
#add inducing inputs (if a sparse model is used) #add inducing inputs (if a sparse model is used)
if hasattr(model,"Z"): if hasattr(model,"Z"):