Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-09 03:52:39 +02:00)
added gplvm and sparse gp to new parameterized structure
parent c8eac84d55, commit 3316d29341
13 changed files with 106 additions and 96 deletions
@@ -23,7 +23,6 @@ class GP(GPBase):
     """
     def __init__(self, X, likelihood, kernel, normalize_X=False):
         GPBase.__init__(self, X, likelihood, kernel, normalize_X=normalize_X)
-
         #self._set_params(self._get_params())

     def getstate(self):
@@ -32,24 +32,17 @@ class GPBase(Model):
         self._Xoffset = np.zeros((1, self.input_dim))
         self._Xscale = np.ones((1, self.input_dim))

-        self.add_parameter(self.kern, gradient=self.dL_dtheta)
-        self.add_parameter(self.likelihood, gradient=self.dL_dlikelihood)
-        self.kern.connect_input(self.X)
+        self.add_parameter(self.kern, gradient=lambda:self.kern.dK_dtheta(self.dL_dK, self.X))
+        self.add_parameter(self.likelihood, gradient=lambda:self.likelihood._gradients(partial=np.diag(self.dL_dK)))
+        #self.kern.connect_input(self.X)

         # Model.__init__(self)
         # All leaf nodes should call self._set_params(self._get_params()) at
         # the end

+    def parameters_changed(self):
+        self.kern.parameters_changed()
+        self.likelihood.parameters_changed()
+
-    def dL_dtheta(self):
-        return self.kern.dK_dtheta(self.dL_dK, self.X)
-
-    def dL_dlikelihood(self):
-        return self.likelihood._gradients(partial=np.diag(self.dL_dK))
-
-    #
-    # def parameters_changed(self):
-    #     self.kern.parameters_changed()
-    #     self.likelihood.parameters_changed()
-
     def getstate(self):
         """
@@ -544,7 +544,7 @@ class Model(Parameterized):
         if not hasattr(self, 'kern'):
             raise ValueError, "this model has no kernel"

-        k = [p for p in self.kern.parts if p.name in ['rbf', 'linear', 'rbf_inv']]
+        k = [p for p in self.kern._parameters_ if p.name in ['rbf', 'linear', 'rbf_inv']]
         if (not len(k) == 1) or (not k[0].ARD):
             raise ValueError, "cannot determine sensitivity for this kernel"
         k = k[0]
@@ -6,7 +6,8 @@ Created on 4 Sep 2013
 import itertools
 import numpy
 from transformations import Logexp, NegativeLogexp, Logistic
-from parameterized import Nameable, Pickleable
+from parameterized import Nameable, Pickleable, Observable
+from GPy.core.parameterized import _adjust_name_for_printing

 ###### printing
 __constraints_name__ = "Constraint"
@@ -26,7 +27,7 @@ class ListArray(numpy.ndarray):
     def __eq__(self, other):
         return other is self

-class ObservableArray(ListArray):
+class ObservableArray(ListArray, Observable):
     """
     An ndarray which reports changed to it's observers.
     The observers can add themselves with a callable, which
@@ -41,19 +42,11 @@ class ObservableArray(ListArray):
         # see InfoArray.__array_finalize__ for comments
         if obj is None: return
         self._observers_ = getattr(obj, '_observers_', None)
-    def add_observer(self, observer, callble):
-        self._observers_[observer] = callble
-    def remove_observer(self, observer):
-        del self._observers_[observer]
-    def _notify_observers(self):
-        [callble(self) for callble in self._observers_.itervalues()]
-    def __setitem__(self, s, val):
+    def __setitem__(self, s, val, update=True):
         if not numpy.all(numpy.equal(self[s], val)):
-            if isinstance(s, slice):
-                super(ObservableArray, self).__setitem__(s, val)
-            else:
-                numpy.put(self,s,val)
-            self._notify_observers()
+            super(ObservableArray, self).__setitem__(s, val)
+            if update:
+                self._notify_observers()
     def __getslice__(self, start, stop):
         return self.__getitem__(slice(start, stop))
     def __setslice__(self, start, stop, val):
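Note: the observer methods move to the Observable mix-in (see the parameterized.py hunk below), and the new update keyword allows writes that skip observer notification. A rough, hypothetical re-implementation of the behaviour (GPy's real class also propagates _observers_ to views via __array_finalize__, omitted here):

    import numpy

    class MiniObservableArray(numpy.ndarray):
        def __new__(cls, arr):
            obj = numpy.asarray(arr, dtype=float).view(cls)
            obj._observers_ = {}
            return obj
        def __setitem__(self, s, val, update=True):
            # only write (and notify) when the value actually changes
            if not numpy.all(numpy.equal(self[s], val)):
                super(MiniObservableArray, self).__setitem__(s, val)
                if update:
                    for callble in list(self._observers_.values()):
                        callble(self)

    a = MiniObservableArray([1., 2., 3.])
    seen = []
    a._observers_['log'] = lambda arr: seen.append(arr.copy())
    a[0] = 5.                            # notifies: seen grows by one
    a.__setitem__(1, 7., update=False)   # silent write, no notification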
@@ -158,10 +151,10 @@ class Param(ObservableArray, Nameable, Pickleable):
     #===========================================================================
     # get/set parameters
     #===========================================================================
-    def _set_params(self, param):
+    def _set_params(self, param, update=True):
         self.flat = param
-        self._notify_observers()
+        self._notify_tied_parameters()
         self._notify_observers()

     def _get_params(self):
         return self.flat
@@ -370,7 +363,7 @@ class Param(ObservableArray, Nameable, Pickleable):
         except AttributeError: pass# returning 0d array or float, double etc
         return new_arr
     def __setitem__(self, s, val, update=True):
-        super(Param, self).__setitem__(s, val)
+        super(Param, self).__setitem__(s, val, update=update)
         self._notify_tied_parameters()
         if update:
             self._highest_parent_.parameters_changed()
@@ -456,8 +449,8 @@ class Param(ObservableArray, Nameable, Pickleable):
     @property
     def name_hirarchical(self):
         if self.has_parent():
-            return self._direct_parent_.hirarchy_name()+self.name
-        return self.name
+            return self._direct_parent_.hirarchy_name()+_adjust_name_for_printing(self.name)
+        return _adjust_name_for_printing(self.name)
     def __repr__(self, *args, **kwargs):
         name = "\033[1m{x:s}\033[0;0m:\n".format(
             x=self.name_hirarchical)
@@ -495,7 +488,7 @@ class Param(ObservableArray, Nameable, Pickleable):
         return reduce(lambda a, b:max(a, len(str(b))), ind, len(__index_name__))
     def _short(self):
         # short string to print
-        name = self._direct_parent_.hirarchy_name() + self.name
+        name = self._direct_parent_.hirarchy_name() + _adjust_name_for_printing(self.name)
         if self._realsize_ < 2:
             return name
         ind = self._indices()
@@ -50,7 +50,20 @@ class Pickleable(object):
         """
         raise NotImplementedError, "To be able to use pickling you need to implement this method"

-from parameter import ParamConcatenation
+class Observable(object):
+    _observers_ = {}
+    def add_observer(self, observer, callble):
+        self._observers_[observer] = callble
+        callble(self)
+    def remove_observer(self, observer):
+        del self._observers_[observer]
+    def _notify_observers(self):
+        [callble(self) for callble in self._observers_.itervalues()]
+
+def _adjust_name_for_printing(name):
+    return name.replace(" ", "_").replace(".", "_")
+
+from parameter import ParamConcatenation, Param
 from index_operations import ParameterIndexOperations,\
     index_empty
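Note: as committed, _observers_ = {} is a class attribute, so every Observable instance that never rebinds it shares one registry (ObservableArray sidesteps this by assigning its own _observers_ in __array_finalize__). A per-instance variant of the same mix-in would look like this hypothetical sketch:

    class ObservableSketch(object):
        # illustrative variant; like GPy's version it also fires the callable
        # once immediately on registration
        def __init__(self):
            self._observers_ = {}
        def add_observer(self, observer, callble):
            self._observers_[observer] = callble
            callble(self)
        def remove_observer(self, observer):
            del self._observers_[observer]
        def _notify_observers(self):
            for callble in list(self._observers_.values()):
                callble(self)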
@@ -65,7 +78,7 @@ FIXED = False
 UNFIXED = True
 #===============================================================================

-class Parameterized(Nameable, Pickleable):
+class Parameterized(Nameable, Pickleable, Observable):
     """
     Parameterized class
@@ -120,9 +133,9 @@ class Parameterized(Nameable, Pickleable):
         # self._parameters_.extend(parameters)
         self._connect_parameters()
         self.gradient_mapping = {}
+        self._added_names_ = set()
         del self._in_init_
-

     @property
     def constraints(self):
         if self._constraints_ is None:
@@ -188,6 +201,7 @@ class Parameterized(Nameable, Pickleable):
         parameters without gradient specification
         """
+        [self.add_parameter(p) for p in parameters]

 #    def remove_parameter(self, *names_params_indices):
 #        """
 #        :param names_params_indices: mix of parameter_names, parameter objects, or indices
@@ -232,13 +246,15 @@ class Parameterized(Nameable, Pickleable):
 #                    self.__dict__[k] = p
 #            except: # parameter comparison, just for convenience
 #                pass
-            pname = p.name.replace(" ", "_").replace(".","_")
+            pname = _adjust_name_for_printing(p.name)
             if pname in self.__dict__:
-                if not p is self.__dict__[pname]:
-                    not_unique.append(pname)
-                    del self.__dict__[pname]
+                if isinstance(self.__dict__[pname], (Parameterized, Param)):
+                    if not p is self.__dict__[pname]:
+                        not_unique.append(pname)
+                        del self.__dict__[pname]
             elif not (pname in not_unique):
                 self.__dict__[pname] = p
+                self._added_names_.add(pname)
         sizes = numpy.cumsum([0] + self._parameter_sizes_)
         self.size = sizes[-1]
         self._param_slices_ = [slice(start, stop) for start,stop in zip(sizes, sizes[1:])]
@@ -289,9 +305,11 @@ class Parameterized(Nameable, Pickleable):
                 self._parameters_,
                 self._name,
                 self.gradient_mapping,
+                self._added_names_,
                 ]

     def setstate(self, state):
+        self._added_names_ = state.pop()
         self.gradient_mapping = state.pop(),
         self._name = state.pop()
         self._parameters_ = state.pop()
@@ -334,9 +352,9 @@ class Parameterized(Nameable, Pickleable):
     def _get_params(self):
         # don't overwrite this anymore!
         return numpy.hstack([x._get_params() for x in self._parameters_])#numpy.fromiter(itertools.chain(*itertools.imap(lambda x: x._get_params(), self._parameters_)), dtype=numpy.float64, count=sum(self._parameter_sizes_))
-    def _set_params(self, params):
+    def _set_params(self, params, update=True):
         # don't overwrite this anymore!
-        [p._set_params(params[s]) for p,s in itertools.izip(self._parameters_,self._param_slices_)]
+        [p._set_params(params[s], update=update) for p,s in itertools.izip(self._parameters_,self._param_slices_)]
         self.parameters_changed()
     def _get_params_transformed(self):
         p = self._get_params()
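Note: _get_params/_set_params round-trip the whole hierarchy through one flat vector, cut up by the precomputed _param_slices_ built in _connect_parameters from cumulative sizes. The slicing scheme in isolation (sizes are illustrative):

    import numpy

    sizes = [3, 1, 2]                             # per-parameter sizes
    offsets = numpy.cumsum([0] + sizes)           # [0, 3, 4, 6]
    param_slices = [slice(a, b) for a, b in zip(offsets, offsets[1:])]

    flat = numpy.arange(6, dtype=float)           # the stacked parameter vector
    pieces = [flat[s] for s in param_slices]      # [0 1 2], [3], [4 5]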
@@ -350,9 +368,13 @@ class Parameterized(Nameable, Pickleable):
         [numpy.put(p, ind, c.f(p[ind])) for c,ind in self.constraints.iteritems() if c != __fixed__]
         self._set_params(p)
     def _name_changed(self, param, old_name):
-        if hasattr(self, old_name):
+        if hasattr(self, old_name) and old_name in self._added_names_:
             delattr(self, old_name)
-            self.__dict__[param.name] = param
+            self._added_names_.remove(old_name)
+        pname = _adjust_name_for_printing(param.name)
+        if pname not in self.__dict__:
+            self._added_names_.add(pname)
+            self.__dict__[pname] = param
     #===========================================================================
     # Index Handling
     #===========================================================================
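Note: _adjust_name_for_printing simply makes a parameter name attribute-safe, and _added_names_ tracks which attributes the framework auto-created, so renaming a parameter only removes attributes the framework itself added, never ones set by the user. For example:

    >>> _adjust_name_for_printing('inducing inputs')
    'inducing_inputs'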
@@ -540,9 +562,9 @@ class Parameterized(Nameable, Pickleable):
     #===========================================================================
     def _parameter_names(self, add_name=False):
         if add_name:
-            return [self.name + "." + xi for x in self._parameters_ for xi in x._parameter_names(add_name=True)]
+            return [_adjust_name_for_printing(self.name) + "." + xi for x in self._parameters_ for xi in x._parameter_names(add_name=True)]
         return [xi for x in self._parameters_ for xi in x._parameter_names(add_name=True)]
-    parameter_names = property(_parameter_names, doc="Names for all parameters handled by this parameterization object")
+    parameter_names = property(_parameter_names, doc="Names for all parameters handled by this parameterization object -- will add hirarchy name entries for printing")
     @property
     def flattened_parameters(self):
         return [xi for x in self._parameters_ for xi in x.flattened_parameters]
@@ -51,15 +51,13 @@ class SparseGP(GPBase):
         if self.has_uncertain_inputs:
             self.X_variance /= np.square(self._Xscale)

-        self.Z = Param('inducing input', self.Z)
-        self._const_jitter = None
-
+        self.Z = Param('inducing inputs', self.Z)
         self.add_parameter(self.Z, gradient=self.dL_dZ, index=0)
         self.add_parameter(self.kern, gradient=self.dL_dtheta)
-
-        self._compute_kernel_matrices()
-        self.Z.add_observer(self, lambda Z: self._compute_kernel_matrices())
-        #self.Z._notify_observers()
+
+        self._const_jitter = None
+        self.add_parameter(self.likelihood, gradient=lambda:self.likelihood._gradients(partial=self.partial_for_likelihood))
+        #self.Z.add_observer(self, lambda Z: self._compute_kernel_matrices() or self._computations())

     def getstate(self):
         """
@@ -165,7 +163,7 @@ class SparseGP(GPBase):


         # the partial derivative vector for the likelihood
-        if self.likelihood.num_params == 0:
+        if self.likelihood.size == 0:
             # save computation here.
             self.partial_for_likelihood = None
         elif self.likelihood.is_heteroscedastic:
@@ -211,19 +209,19 @@ class SparseGP(GPBase):
         #self.Z = p[:self.num_inducing * self.input_dim].reshape(self.num_inducing, self.input_dim)
         #self.kern._set_params(p[self.Z.size:self.Z.size + self.kern.num_params])
         #self.likelihood._set_params(p[self.Z.size + self.kern.num_params:])
-        #self._compute_kernel_matrices()
+        self._compute_kernel_matrices()
+        import ipdb;ipdb.set_trace()
         self._computations()
         self.Cpsi1V = None
+        # make sparse_gp compatible with gp_base gradients:
+        self.dL_dK = self.dL_dKmm
+        super(SparseGP, self).parameters_changed()

-    def _get_params(self):
-        return np.hstack([self.Z.flatten(), self.kern._get_params_transformed(), self.likelihood._get_params()])
-
-    def _get_param_names(self):
-        return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])], [])\
-            + self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
+#    def _get_params(self):
+#        return np.hstack([self.Z.flatten(), self.kern._get_params_transformed(), self.likelihood._get_params()])
+#
+#    def _get_param_names(self):
+#        return sum([['iip_%i_%i' % (i, j) for j in range(self.Z.shape[1])] for i in range(self.Z.shape[0])], [])\
+#            + self.kern._get_param_names_transformed() + self.likelihood._get_param_names()

     #def _get_print_names(self):
     #    return self.kern._get_param_names_transformed() + self.likelihood._get_param_names()
@@ -249,8 +247,8 @@ class SparseGP(GPBase):
 #            self.likelihood.fit_FITC(self.Kmm,self.psi1,self.psi0)
             self._set_params(self._get_params()) # update the GP

-    def _log_likelihood_gradients(self):
-        return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))
+#    def _log_likelihood_gradients(self):
+#        return np.hstack((self.dL_dZ().flatten(), self.dL_dtheta(), self.likelihood._gradients(partial=self.partial_for_likelihood)))

     def dL_dtheta(self):
         """
@@ -46,7 +46,7 @@ def BGPLVM(seed=default_seed):

     return m

-def GPLVM_oil_100(optimize=True):
+def GPLVM_oil_100(optimize=True, plot=True):
     data = GPy.util.datasets.oil_100()
     Y = data['X']
@@ -61,7 +61,8 @@ def GPLVM_oil_100(optimize=True):

     # plot
     print(m)
-    m.plot_latent(labels=m.data_labels)
+    if plot:
+        m.plot_latent(labels=m.data_labels)
     return m

 def sparseGPLVM_oil(optimize=True, N=100, input_dim=6, num_inducing=15, max_iters=50):
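Note: with the new plot flag the demo can run headless. Assuming these demos live in GPy.examples.dimensionality_reduction, as in other GPy versions:

    import GPy
    m = GPy.examples.dimensionality_reduction.GPLVM_oil_100(optimize=False, plot=False)
    print(m)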
@@ -10,7 +10,6 @@ import itertools
 from parts.prod import Prod as prod
-from matplotlib.transforms import offset_copy
 import GPy
 from GPy.core.parameterized import Parameterized_old, __fixed__

 class kern(Parameterized):
     def __init__(self, input_dim, parts=[], input_slices=None):
@@ -21,11 +21,11 @@ class Kernpart(Parameterized):
         # the name of the covariance function.
         # link to parameterized objects
         self._parameters_ = []
-        self._X = None
+        #self._X = None

     def connect_input(self, X):
         X.add_observer(self, self.on_input_change)
-        self._X = X
+        #self._X = X

     def on_input_change(self, X):
         """
@@ -284,13 +284,13 @@ class Linear(Kernpart):
     #---------------------------------------#

     def _K_computations(self, X, X2):
-        if not (fast_array_equal(X, self._Xcache) and fast_array_equal(X2, self._X2cache)):
-            self._Xcache = X.copy()
+        if not (fast_array_equal(X, self._X) and fast_array_equal(X2, self._X2)):
+            self._X = X.copy()
             if X2 is None:
                 self._dot_product = tdot(X)
-                self._X2cache = None
+                self._X2 = None
             else:
-                self._X2cache = X2.copy()
+                self._X2 = X2.copy()
                 self._dot_product = np.dot(X, X2.T)

     def _psi_computations(self, Z, mu, S):
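Note: the rename from _Xcache/_X2cache to _X/_X2 keeps the same guard: recompute the dot products only when the inputs differ from the cached copies. The guard in isolation, with np.array_equal standing in for GPy's fast_array_equal helper:

    import numpy as np

    def cache_is_stale(X, X2, X_cached, X2_cached):
        # X2 may legitimately be None (symmetric case), so compare carefully
        same = lambda a, b: (a is None and b is None) or \
            (a is not None and b is not None and np.array_equal(a, b))
        return not (same(X, X_cached) and same(X2, X2_cached))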
@@ -54,9 +54,10 @@ class RBF(Kernpart):
         self.lengthscale = Param('lengthscale', lengthscale)
         self.lengthscale.add_observer(self, self.update_lengthscale)
         self.add_parameters(self.variance, self.lengthscale)

-        self.update_lengthscale(self.lengthscale)
-        self.parameters_changed()
+
+        #self.update_lengthscale(self.lengthscale)
+        #self.parameters_changed()
         # initialize cache
         #self._Z, self._mu, self._S = np.empty(shape=(3, 1))
         #self._X, self._X2, self._params_save = np.empty(shape=(3, 1))
@@ -17,7 +17,7 @@ class Gaussian(likelihood):
     def __init__(self, data, variance=1., normalize=False):
         super(Gaussian, self).__init__('gaussian')
         self.is_heteroscedastic = False
-        self.num_params = 1
+        #self.num_params = 1
         self.Z = 0. # a correction factor which accounts for the approximation made
         N, self.output_dim = data.shape
@@ -34,10 +34,10 @@ class Gaussian(likelihood):
         self.set_data(data)

         self.variance = Param('variance', variance)
+        self._variance = variance + 1

         self.variance.add_observer(self, self.update_variance)
         self.add_parameter(self.variance)
-        self.parameters_changed()
-
+        #self.parameters_changed()
 #        self._set_params(np.asarray(variance))
@@ -63,17 +63,21 @@ class Gaussian(likelihood):
 #
 #    def _set_params(self, x):
 #        self.variance = x[0]
-    def parameters_changed(self):
-        if np.any(self._variance != self.variance):
-            if np.all(self.variance == 0.):#special case of zero noise
-                self.precision = np.inf
-                self.V = None
-            else:
-                self.precision = 1. / self.variance
-                self.V = (self.precision) * self.Y
-            self.VVT_factor = self.precision * self.YYT_factor
-            self.covariance_matrix = np.eye(self.N) * self.variance
-            self._variance = self.variance.copy()
+    def update_variance(self, v):
+        if np.all(self.variance == 0.): #special case of zero noise
+            self.precision = np.inf
+            self.V = None
+        else:
+            self.precision = (1. / self.variance).squeeze()
+            self.V = (self.precision) * self.Y
+        self.VVT_factor = self.precision * self.YYT_factor
+        self.covariance_matrix = np.eye(self.N) * self.variance
+        #self._variance = self.variance.copy()
+
+#    def parameters_changed(self):
+#        if np.any(self._variance != self.variance):
+#            self.update_variance()

     def predictive_values(self, mu, var, full_cov):
         """
@@ -87,11 +91,11 @@ class Gaussian(likelihood):
             # This will mess up computations of diag(true_var), below.
             # note that the upper, lower quantiles should be the same shape as mean
             # Augment the output variance with the likelihood variance and rescale.
-            true_var = (var + np.eye(var.shape[0]) * self._variance) * self._scale ** 2
+            true_var = (var + np.eye(var.shape[0]) * self.variance) * self._scale ** 2
             _5pc = mean - 2.*np.sqrt(np.diag(true_var))
             _95pc = mean + 2.*np.sqrt(np.diag(true_var))
         else:
-            true_var = (var + self._variance) * self._scale ** 2
+            true_var = (var + self.variance) * self._scale ** 2
             _5pc = mean - 2.*np.sqrt(true_var)
             _95pc = mean + 2.*np.sqrt(true_var)
         return mean, true_var, _5pc, _95pc
@@ -36,7 +36,7 @@ class GPLVM(GP):
         likelihood = Gaussian(Y, normalize=normalize_Y, variance=np.exp(-2.))
         GP.__init__(self, X, likelihood, kernel, normalize_X=False)
         self.X = Param('q_mean', self.X)
-        self.add_parameter(self.X, self.dL_dK, 0)
+        self.add_parameter(self.X, self.dK_dX, 0)
         #self.set_prior('.*X', Gaussian_prior(0, 1))
         self.ensure_default_constraints()