Mirror of https://github.com/SheffieldML/GPy.git (synced 2026-05-14 22:42:37 +02:00)

Commit 83bde47d55: merge devel into mrd > transformations added
11 changed files with 273 additions and 269 deletions
@@ -6,7 +6,7 @@ from .. import likelihoods
 from ..inference import optimization
 from ..util.linalg import jitchol
 from GPy.util.misc import opt_wrapper
-from parameterised import parameterised, truncate_pad
+from parameterised import parameterised
 from scipy import optimize
 import multiprocessing as mp
 import numpy as np

@@ -67,9 +67,14 @@ class model(parameterised):
 
         # check constraints are okay
         if isinstance(what, (priors.gamma, priors.log_Gaussian)):
-            assert not np.any(which[:, None] == self.constrained_negative_indices), "constraint and prior incompatible"
-            assert not np.any(which[:, None] == self.constrained_bounded_indices), "constraint and prior incompatible"
-            unconst = np.setdiff1d(which, self.constrained_positive_indices)
+            constrained_positive_indices = [i for i,t in zip(self.constrained_indices, self.constraints) if t.domain=='positive']
+            if len(constrained_positive_indices):
+                constrained_positive_indices = np.hstack(constrained_positive_indices)
+            else:
+                constrained_positive_indices = np.zeros(shape=(0,))
+            bad_constraints = np.setdiff1d(self.all_constrained_indices(), constrained_positive_indices)
+            assert not np.any(which[:, None] == bad_constraints), "constraint and prior incompatible"
+            unconst = np.setdiff1d(which, constrained_positive_indices)
             if len(unconst):
                 print "Warning: constraining parameters to be positive:"
                 print '\n'.join([n for i, n in enumerate(self._get_param_names()) if i in unconst])
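
For reference, the index bookkeeping introduced above can be exercised on its
own. A minimal standalone sketch (toy values; the list-of-arrays layout and the
transformation 'domain' attribute mirror the attributes used in the hunk):

import numpy as np

# stand-ins for self.constrained_indices / self.constraints
constrained_indices = [np.array([0, 2]), np.array([3])]
domains = ['positive', 'bounded']             # t.domain for each constraint

# gather indices whose constraint has a positive domain
pos = [i for i, d in zip(constrained_indices, domains) if d == 'positive']
pos = np.hstack(pos) if len(pos) else np.zeros(shape=(0,))

all_constrained = np.hstack(constrained_indices)
bad = np.setdiff1d(all_constrained, pos)      # constrained, but not positively

which = np.array([0, 2])                      # indices a gamma prior targets
assert not np.any(which[:, None] == bad)      # prior only allowed where positive
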
@@ -80,7 +85,6 @@ class model(parameterised):
         else:
             raise ValueError, "prior not recognised"
-
 
         # store the prior in a local list
         for w in which:
             self.priors[w] = what

@@ -110,22 +114,16 @@ class model(parameterised):
         return ret
 
     def _transform_gradients(self, g):
         """
         Takes a list of gradients and return an array of transformed gradients (positive/negative/tied/and so on)
         """
 
         x = self._get_params()
-        g[self.constrained_positive_indices] = g[self.constrained_positive_indices] * x[self.constrained_positive_indices]
-        g[self.constrained_negative_indices] = g[self.constrained_negative_indices] * x[self.constrained_negative_indices]
-        [np.put(g, i, g[i] * (x[i] - l) * (h - x[i]) / (h - l)) for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
+        for index,constraint in zip(self.constrained_indices, self.constraints):
+            g[index] = g[index] * constraint.gradfactor(x[index])
         [np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]]
-        if len(self.tied_indices) or len(self.constrained_fixed_indices):
-            to_remove = np.hstack((self.constrained_fixed_indices + [t[1:] for t in self.tied_indices]))
-            return np.delete(g, to_remove)
+        if len(self.tied_indices) or len(self.fixed_indices):
+            to_remove = np.hstack((self.fixed_indices+[t[1:] for t in self.tied_indices]))
+            return np.delete(g,to_remove)
         else:
             return g
 
 
     def randomize(self):
         """
         Randomize the model.
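
The rewrite above collapses the hard-coded positive/negative/bounded cases into
a single chain rule: a parameter stored as x = t.f(z) contributes
dL/dz = dL/dx * t.gradfactor(x). A standalone finite-difference check of that
identity for the logexp transform (toy value, nothing model-specific):

import numpy as np

f = lambda z: np.log(1. + np.exp(z))                  # logexp: x = log(1 + e^z)
gradfactor = lambda x: (np.exp(x) - 1.) / np.exp(x)   # dx/dz expressed in x

z = 0.7
x = f(z)
eps = 1e-6
dx_dz = (f(z + eps) - f(z - eps)) / (2 * eps)         # numerical dx/dz
assert np.allclose(gradfactor(x), dx_dz, atol=1e-6)
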
@@ -209,7 +207,7 @@ class model(parameterised):
         """
         Ensure that any variables which should clearly be positive have been constrained somehow.
         """
-        positive_strings = ['variance', 'lengthscale', 'precision']
+        positive_strings = ['variance','lengthscale', 'precision', 'kappa']
         param_names = self._get_param_names()
         currently_constrained = self.all_constrained_indices()
         to_make_positive = []

@@ -9,26 +9,7 @@ import cPickle
 import os
-from ..util.squashers import sigmoid
 import warnings
 
-def truncate_pad(string, width, align='m'):
-    """
-    A helper function to make aligned strings for parameterised.__str__
-    """
-    width = max(width, 4)
-    if len(string) > width:
-        return string[:width - 3] + '...'
-    elif len(string) == width:
-        return string
-    elif len(string) < width:
-        diff = width - len(string)
-        if align == 'm':
-            return ' ' * np.floor(diff / 2.) + string + ' ' * np.ceil(diff / 2.)
-        elif align == 'l':
-            return string + ' ' * diff
-        elif align == 'r':
-            return ' ' * diff + string
-        else:
-            raise ValueError
+import transformations
 
 class parameterised(object):
     def __init__(self):
@@ -36,13 +17,10 @@ class parameterised(object):
         This is the base class for model and kernel. Mostly just handles tieing and constraining of parameters
         """
         self.tied_indices = []
-        self.constrained_fixed_indices = []
-        self.constrained_fixed_values = []
-        self.constrained_positive_indices = np.empty(shape=(0,), dtype=np.int64)
-        self.constrained_negative_indices = np.empty(shape=(0,), dtype=np.int64)
-        self.constrained_bounded_indices = []
-        self.constrained_bounded_uppers = []
-        self.constrained_bounded_lowers = []
+        self.fixed_indices = []
+        self.fixed_values = []
+        self.constrained_indices = []
+        self.constraints = []
 
     def pickle(self, filename, protocol= -1):
         f = file(filename, 'w')
@@ -50,10 +28,7 @@ class parameterised(object):
         f.close()
 
     def copy(self):
-        """
-        Returns a (deep) copy of the current model
-        """
-
+        """Returns a (deep) copy of the current model """
         return copy.deepcopy(self)
 
     @property
@@ -64,6 +39,7 @@ class parameterised(object):
+        :see_also: :py:func:`GPy.core.parameterised.params_transformed`
         """
         return self._get_params()
 
     @params.setter
     def params(self, params):
         self._set_params(params)
@@ -76,6 +52,7 @@ class parameterised(object):
+        :see_also: :py:func:`GPy.core.parameterised.params`
         """
         return self._get_params_transformed()
 
     @params_transformed.setter
     def params_transformed(self, params):
         self._set_params_transformed(params)
@@ -97,7 +74,9 @@ class parameterised(object):
 
     def __getitem__(self, name, return_names=False):
         """
-        Get a model parameter by name. The name is applied as a regular expression and all parameters that match that regular expression are returned.
+        Get a model parameter by name. The name is applied as a regular
+        expression and all parameters that match that regular expression are
+        returned.
         """
         matches = self.grep_param_names(name)
         if len(matches):
@@ -110,7 +89,9 @@ class parameterised(object):
 
     def __setitem__(self, name, val):
         """
-        Set model parameter(s) by name. The name is provided as a regular expression. All parameters matching that regular expression are set to ghe given value.
+        Set model parameter(s) by name. The name is provided as a regular
+        expression. All parameters matching that regular expression are set to
+        the given value.
         """
         matches = self.grep_param_names(name)
         if len(matches):
@@ -119,8 +100,6 @@ class parameterised(object):
             x = self.params
             x[matches] = val
             self.params = x
-            # import ipdb;ipdb.set_trace()
-            # self.params[matches] = val
         else:
             raise AttributeError, "no parameter matches %s" % name
@@ -140,13 +119,6 @@ class parameterised(object):
         """Unties all parameters by setting tied_indices to an empty list."""
         self.tied_indices = []
 
-    def all_constrained_indices(self):
-        """Return a np array of all the constrained indices"""
-        ret = [np.hstack(i) for i in [self.constrained_bounded_indices, self.constrained_positive_indices, self.constrained_negative_indices, self.constrained_fixed_indices] if len(i)]
-        if len(ret):
-            return np.hstack(ret)
-        else:
-            return []
     def grep_param_names(self, expr):
         """
         Arguments
@@ -159,7 +131,7 @@ class parameterised(object):
 
         Notes
         -----
-        Other objects are passed through - i.e. integers which were'nt meant for grepping
+        Other objects are passed through - i.e. integers which weren't meant for grepping
         """
 
         if type(expr) in [str, np.string_, np.str]:
@@ -171,108 +143,77 @@ class parameterised(object):
         return expr
 
     def Nparam_transformed(self):
         """
         Compute the number of parameters after ties and fixing have been performed
         """
-        ties = 0
-        for ti in self.tied_indices:
-            ties += ti.size - 1
+        removed = 0
+        for tie in self.tied_indices:
+            removed += tie.size - 1
 
-        fixes = 0
-        for fi in self.constrained_fixed_indices:
-            fixes += len(fi)
-
-        return self.Nparam - fixes - ties
-
-    def constrain_positive(self, which):
-        """
-        Set positive constraints.
-
-        Arguments
-        ---------
-        which -- np.array(dtype=int), or regular expression object or string
-        """
-        matches = self.grep_param_names(which)
-        assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained"
-        self.constrained_positive_indices = np.hstack((self.constrained_positive_indices, matches))
-        # check to ensure constraint is in place
-        x = self._get_params()
-        for i, xx in enumerate(x):
-            if (xx < 0) & (i in matches):
-                x[i] = -xx
-        self._set_params(x)
+        for fix in self.fixed_indices:
+            removed += fix.size
+
+        return len(self._get_params()) - removed
 
     def unconstrain(self, which):
         """Unconstrain matching parameters. does not untie parameters"""
         matches = self.grep_param_names(which)
-        # positive/negative
-        self.constrained_positive_indices = np.delete(self.constrained_positive_indices, np.nonzero(np.sum(self.constrained_positive_indices[:, None] == matches[None, :], 1))[0])
-        self.constrained_negative_indices = np.delete(self.constrained_negative_indices, np.nonzero(np.sum(self.constrained_negative_indices[:, None] == matches[None, :], 1))[0])
-        # bounded
-        if len(self.constrained_bounded_indices):
-            self.constrained_bounded_indices = [np.delete(a, np.nonzero(np.sum(a[:, None] == matches[None, :], 1))[0]) for a in self.constrained_bounded_indices]
-            if np.hstack(self.constrained_bounded_indices).size:
-                self.constrained_bounded_uppers, self.constrained_bounded_lowers, self.constrained_bounded_indices = zip(*[(u, l, i) for u, l, i in zip(self.constrained_bounded_uppers, self.constrained_bounded_lowers, self.constrained_bounded_indices) if i.size])
-                self.constrained_bounded_uppers, self.constrained_bounded_lowers, self.constrained_bounded_indices = list(self.constrained_bounded_uppers), list(self.constrained_bounded_lowers), list(self.constrained_bounded_indices)
-            else:
-                self.constrained_bounded_uppers, self.constrained_bounded_lowers, self.constrained_bounded_indices = [], [], []
-        # fixed:
-        for i, indices in enumerate(self.constrained_fixed_indices):
-            self.constrained_fixed_indices[i] = np.delete(indices, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0])
-        # remove empty elements
-        tmp = [(i, v) for i, v in zip(self.constrained_fixed_indices, self.constrained_fixed_values) if len(i)]
+        #tranformed contraints:
+        for match in matches:
+            self.constrained_indices = [i[i<>match] for i in self.constrained_indices]
+
+        #remove empty constraints
+        tmp = zip(*[(i,t) for i,t in zip(self.constrained_indices,self.constraints) if len(i)])
         if tmp:
-            self.constrained_fixed_indices, self.constrained_fixed_values = zip(*tmp)
-            self.constrained_fixed_indices, self.constrained_fixed_values = list(self.constrained_fixed_indices), list(self.constrained_fixed_values)
+            self.constrained_indices, self.constraints = zip(*[(i,t) for i,t in zip(self.constrained_indices,self.constraints) if len(i)])
+            self.constrained_indices, self.constraints = list(self.constrained_indices), list(self.constraints)
+
+        # fixed:
+        self.fixed_values = [np.delete(values, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices,values in zip(self.fixed_indices,self.fixed_values)]
+        self.fixed_indices = [np.delete(indices, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices in self.fixed_indices]
+
+        # remove empty elements
+        tmp = [(i, v) for i, v in zip(self.fixed_indices, self.fixed_values) if len(i)]
+        if tmp:
+            self.fixed_indices, self.fixed_values = zip(*tmp)
+            self.fixed_indices, self.fixed_values = list(self.fixed_indices), list(self.fixed_values)
         else:
-            self.constrained_fixed_indices, self.constrained_fixed_values = [], []
+            self.fixed_indices, self.fixed_values = [], []
 
     def constrain_negative(self, which):
-        """
-        Set negative constraints.
-
-        :param which: which variables to constrain
-        :type which: regular expression string
-        """
+        """ Set negative constraints. """
+        self.constrain(which, transformations.negative_exponent())
+
+    def constrain_positive(self, which):
+        """ Set positive constraints. """
+        self.constrain(which, transformations.logexp())
+
+    def constrain_bounded(self, which,lower, upper):
+        """ Set bounded constraints. """
+        self.constrain(which, transformations.logistic(lower, upper))
+
+    def all_constrained_indices(self):
+        if len(self.constrained_indices):
+            return np.hstack(self.constrained_indices)
+        else:
+            return np.empty(shape=(0,))
+
+    def constrain(self,which,transform):
+        assert isinstance(transform,transformations.transformation)
         matches = self.grep_param_names(which)
-        assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained"
-        self.constrained_negative_indices = np.hstack((self.constrained_negative_indices, matches))
-        # check to ensure constraint is in place
+        overlap = set(matches).intersection(set(self.all_constrained_indices()))
+        if overlap:
+            self.unconstrain(np.asarray(list(overlap)))
+            print 'Warning: re-constraining these parameters'
+            pn = self._get_param_names()
+            for i in overlap:
+                print pn[i]
+
+        self.constrained_indices.append(matches)
+        self.constraints.append(transform)
         x = self._get_params()
-        for i, xx in enumerate(x):
-            if (xx > 0.) and (i in matches):
-                x[i] = -xx
+        x[matches] = transform.initialize(x[matches])
         self._set_params(x)
 
-    def constrain_bounded(self, which, lower, upper):
-        """Set bounded constraints.
-
-        Arguments
-        ---------
-        which -- np.array(dtype=int), or regular expression object or string
-        upper -- (float) the upper bound on the constraint
-        lower -- (float) the lower bound on the constraint
-        """
-        matches = self.grep_param_names(which)
-        assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained"
-        assert lower < upper, "lower bound must be smaller than upper bound!"
-        self.constrained_bounded_indices.append(matches)
-        self.constrained_bounded_uppers.append(upper)
-        self.constrained_bounded_lowers.append(lower)
-        # check to ensure constraint is in place
-        x = self._get_params()
-        for i, xx in enumerate(x):
-            if ((xx <= lower) | (xx >= upper)) & (i in matches):
-                x[i] = sigmoid(xx) * (upper - lower) + lower
-        self._set_params(x)
-
     def constrain_fixed(self, which, value=None):
         """
         Arguments
@@ -288,42 +229,36 @@ class parameterised(object):
         """
         matches = self.grep_param_names(which)
         assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained"
-        self.constrained_fixed_indices.append(matches)
+        self.fixed_indices.append(matches)
         if value != None:
-            self.constrained_fixed_values.append(value)
+            self.fixed_values.append(value)
         else:
-            self.constrained_fixed_values.append(self._get_params()[self.constrained_fixed_indices[-1]])
+            self.fixed_values.append(self._get_params()[self.fixed_indices[-1]])
 
-        # self.constrained_fixed_values.append(value)
+        # self.fixed_values.append(value)
         self._set_params_transformed(self._get_params_transformed())
 
     def _get_params_transformed(self):
         """use self._get_params to get the 'true' parameters of the model, which are then tied, constrained and fixed"""
         x = self._get_params()
-        x[self.constrained_positive_indices] = np.log(x[self.constrained_positive_indices])
-        x[self.constrained_negative_indices] = np.log(-x[self.constrained_negative_indices])
-        [np.put(x, i, np.log(np.clip(x[i] - l, 1e-10, np.inf) / np.clip(h - x[i], 1e-10, np.inf))) for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
+        [np.put(x,i,t.finv(x[i])) for i,t in zip(self.constrained_indices,self.constraints)]
 
-        to_remove = self.constrained_fixed_indices + [t[1:] for t in self.tied_indices]
+        to_remove = self.fixed_indices + [t[1:] for t in self.tied_indices]
         if len(to_remove):
             return np.delete(x, np.hstack(to_remove))
         else:
             return x
 
 
     def _set_params_transformed(self, x):
         """ takes the vector x, which is then modified (by untying, reparameterising or inserting fixed values), and then call self._set_params"""
 
-        # work out how many places are fixed, and where they are. tricky logic!
-        Nfix_places = 0.
-        if len(self.tied_indices):
-            Nfix_places += np.hstack(self.tied_indices).size - len(self.tied_indices)
-        if len(self.constrained_fixed_indices):
-            Nfix_places += np.hstack(self.constrained_fixed_indices).size
-        if Nfix_places:
-            fix_places = np.hstack(self.constrained_fixed_indices + [t[1:] for t in self.tied_indices])
+        fix_places = self.fixed_indices + [t[1:] for t in self.tied_indices]
+        if len(fix_places):
+            fix_places = np.hstack(fix_places)
+            Nfix_places = fix_places.size
+        else:
+            fix_places = []
+            Nfix_places = 0
 
         free_places = np.setdiff1d(np.arange(Nfix_places + x.size, dtype=np.int), fix_places)
@@ -331,11 +266,12 @@ class parameterised(object):
         xx = np.zeros(Nfix_places + free_places.size, dtype=np.float64)
 
         xx[free_places] = x
-        [np.put(xx, i, v) for i, v in zip(self.constrained_fixed_indices, self.constrained_fixed_values)]
+        [np.put(xx, i, v) for i, v in zip(self.fixed_indices, self.fixed_values)]
         [np.put(xx, i, v) for i, v in [(t[1:], xx[t[0]]) for t in self.tied_indices] ]
-        xx[self.constrained_positive_indices] = np.exp(xx[self.constrained_positive_indices])
-        xx[self.constrained_negative_indices] = -np.exp(xx[self.constrained_negative_indices])
-        [np.put(xx, i, low + sigmoid(xx[i]) * (high - low)) for i, low, high in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
+        [np.put(xx,i,t.f(xx[i])) for i,t in zip(self.constrained_indices, self.constraints)]
+        if hasattr(self,'debug'):
+            stop
         self._set_params(xx)
 
     def _get_param_names_transformed(self):
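
The fixed/tied reinsertion above can be traced on toy numbers; a minimal
sketch (all values hypothetical):

import numpy as np

# a 5-parameter model: parameter 1 fixed at 3.14, parameter 4 tied to parameter 2
fix_places = np.array([1, 4])          # fixed index plus the tied duplicate t[1:]
x = np.array([0.5, 2.0, -1.0])         # the free (transformed) parameters

xx = np.zeros(fix_places.size + x.size)
free_places = np.setdiff1d(np.arange(xx.size), fix_places)
xx[free_places] = x                    # scatter the free values
np.put(xx, [1], [3.14])                # re-insert the fixed value
np.put(xx, [4], xx[2])                 # copy the lead tied value to its duplicate
print(xx)                              # [ 0.5   3.14  2.   -1.    2.  ]
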
@@ -354,17 +290,13 @@ class parameterised(object):
         remove = np.empty(shape=(0,), dtype=np.int)
 
         # also remove the fixed params
-        if len(self.constrained_fixed_indices):
-            remove = np.hstack((remove, np.hstack(self.constrained_fixed_indices)))
+        if len(self.fixed_indices):
+            remove = np.hstack((remove, np.hstack(self.fixed_indices)))
 
         # add markers to show that some variables are constrained
-        for i in self.constrained_positive_indices:
-            n[i] = n[i] + '(+ve)'
-        for i in self.constrained_negative_indices:
-            n[i] = n[i] + '(-ve)'
-        for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers):
+        for i,t in zip(self.constrained_indices,self.constraints):
             for ii in i:
-                n[ii] = n[ii] + '(bounded)'
+                n[ii] = n[ii] + t.__str__()
 
         n = [nn for i, nn in enumerate(n) if not i in remove]
         return n
@@ -382,16 +314,12 @@ class parameterised(object):
         values = self._get_params() # map(str,self._get_params())
         # sort out the constraints
         constraints = [''] * len(names)
-        for i in self.constrained_positive_indices:
-            constraints[i] = '(+ve)'
-        for i in self.constrained_negative_indices:
-            constraints[i] = '(-ve)'
-        for i in self.constrained_fixed_indices:
+        for i,t in zip(self.constrained_indices,self.constraints):
+            for ii in i:
+                constraints[ii] = t.__str__()
+        for i in self.fixed_indices:
             for ii in i:
                 constraints[ii] = 'Fixed'
-        for i, u, l in zip(self.constrained_bounded_indices, self.constrained_bounded_uppers, self.constrained_bounded_lowers):
-            for ii in i:
-                constraints[ii] = '(' + str(l) + ', ' + str(u) + ')'
         # sort out the ties
         ties = [''] * len(names)
         for i, tie in enumerate(self.tied_indices):
GPy/core/transformations.py (new file, 85 lines)
@@ -0,0 +1,85 @@
+# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+
+import numpy as np
+
+class transformation(object):
+    def __init__(self):
+        #set the domain. Suggest we use 'positive', 'bounded', etc
+        self.domain = 'undefined'
+    def f(self, x):
+        raise NotImplementedError
+
+    def finv(self,x):
+        raise NotImplementedError
+
+    def gradfactor(self,f):
+        """ df_dx evaluated at self.f(x)=f"""
+        raise NotImplementedError
+    def initialize(self,f):
+        """ produce a sensible initial values for f(x)"""
+        raise NotImplementedError
+    def __str__(self):
+        raise NotImplementedError
+
+class logexp(transformation):
+    def __init__(self):
+        self.domain= 'positive'
+    def f(self,x):
+        return np.log(1. + np.exp(x))
+    def finv(self,f):
+        return np.log(np.exp(f) - 1.)
+    def gradfactor(self,f):
+        ef = np.exp(f)
+        return (ef - 1.)/ef
+    def initialize(self,f):
+        return np.abs(f)
+    def __str__(self):
+        return '(+ve)'
+
+class exponent(transformation):
+    def __init__(self):
+        self.domain= 'positive'
+    def f(self,x):
+        return np.exp(x)
+    def finv(self,x):
+        return np.log(x)
+    def gradfactor(self,f):
+        return f
+    def initialize(self,f):
+        return np.abs(f)
+    def __str__(self):
+        return '(+ve)'
+
+class negative_exponent(transformation):
+    def __init__(self):
+        self.domain= 'negative'
+    def f(self,x):
+        return -np.exp(x)
+    def finv(self,x):
+        return np.log(-x)
+    def gradfactor(self,f):
+        return f
+    def initialize(self,f):
+        return -np.abs(f)
+    def __str__(self):
+        return '(-ve)'
+
+class logistic(transformation):
+    def __init__(self,lower,upper):
+        self.domain= 'bounded'
+        assert lower < upper
+        self.lower, self.upper = float(lower), float(upper)
+        self.difference = self.upper - self.lower
+    def f(self,x):
+        return self.lower + self.difference/(1.+np.exp(-x))
+    def finv(self,f):
+        return np.log(np.clip(f - self.lower, 1e-10, np.inf) / np.clip(self.upper - f, 1e-10, np.inf))
+    def gradfactor(self,f):
+        return (f-self.lower)*(self.upper-f)/self.difference
+    def initialize(self,f):
+        return self.f(f*0.)
+    def __str__(self):
+        return '({},{})'.format(self.lower,self.upper)
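
A quick smoke test of the new module; a sketch only (the import path assumes
the file lands at GPy/core/transformations.py as listed above):

import numpy as np
from GPy.core.transformations import logexp, logistic

t = logexp()
z = np.array([-2., 0., 3.])            # unconstrained optimiser space
x = t.f(z)                             # model space: always positive
assert np.all(x > 0)
assert np.allclose(t.finv(x), z)       # f and finv invert each other

b = logistic(0., 1.)
assert np.all((b.f(z) > 0.) & (b.f(z) < 1.))   # bounded to (0, 1)
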
@@ -182,7 +182,8 @@ def bgplvm_simulation_matlab_compare():
     from GPy.models import mrd
     from GPy import kern
     reload(mrd); reload(kern)
-    k = kern.rbf(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
+    #k = kern.rbf(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
+    k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
     m = Bayesian_GPLVM(Y, Q, init="PCA", M=M, kernel=k,
 #                       X=mu,
 #                       X_variance=S,
@@ -48,11 +48,7 @@ class kern(parameterised):
 
 
     def plot_ARD(self, ax=None):
-        """
-        If an ARD kernel is present, it bar-plots the ARD parameters
-
-
-        """
+        """If an ARD kernel is present, it bar-plots the ARD parameters"""
         if ax is None:
             ax = pb.gca()
         for p in self.parts:
@@ -71,12 +67,10 @@ class kern(parameterised):
 
     def _transform_gradients(self, g):
         x = self._get_params()
-        g[self.constrained_positive_indices] = g[self.constrained_positive_indices] * x[self.constrained_positive_indices]
-        g[self.constrained_negative_indices] = g[self.constrained_negative_indices] * x[self.constrained_negative_indices]
-        [np.put(g, i, g[i] * (x[i] - l) * (h - x[i]) / (h - l)) for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
+        [np.put(x,i,x*t.gradfactor(x[i])) for i,t in zip(self.constrained_indices, self.constraints)]
         [np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]]
-        if len(self.tied_indices) or len(self.constrained_fixed_indices):
-            to_remove = np.hstack((self.constrained_fixed_indices + [t[1:] for t in self.tied_indices]))
+        if len(self.tied_indices) or len(self.fixed_indices):
+            to_remove = np.hstack((self.fixed_indices + [t[1:] for t in self.tied_indices]))
             return np.delete(g, to_remove)
         else:
             return g
@@ -93,13 +87,10 @@ class kern(parameterised):
         assert self.D == other.D
         newkern = kern(self.D, self.parts + other.parts, self.input_slices + other.input_slices)
         # transfer constraints:
-        newkern.constrained_positive_indices = np.hstack((self.constrained_positive_indices, self.Nparam + other.constrained_positive_indices))
-        newkern.constrained_negative_indices = np.hstack((self.constrained_negative_indices, self.Nparam + other.constrained_negative_indices))
-        newkern.constrained_bounded_indices = self.constrained_bounded_indices + [self.Nparam + x for x in other.constrained_bounded_indices]
-        newkern.constrained_bounded_lowers = self.constrained_bounded_lowers + other.constrained_bounded_lowers
-        newkern.constrained_bounded_uppers = self.constrained_bounded_uppers + other.constrained_bounded_uppers
-        newkern.constrained_fixed_indices = self.constrained_fixed_indices + [self.Nparam + x for x in other.constrained_fixed_indices]
-        newkern.constrained_fixed_values = self.constrained_fixed_values + other.constrained_fixed_values
+        newkern.constrained_indices = self.constrained_indices + [i+self.Nparam for i in other.constrained_indices]
+        newkern.constraints = self.constraints + other.constraints
+        newkern.fixed_indices = self.fixed_indices + [self.Nparam + x for x in other.fixed_indices]
+        newkern.fixed_values = self.fixed_values + other.fixed_values
         newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices]
         return newkern
@@ -126,13 +117,12 @@ class kern(parameterised):
         newkern = kern(D, self.parts + other.parts, self_input_slices + other_input_slices)
 
         # transfer constraints:
-        newkern.constrained_positive_indices = np.hstack((self.constrained_positive_indices, self.Nparam + other.constrained_positive_indices))
-        newkern.constrained_negative_indices = np.hstack((self.constrained_negative_indices, self.Nparam + other.constrained_negative_indices))
-        newkern.constrained_bounded_indices = self.constrained_bounded_indices + [self.Nparam + x for x in other.constrained_bounded_indices]
-        newkern.constrained_bounded_lowers = self.constrained_bounded_lowers + other.constrained_bounded_lowers
-        newkern.constrained_bounded_uppers = self.constrained_bounded_uppers + other.constrained_bounded_uppers
-        newkern.constrained_fixed_indices = self.constrained_fixed_indices + [self.Nparam + x for x in other.constrained_fixed_indices]
-        newkern.constrained_fixed_values = self.constrained_fixed_values + other.constrained_fixed_values
+        newkern.constrained_indices = self.constrained_indices + [x+self.Nparam for x in other.constrained_indices]
+        newkern.constraints = self.constraints + other.constraints
+        newkern.fixed_indices = self.fixed_indices + [self.Nparam + x for x in other.fixed_indices]
+        newkern.fixed_values = self.fixed_values + other.fixed_values
         newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices]
         return newkern
@@ -208,15 +198,11 @@ class kern(parameterised):
         # Get the ties and constrains of the kernels before the multiplication
         prev_ties = K1.tied_indices + [arr + K1.Nparam for arr in K2.tied_indices]
 
-        prev_constr_pos = np.append(K1.constrained_positive_indices, K1.Nparam + K2.constrained_positive_indices)
-        prev_constr_neg = np.append(K1.constrained_negative_indices, K1.Nparam + K2.constrained_negative_indices)
+        prev_constr_ind = [K1.constrained_indices] + [K1.Nparam + i for i in K2.constrained_indices]
+        prev_constr = K1.constraints + K2.constraints
 
-        prev_constr_fix = K1.constrained_fixed_indices + [arr + K1.Nparam for arr in K2.constrained_fixed_indices]
-        prev_constr_fix_values = K1.constrained_fixed_values + K2.constrained_fixed_values
-
-        prev_constr_bou = K1.constrained_bounded_indices + [arr + K1.Nparam for arr in K2.constrained_bounded_indices]
-        prev_constr_bou_low = K1.constrained_bounded_lowers + K2.constrained_bounded_lowers
-        prev_constr_bou_upp = K1.constrained_bounded_uppers + K2.constrained_bounded_uppers
+        prev_constr_fix = K1.fixed_indices + [arr + K1.Nparam for arr in K2.fixed_indices]
+        prev_constr_fix_values = K1.fixed_values + K2.fixed_values
 
         # follow the previous ties
         for arr in prev_ties:
@@ -228,14 +214,8 @@ class kern(parameterised):
             index = np.where(index_param == i)[0]
             if index.size > 1:
                 self.tie_params(index)
-        for i in prev_constr_pos:
-            self.constrain_positive(np.where(index_param == i)[0])
-        for i in prev_constr_neg:
-            self.constrain_neg(np.where(index_param == i)[0])
-        for j, i in enumerate(prev_constr_fix):
-            self.constrain_fixed(np.where(index_param == i)[0], prev_constr_fix_values[j])
-        for j, i in enumerate(prev_constr_bou):
-            self.constrain_bounded(np.where(index_param == i)[0], prev_constr_bou_low[j], prev_constr_bou_upp[j])
+        for i,t in zip(prev_constr_ind,prev_constr):
+            self.constrain(np.where(index_param == i)[0],t)
 
     def _get_params(self):
         return np.hstack([p._get_params() for p in self.parts])
@@ -188,12 +188,12 @@ class rbf(kernpart):
             self._X2 = None
             X = X/self.lengthscale
             Xsquare = np.sum(np.square(X),1)
-            self._K_dist2 = (-2.*tdot(X) + Xsquare[:,None] + Xsquare[None,:])
+            self._K_dist2 = -2.*tdot(X) + (Xsquare[:,None] + Xsquare[None,:])
         else:
             self._X2 = X2.copy()
             X = X/self.lengthscale
            X2 = X2/self.lengthscale
-            self._K_dist2 = (-2.*np.dot(X, X2.T) + np.sum(np.square(X),1)[:,None] + np.sum(np.square(X2),1)[None,:])
+            self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X),1)[:,None] + np.sum(np.square(X2),1)[None,:])
         self._K_dvar = np.exp(-0.5*self._K_dist2)
 
     def _psi_computations(self,Z,mu,S):
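
Both branches above compute the scaled squared distance through the expansion
||xi - xj||^2 = ||xi||^2 + ||xj||^2 - 2 xi.xj; the new parenthesisation merely
groups the two square terms before the cross term is added. A brute-force
check of the identity on random toy data:

import numpy as np

X = np.random.randn(4, 3)
Xsq = np.sum(np.square(X), 1)
D2 = -2. * np.dot(X, X.T) + (Xsq[:, None] + Xsq[None, :])

D2_direct = np.array([[np.sum((a - b) ** 2) for b in X] for a in X])
assert np.allclose(D2, D2_direct)
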
@@ -53,9 +53,11 @@ class probit(likelihood_function):
         mu = mu.flatten()
         var = var.flatten()
         mean = stats.norm.cdf(mu/np.sqrt(1+var))
-        p_025 = np.zeros(mu.shape)
-        p_975 = np.ones(mu.shape)
-        return mean, np.nan*var, p_025, p_975 # TODO: better values here (mean is okay)
+        norm_025 = [stats.norm.ppf(.025,m,v) for m,v in zip(mu,var)]
+        norm_975 = [stats.norm.ppf(.975,m,v) for m,v in zip(mu,var)]
+        p_025 = stats.norm.cdf(norm_025/np.sqrt(1+var))
+        p_975 = stats.norm.cdf(norm_975/np.sqrt(1+var))
+        return mean, np.nan*var, p_025, p_975 # TODO: var
 
 class Poisson(likelihood_function):
     """
@@ -54,6 +54,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
         self._savedgradients = []
         self._savederrors = []
+        self._savedpsiKmm = []
 
         sparse_GP.__init__(self, X, Gaussian(Y), kernel, Z=Z, X_variance=X_variance, **kwargs)
 
     @property
@@ -35,6 +35,9 @@ class GP(model):
         self.N, self.Q = self.X.shape
         assert isinstance(kernel, kern.kern)
         self.kern = kernel
+        self.likelihood = likelihood
+        assert self.X.shape[0] == self.likelihood.data.shape[0]
+        self.N, self.D = self.likelihood.data.shape
 
         # here's some simple normalization for the inputs
         if normalize_X:
@@ -47,12 +50,8 @@ class GP(model):
             self._Xmean = np.zeros((1, self.X.shape[1]))
             self._Xstd = np.ones((1, self.X.shape[1]))
 
-        self.likelihood = likelihood
-        # assert self.X.shape[0] == self.likelihood.Y.shape[0]
-        # self.N, self.D = self.likelihood.Y.shape
-        assert self.X.shape[0] == self.likelihood.data.shape[0]
-        self.N, self.D = self.likelihood.data.shape
-
+        if not hasattr(self,'has_uncertain_inputs'):
+            self.has_uncertain_inputs = False
         model.__init__(self)
 
     def dL_dZ(self):
@@ -232,7 +231,7 @@ class GP(model):
         else:
             raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
 
-    def plot(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, levels=20):
+    def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20):
         """
         TODO: Docstrings!
         :param levels: for 2D plotting, the number of contour levels to use
@@ -68,47 +68,60 @@ class sparse_GP(GP):
         sf = self.scale_factor
         sf2 = sf**2
 
-        #The rather complex computations of psi2_beta_scaled
+        #invert Kmm
+        self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
+
+        #The rather complex computations of psi2_beta_scaled and self.A
         if self.likelihood.is_heteroscedastic:
             assert self.likelihood.D == 1 #TODO: what if the likelihood is heterscedatic and there are multiple independent outputs?
             if self.has_uncertain_inputs:
                 self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision.flatten().reshape(self.N,1,1)/sf2)).sum(0)
+                evals, evecs = linalg.eigh(self.psi2_beta_scaled)
+                clipped_evals = np.clip(evals,0.,1e6) # TODO: make clipping configurable
+                if not np.allclose(evals, clipped_evals):
+                    print "Warning: clipping posterior eigenvalues"
+                tmp = evecs*np.sqrt(clipped_evals)
+                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
+                self.A = tdot(tmp)
             else:
                 tmp = self.psi1*(np.sqrt(self.likelihood.precision.flatten().reshape(1,self.N))/sf)
                 #self.psi2_beta_scaled = np.dot(tmp,tmp.T)
                 self.psi2_beta_scaled = tdot(tmp)
+                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
+                self.A = tdot(tmp)
         else:
             if self.has_uncertain_inputs:
                 self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision/sf2)).sum(0)
+                evals, evecs = linalg.eigh(self.psi2_beta_scaled)
+                clipped_evals = np.clip(evals,0.,1e6) # TODO: make clipping configurable
+                if not np.allclose(evals, clipped_evals):
+                    print "Warning: clipping posterior eigenvalues"
+                tmp = evecs*np.sqrt(clipped_evals)
+                self.psi2_beta_scaled = tdot(tmp)
+                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
+                self.A = tdot(tmp)
            else:
                 tmp = self.psi1*(np.sqrt(self.likelihood.precision)/sf)
                 #self.psi2_beta_scaled = np.dot(tmp,tmp.T)
                 self.psi2_beta_scaled = tdot(tmp)
+                tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
+                self.A = tdot(tmp)
 
-        self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
-
-        self.V = (self.likelihood.precision/self.scale_factor)*self.likelihood.Y
-
-        #Compute A = L^-1 psi2 beta L^-T
-        #self. A = mdot(self.Lmi,self.psi2_beta_scaled,self.Lmi.T)
-        tmp = linalg.lapack.flapack.dtrtrs(self.Lm,self.psi2_beta_scaled.T,lower=1)[0]
-        self.A = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1)[0]
-
         #invert B and compute C. C is the posterior covariance of u
         self.B = np.eye(self.M)/sf2 + self.A
         self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B)
 
-        self.psi1V = np.dot(self.psi1, self.V)
         tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.Bi),lower=1,trans=1)[0]
         self.C = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0]
 
+        self.V = (self.likelihood.precision/self.scale_factor)*self.likelihood.Y
+        self.psi1V = np.dot(self.psi1, self.V)
+
+        #back substutue C into psi1V
+        tmp,info1 = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.psi1V),lower=1,trans=0)
+        self._P = tdot(tmp)
+        tmp,info2 = linalg.lapack.flapack.dpotrs(self.LB,tmp,lower=1)
+        self.Cpsi1V,info3 = linalg.lapack.flapack.dtrtrs(self.Lm,tmp,lower=1,trans=1)
+        #self.Cpsi1V = np.dot(self.C,self.psi1V)
 
-        self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T)
+        self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T) #TODO: this dot can be eliminated
 
         self.E = tdot(self.Cpsi1V/sf)
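
The dtrtrs calls above apply triangular solves against the Cholesky factor Lm
instead of ever forming an explicit inverse. A standalone sketch of the same
pattern, computing A = Lm^-1 Psi Lm^-T with SciPy's high-level wrapper (toy
matrices):

import numpy as np
from scipy.linalg import cholesky, solve_triangular

Kmm = np.array([[2., .5], [.5, 1.]])
Psi = np.array([[1., .2], [.2, .5]])

Lm = cholesky(Kmm, lower=True)
tmp = solve_triangular(Lm, Psi.T, lower=True)    # Lm^-1 Psi^T
A = solve_triangular(Lm, tmp.T, lower=True)      # Lm^-1 Psi Lm^-T

A_direct = np.linalg.multi_dot([np.linalg.inv(Lm), Psi, np.linalg.inv(Lm).T])
assert np.allclose(A, A_direct)
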
@@ -130,24 +143,22 @@ class sparse_GP(GP):
             self.dL_dpsi2 = None
 
         else:
-            #self.dL_dpsi2 = 0.5 * self.likelihood.precision * self.D * self.Kmmi # dB
-            #self.dL_dpsi2 += - 0.5 * self.likelihood.precision/sf2 * self.D * self.C # dC
-            #self.dL_dpsi2 += - 0.5 * self.likelihood.precision * self.E # dD
             self.dL_dpsi2 = 0.5*self.likelihood.precision*(self.D*(self.Kmmi - self.C/sf2) -self.E)
             if self.has_uncertain_inputs:
                 #repeat for each of the N psi_2 matrices
                 self.dL_dpsi2 = np.repeat(self.dL_dpsi2[None,:,:],self.N,axis=0)
             else:
                 #subsume back into psi1 (==Kmn)
                 self.dL_dpsi1 += 2.*np.dot(self.dL_dpsi2,self.psi1)
                 self.dL_dpsi2 = None
 
 
         # Compute dL_dKmm
-        #self.dL_dKmm_old = -0.5 * self.D * mdot(self.Lmi.T, self.A, self.Lmi)*sf2 # dB
+        #self.dL_dKmm = -0.5 * self.D * mdot(self.Lmi.T, self.A, self.Lmi)*sf2 # dB
         #self.dL_dKmm += -0.5 * self.D * (- self.C/sf2 - 2.*mdot(self.C, self.psi2_beta_scaled, self.Kmmi) + self.Kmmi) # dC
         #self.dL_dKmm += np.dot(np.dot(self.E*sf2, self.psi2_beta_scaled) - self.Cpsi1VVpsi1, self.Kmmi) + 0.5*self.E # dD
         tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.B),lower=1,trans=1)[0]
-        self.dL_dKmm = -0.5*self.D*sf2*linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0] #dA
+        self.dL_dKmm = -0.5*self.D*sf2*linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0]
         tmp = np.dot(self.D*self.C + self.E*sf2,self.psi2_beta_scaled) - self.Cpsi1VVpsi1
         tmp = linalg.lapack.flapack.dpotrs(self.Lm,np.asfortranarray(tmp.T),lower=1)[0].T
         self.dL_dKmm += 0.5*(self.D*self.C/sf2 + self.E) +tmp # d(C+D)
@@ -189,14 +200,13 @@ class sparse_GP(GP):
         self.kern._set_params(p[self.Z.size:self.Z.size+self.kern.Nparam])
         self.likelihood._set_params(p[self.Z.size+self.kern.Nparam:])
         self._compute_kernel_matrices()
-        if self.auto_scale_factor:
-            self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
+        #if self.auto_scale_factor:
+        #    if self.likelihood.is_heteroscedastic:
+        #        self.scale_factor = max(1,np.sqrt(self.psi2_beta_scaled.sum(0).mean()))
+        #    else:
+        #        self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
+        #self.scale_factor = 1.
+        #    self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
+        if self.auto_scale_factor:
+            if self.likelihood.is_heteroscedastic:
+                self.scale_factor = max(100,np.sqrt(self.psi2_beta_scaled.sum(0).mean()))
+            else:
+                self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
         self._computations()
 
     def _get_params(self):
@@ -72,7 +72,7 @@ def jitchol(A,maxtries=5):
         raise linalg.LinAlgError, "not pd: negative diagonal elements"
     jitter= diagA.mean()*1e-6
     for i in range(1,maxtries+1):
-        print '\rWarning: adding jitter of {:.10e} '.format(jitter),
+        print 'Warning: adding jitter of {:.10e}'.format(jitter)
         try:
             return linalg.cholesky(A+np.eye(A.shape[0]).T*jitter, lower = True)
         except:
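
The loop above retries the factorisation with the diagonal nudged by a small
jitter. A self-contained sketch of the same idea (the growth factor between
retries is an assumption; the hunk only shows a single attempt):

import numpy as np
from scipy import linalg

def jitchol_sketch(A, maxtries=5):
    jitter = np.diag(A).mean() * 1e-6        # start tiny, relative to the diagonal
    for _ in range(maxtries):
        try:
            return linalg.cholesky(A + np.eye(A.shape[0]) * jitter, lower=True)
        except linalg.LinAlgError:
            jitter *= 10.                    # assumed growth factor, not shown above
    raise linalg.LinAlgError("not positive definite, even with jitter")

A = np.ones((3, 3))                          # rank one, so plain Cholesky fails
L = jitchol_sketch(A)
assert np.allclose(np.dot(L, L.T), A + np.eye(3) * 1e-6)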