merge devel into mrd > transformations added

This commit is contained in:
Max Zwiessele 2013-05-02 16:40:39 +01:00
commit 83bde47d55
11 changed files with 273 additions and 269 deletions

View file

@ -6,7 +6,7 @@ from .. import likelihoods
from ..inference import optimization from ..inference import optimization
from ..util.linalg import jitchol from ..util.linalg import jitchol
from GPy.util.misc import opt_wrapper from GPy.util.misc import opt_wrapper
from parameterised import parameterised, truncate_pad from parameterised import parameterised
from scipy import optimize from scipy import optimize
import multiprocessing as mp import multiprocessing as mp
import numpy as np import numpy as np
@ -67,9 +67,14 @@ class model(parameterised):
# check constraints are okay # check constraints are okay
if isinstance(what, (priors.gamma, priors.log_Gaussian)): if isinstance(what, (priors.gamma, priors.log_Gaussian)):
assert not np.any(which[:, None] == self.constrained_negative_indices), "constraint and prior incompatible" constrained_positive_indices = [i for i,t in zip(self.constrained_indices, self.constraints) if t.domain=='positive']
assert not np.any(which[:, None] == self.constrained_bounded_indices), "constraint and prior incompatible" if len(constrained_positive_indices):
unconst = np.setdiff1d(which, self.constrained_positive_indices) constrained_positive_indices = np.hstack(constrained_positive_indices)
else:
constrained_positive_indices = np.zeros(shape=(0,))
bad_constraints = np.setdiff1d(self.all_constrained_indices(),constrained_positive_indices)
assert not np.any(which[:, None] == bad_constraints), "constraint and prior incompatible"
unconst = np.setdiff1d(which, constrained_positive_indices)
if len(unconst): if len(unconst):
print "Warning: constraining parameters to be positive:" print "Warning: constraining parameters to be positive:"
print '\n'.join([n for i, n in enumerate(self._get_param_names()) if i in unconst]) print '\n'.join([n for i, n in enumerate(self._get_param_names()) if i in unconst])
@ -80,7 +85,6 @@ class model(parameterised):
else: else:
raise ValueError, "prior not recognised" raise ValueError, "prior not recognised"
# store the prior in a local list # store the prior in a local list
for w in which: for w in which:
self.priors[w] = what self.priors[w] = what
@ -110,22 +114,16 @@ class model(parameterised):
return ret return ret
def _transform_gradients(self, g): def _transform_gradients(self, g):
"""
Takes a list of gradients and return an array of transformed gradients (positive/negative/tied/and so on)
"""
x = self._get_params() x = self._get_params()
g[self.constrained_positive_indices] = g[self.constrained_positive_indices] * x[self.constrained_positive_indices] for index,constraint in zip(self.constrained_indices, self.constraints):
g[self.constrained_negative_indices] = g[self.constrained_negative_indices] * x[self.constrained_negative_indices] g[index] = g[index] * constraint.gradfactor(x[index])
[np.put(g, i, g[i] * (x[i] - l) * (h - x[i]) / (h - l)) for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
[np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]] [np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]]
if len(self.tied_indices) or len(self.constrained_fixed_indices): if len(self.tied_indices) or len(self.fixed_indices):
to_remove = np.hstack((self.constrained_fixed_indices + [t[1:] for t in self.tied_indices])) to_remove = np.hstack((self.fixed_indices+[t[1:] for t in self.tied_indices]))
return np.delete(g, to_remove) return np.delete(g,to_remove)
else: else:
return g return g
def randomize(self): def randomize(self):
""" """
Randomize the model. Randomize the model.
@ -209,7 +207,7 @@ class model(parameterised):
""" """
Ensure that any variables which should clearly be positive have been constrained somehow. Ensure that any variables which should clearly be positive have been constrained somehow.
""" """
positive_strings = ['variance', 'lengthscale', 'precision'] positive_strings = ['variance','lengthscale', 'precision', 'kappa']
param_names = self._get_param_names() param_names = self._get_param_names()
currently_constrained = self.all_constrained_indices() currently_constrained = self.all_constrained_indices()
to_make_positive = [] to_make_positive = []

View file

@ -9,26 +9,7 @@ import cPickle
import os import os
from ..util.squashers import sigmoid from ..util.squashers import sigmoid
import warnings import warnings
import transformations
def truncate_pad(string, width, align='m'):
"""
A helper function to make aligned strings for parameterised.__str__
"""
width = max(width, 4)
if len(string) > width:
return string[:width - 3] + '...'
elif len(string) == width:
return string
elif len(string) < width:
diff = width - len(string)
if align == 'm':
return ' ' * np.floor(diff / 2.) + string + ' ' * np.ceil(diff / 2.)
elif align == 'l':
return string + ' ' * diff
elif align == 'r':
return ' ' * diff + string
else:
raise ValueError
class parameterised(object): class parameterised(object):
def __init__(self): def __init__(self):
@ -36,13 +17,10 @@ class parameterised(object):
This is the base class for model and kernel. Mostly just handles tieing and constraining of parameters This is the base class for model and kernel. Mostly just handles tieing and constraining of parameters
""" """
self.tied_indices = [] self.tied_indices = []
self.constrained_fixed_indices = [] self.fixed_indices = []
self.constrained_fixed_values = [] self.fixed_values = []
self.constrained_positive_indices = np.empty(shape=(0,), dtype=np.int64) self.constrained_indices = []
self.constrained_negative_indices = np.empty(shape=(0,), dtype=np.int64) self.constraints = []
self.constrained_bounded_indices = []
self.constrained_bounded_uppers = []
self.constrained_bounded_lowers = []
def pickle(self, filename, protocol= -1): def pickle(self, filename, protocol= -1):
f = file(filename, 'w') f = file(filename, 'w')
@ -50,10 +28,7 @@ class parameterised(object):
f.close() f.close()
def copy(self): def copy(self):
""" """Returns a (deep) copy of the current model """
Returns a (deep) copy of the current model
"""
return copy.deepcopy(self) return copy.deepcopy(self)
@property @property
@ -64,6 +39,7 @@ class parameterised(object):
:see_also: :py:func:`GPy.core.parameterised.params_transformed` :see_also: :py:func:`GPy.core.parameterised.params_transformed`
""" """
return self._get_params() return self._get_params()
@params.setter @params.setter
def params(self, params): def params(self, params):
self._set_params(params) self._set_params(params)
@ -76,6 +52,7 @@ class parameterised(object):
:see_also: :py:func:`GPy.core.parameterised.params` :see_also: :py:func:`GPy.core.parameterised.params`
""" """
return self._get_params_transformed() return self._get_params_transformed()
@params_transformed.setter @params_transformed.setter
def params_transformed(self, params): def params_transformed(self, params):
self._set_params_transformed(params) self._set_params_transformed(params)
@ -97,7 +74,9 @@ class parameterised(object):
def __getitem__(self, name, return_names=False): def __getitem__(self, name, return_names=False):
""" """
Get a model parameter by name. The name is applied as a regular expression and all parameters that match that regular expression are returned. Get a model parameter by name. The name is applied as a regular
expression and all parameters that match that regular expression are
returned.
""" """
matches = self.grep_param_names(name) matches = self.grep_param_names(name)
if len(matches): if len(matches):
@ -110,7 +89,9 @@ class parameterised(object):
def __setitem__(self, name, val): def __setitem__(self, name, val):
""" """
Set model parameter(s) by name. The name is provided as a regular expression. All parameters matching that regular expression are set to ghe given value. Set model parameter(s) by name. The name is provided as a regular
expression. All parameters matching that regular expression are set to
the given value.
""" """
matches = self.grep_param_names(name) matches = self.grep_param_names(name)
if len(matches): if len(matches):
@ -119,8 +100,6 @@ class parameterised(object):
x = self.params x = self.params
x[matches] = val x[matches] = val
self.params = x self.params = x
# import ipdb;ipdb.set_trace()
# self.params[matches] = val
else: else:
raise AttributeError, "no parameter matches %s" % name raise AttributeError, "no parameter matches %s" % name
@ -140,13 +119,6 @@ class parameterised(object):
"""Unties all parameters by setting tied_indices to an empty list.""" """Unties all parameters by setting tied_indices to an empty list."""
self.tied_indices = [] self.tied_indices = []
def all_constrained_indices(self):
"""Return a np array of all the constrained indices"""
ret = [np.hstack(i) for i in [self.constrained_bounded_indices, self.constrained_positive_indices, self.constrained_negative_indices, self.constrained_fixed_indices] if len(i)]
if len(ret):
return np.hstack(ret)
else:
return []
def grep_param_names(self, expr): def grep_param_names(self, expr):
""" """
Arguments Arguments
@ -159,7 +131,7 @@ class parameterised(object):
Notes Notes
----- -----
Other objects are passed through - i.e. integers which were'nt meant for grepping Other objects are passed through - i.e. integers which weren't meant for grepping
""" """
if type(expr) in [str, np.string_, np.str]: if type(expr) in [str, np.string_, np.str]:
@ -171,108 +143,77 @@ class parameterised(object):
return expr return expr
def Nparam_transformed(self): def Nparam_transformed(self):
""" removed = 0
Compute the number of parameters after ties and fixing have been performed for tie in self.tied_indices:
""" removed += tie.size - 1
ties = 0
for ti in self.tied_indices:
ties += ti.size - 1
fixes = 0 for fix in self.fixed_indices:
for fi in self.constrained_fixed_indices: removed += fix.size
fixes += len(fi)
return self.Nparam - fixes - ties
def constrain_positive(self, which):
"""
Set positive constraints.
Arguments
---------
which -- np.array(dtype=int), or regular expression object or string
"""
matches = self.grep_param_names(which)
assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained"
self.constrained_positive_indices = np.hstack((self.constrained_positive_indices, matches))
# check to ensure constraint is in place
x = self._get_params()
for i, xx in enumerate(x):
if (xx < 0) & (i in matches):
x[i] = -xx
self._set_params(x)
return len(self._get_params()) - removed
def unconstrain(self, which): def unconstrain(self, which):
"""Unconstrain matching parameters. does not untie parameters""" """Unconstrain matching parameters. does not untie parameters"""
matches = self.grep_param_names(which) matches = self.grep_param_names(which)
# positive/negative
self.constrained_positive_indices = np.delete(self.constrained_positive_indices, np.nonzero(np.sum(self.constrained_positive_indices[:, None] == matches[None, :], 1))[0]) #tranformed contraints:
self.constrained_negative_indices = np.delete(self.constrained_negative_indices, np.nonzero(np.sum(self.constrained_negative_indices[:, None] == matches[None, :], 1))[0]) for match in matches:
# bounded self.constrained_indices = [i[i<>match] for i in self.constrained_indices]
if len(self.constrained_bounded_indices):
self.constrained_bounded_indices = [np.delete(a, np.nonzero(np.sum(a[:, None] == matches[None, :], 1))[0]) for a in self.constrained_bounded_indices] #remove empty constraints
if np.hstack(self.constrained_bounded_indices).size: tmp = zip(*[(i,t) for i,t in zip(self.constrained_indices,self.constraints) if len(i)])
self.constrained_bounded_uppers, self.constrained_bounded_lowers, self.constrained_bounded_indices = zip(*[(u, l, i) for u, l, i in zip(self.constrained_bounded_uppers, self.constrained_bounded_lowers, self.constrained_bounded_indices) if i.size])
self.constrained_bounded_uppers, self.constrained_bounded_lowers, self.constrained_bounded_indices = list(self.constrained_bounded_uppers), list(self.constrained_bounded_lowers), list(self.constrained_bounded_indices)
else:
self.constrained_bounded_uppers, self.constrained_bounded_lowers, self.constrained_bounded_indices = [], [], []
# fixed:
for i, indices in enumerate(self.constrained_fixed_indices):
self.constrained_fixed_indices[i] = np.delete(indices, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0])
# remove empty elements
tmp = [(i, v) for i, v in zip(self.constrained_fixed_indices, self.constrained_fixed_values) if len(i)]
if tmp: if tmp:
self.constrained_fixed_indices, self.constrained_fixed_values = zip(*tmp) self.constrained_indices, self.constraints = zip(*[(i,t) for i,t in zip(self.constrained_indices,self.constraints) if len(i)])
self.constrained_fixed_indices, self.constrained_fixed_values = list(self.constrained_fixed_indices), list(self.constrained_fixed_values) self.constrained_indices, self.constraints = list(self.constrained_indices), list(self.constraints)
# fixed:
self.fixed_values = [np.delete(values, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices,values in zip(self.fixed_indices,self.fixed_values)]
self.fixed_indices = [np.delete(indices, np.nonzero(np.sum(indices[:, None] == matches[None, :], 1))[0]) for indices in self.fixed_indices]
# remove empty elements
tmp = [(i, v) for i, v in zip(self.fixed_indices, self.fixed_values) if len(i)]
if tmp:
self.fixed_indices, self.fixed_values = zip(*tmp)
self.fixed_indices, self.fixed_values = list(self.fixed_indices), list(self.fixed_values)
else: else:
self.constrained_fixed_indices, self.constrained_fixed_values = [], [] self.fixed_indices, self.fixed_values = [], []
def constrain_negative(self, which): def constrain_negative(self, which):
""" """ Set negative constraints. """
Set negative constraints. self.constrain(which, transformations.negative_exponent())
:param which: which variables to constrain def constrain_positive(self, which):
:type which: regular expression string """ Set positive constraints. """
self.constrain(which, transformations.logexp())
def constrain_bounded(self, which,lower, upper):
""" Set bounded constraints. """
self.constrain(which, transformations.logistic(lower, upper))
def all_constrained_indices(self):
if len(self.constrained_indices):
return np.hstack(self.constrained_indices)
else:
return np.empty(shape=(0,))
def constrain(self,which,transform):
assert isinstance(transform,transformations.transformation)
"""
matches = self.grep_param_names(which) matches = self.grep_param_names(which)
assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained" overlap = set(matches).intersection(set(self.all_constrained_indices()))
self.constrained_negative_indices = np.hstack((self.constrained_negative_indices, matches)) if overlap:
# check to ensure constraint is in place self.unconstrain(np.asarray(list(overlap)))
print 'Warning: re-constraining these parameters'
pn = self._get_param_names()
for i in overlap:
print pn[i]
self.constrained_indices.append(matches)
self.constraints.append(transform)
x = self._get_params() x = self._get_params()
for i, xx in enumerate(x): x[matches] = transform.initialize(x[matches])
if (xx > 0.) and (i in matches):
x[i] = -xx
self._set_params(x) self._set_params(x)
def constrain_bounded(self, which, lower, upper):
"""Set bounded constraints.
Arguments
---------
which -- np.array(dtype=int), or regular expression object or string
upper -- (float) the upper bound on the constraint
lower -- (float) the lower bound on the constraint
"""
matches = self.grep_param_names(which)
assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained"
assert lower < upper, "lower bound must be smaller than upper bound!"
self.constrained_bounded_indices.append(matches)
self.constrained_bounded_uppers.append(upper)
self.constrained_bounded_lowers.append(lower)
# check to ensure constraint is in place
x = self._get_params()
for i, xx in enumerate(x):
if ((xx <= lower) | (xx >= upper)) & (i in matches):
x[i] = sigmoid(xx) * (upper - lower) + lower
self._set_params(x)
def constrain_fixed(self, which, value=None): def constrain_fixed(self, which, value=None):
""" """
Arguments Arguments
@ -288,42 +229,36 @@ class parameterised(object):
""" """
matches = self.grep_param_names(which) matches = self.grep_param_names(which)
assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained" assert not np.any(matches[:, None] == self.all_constrained_indices()), "Some indices are already constrained"
self.constrained_fixed_indices.append(matches) self.fixed_indices.append(matches)
if value != None: if value != None:
self.constrained_fixed_values.append(value) self.fixed_values.append(value)
else: else:
self.constrained_fixed_values.append(self._get_params()[self.constrained_fixed_indices[-1]]) self.fixed_values.append(self._get_params()[self.fixed_indices[-1]])
# self.constrained_fixed_values.append(value) # self.fixed_values.append(value)
self._set_params_transformed(self._get_params_transformed()) self._set_params_transformed(self._get_params_transformed())
def _get_params_transformed(self): def _get_params_transformed(self):
"""use self._get_params to get the 'true' parameters of the model, which are then tied, constrained and fixed""" """use self._get_params to get the 'true' parameters of the model, which are then tied, constrained and fixed"""
x = self._get_params() x = self._get_params()
x[self.constrained_positive_indices] = np.log(x[self.constrained_positive_indices]) [np.put(x,i,t.finv(x[i])) for i,t in zip(self.constrained_indices,self.constraints)]
x[self.constrained_negative_indices] = np.log(-x[self.constrained_negative_indices])
[np.put(x, i, np.log(np.clip(x[i] - l, 1e-10, np.inf) / np.clip(h - x[i], 1e-10, np.inf))) for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
to_remove = self.constrained_fixed_indices + [t[1:] for t in self.tied_indices] to_remove = self.fixed_indices + [t[1:] for t in self.tied_indices]
if len(to_remove): if len(to_remove):
return np.delete(x, np.hstack(to_remove)) return np.delete(x, np.hstack(to_remove))
else: else:
return x return x
def _set_params_transformed(self, x): def _set_params_transformed(self, x):
""" takes the vector x, which is then modified (by untying, reparameterising or inserting fixed values), and then call self._set_params""" """ takes the vector x, which is then modified (by untying, reparameterising or inserting fixed values), and then call self._set_params"""
# work out how many places are fixed, and where they are. tricky logic! # work out how many places are fixed, and where they are. tricky logic!
Nfix_places = 0. fix_places = self.fixed_indices + [t[1:] for t in self.tied_indices]
if len(self.tied_indices): if len(fix_places):
Nfix_places += np.hstack(self.tied_indices).size - len(self.tied_indices) fix_places = np.hstack(fix_places)
if len(self.constrained_fixed_indices): Nfix_places = fix_places.size
Nfix_places += np.hstack(self.constrained_fixed_indices).size
if Nfix_places:
fix_places = np.hstack(self.constrained_fixed_indices + [t[1:] for t in self.tied_indices])
else: else:
fix_places = [] Nfix_places = 0
free_places = np.setdiff1d(np.arange(Nfix_places + x.size, dtype=np.int), fix_places) free_places = np.setdiff1d(np.arange(Nfix_places + x.size, dtype=np.int), fix_places)
@ -331,11 +266,12 @@ class parameterised(object):
xx = np.zeros(Nfix_places + free_places.size, dtype=np.float64) xx = np.zeros(Nfix_places + free_places.size, dtype=np.float64)
xx[free_places] = x xx[free_places] = x
[np.put(xx, i, v) for i, v in zip(self.constrained_fixed_indices, self.constrained_fixed_values)] [np.put(xx, i, v) for i, v in zip(self.fixed_indices, self.fixed_values)]
[np.put(xx, i, v) for i, v in [(t[1:], xx[t[0]]) for t in self.tied_indices] ] [np.put(xx, i, v) for i, v in [(t[1:], xx[t[0]]) for t in self.tied_indices] ]
xx[self.constrained_positive_indices] = np.exp(xx[self.constrained_positive_indices])
xx[self.constrained_negative_indices] = -np.exp(xx[self.constrained_negative_indices]) [np.put(xx,i,t.f(xx[i])) for i,t in zip(self.constrained_indices, self.constraints)]
[np.put(xx, i, low + sigmoid(xx[i]) * (high - low)) for i, low, high in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)] if hasattr(self,'debug'):
stop
self._set_params(xx) self._set_params(xx)
def _get_param_names_transformed(self): def _get_param_names_transformed(self):
@ -354,17 +290,13 @@ class parameterised(object):
remove = np.empty(shape=(0,), dtype=np.int) remove = np.empty(shape=(0,), dtype=np.int)
# also remove the fixed params # also remove the fixed params
if len(self.constrained_fixed_indices): if len(self.fixed_indices):
remove = np.hstack((remove, np.hstack(self.constrained_fixed_indices))) remove = np.hstack((remove, np.hstack(self.fixed_indices)))
# add markers to show that some variables are constrained # add markers to show that some variables are constrained
for i in self.constrained_positive_indices: for i,t in zip(self.constrained_indices,self.constraints):
n[i] = n[i] + '(+ve)'
for i in self.constrained_negative_indices:
n[i] = n[i] + '(-ve)'
for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers):
for ii in i: for ii in i:
n[ii] = n[ii] + '(bounded)' n[ii] = n[ii] + t.__str__()
n = [nn for i, nn in enumerate(n) if not i in remove] n = [nn for i, nn in enumerate(n) if not i in remove]
return n return n
@ -382,16 +314,12 @@ class parameterised(object):
values = self._get_params() # map(str,self._get_params()) values = self._get_params() # map(str,self._get_params())
# sort out the constraints # sort out the constraints
constraints = [''] * len(names) constraints = [''] * len(names)
for i in self.constrained_positive_indices: for i,t in zip(self.constrained_indices,self.constraints):
constraints[i] = '(+ve)' for ii in i:
for i in self.constrained_negative_indices: constraints[ii] = t.__str__()
constraints[i] = '(-ve)' for i in self.fixed_indices:
for i in self.constrained_fixed_indices:
for ii in i: for ii in i:
constraints[ii] = 'Fixed' constraints[ii] = 'Fixed'
for i, u, l in zip(self.constrained_bounded_indices, self.constrained_bounded_uppers, self.constrained_bounded_lowers):
for ii in i:
constraints[ii] = '(' + str(l) + ', ' + str(u) + ')'
# sort out the ties # sort out the ties
ties = [''] * len(names) ties = [''] * len(names)
for i, tie in enumerate(self.tied_indices): for i, tie in enumerate(self.tied_indices):

View file

@ -0,0 +1,85 @@
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
class transformation(object):
    """Abstract base for parameter transformations.

    A transformation maps an unconstrained optimiser-space value x into a
    constrained model-space value f via ``f``, and back via ``finv``.
    Subclasses declare their target ``domain`` (e.g. 'positive', 'bounded').
    """
    def __init__(self):
        # Concrete subclasses overwrite this with 'positive', 'bounded', etc.
        self.domain = 'undefined'

    def f(self, x):
        """Map an unconstrained value x into the constrained domain."""
        raise NotImplementedError

    def finv(self, x):
        """Inverse mapping: constrained value back to optimiser space."""
        raise NotImplementedError

    def gradfactor(self, f):
        """df_dx evaluated at self.f(x)=f"""
        raise NotImplementedError

    def initialize(self, f):
        """Produce a sensible initial value for f(x)."""
        raise NotImplementedError

    def __str__(self):
        raise NotImplementedError
class logexp(transformation):
    """Softplus transform onto the positive reals: f(x) = log(1 + exp(x)).

    Lets a positively-constrained parameter be optimised over the whole
    real line.
    """
    def __init__(self):
        self.domain = 'positive'

    def f(self, x):
        # logaddexp(0, x) == log(1 + exp(x)) but stays finite for large x,
        # where the naive np.log(1. + np.exp(x)) overflows to inf (x > ~709).
        return np.logaddexp(0., x)

    def finv(self, f):
        # log(exp(f) - 1) == f + log(1 - exp(-f)); the right-hand form is
        # stable for large f, where np.exp(f) would overflow.
        return f + np.log1p(-np.exp(-f))

    def gradfactor(self, f):
        """df_dx evaluated at self.f(x)=f"""
        # exp(x)/(1+exp(x)) expressed in terms of f: (exp(f)-1)/exp(f)
        # == 1 - exp(-f) == -expm1(-f), finite for large f.
        return -np.expm1(-f)

    def initialize(self, f):
        # Fold any starting value into the positive domain.
        return np.abs(f)

    def __str__(self):
        return '(+ve)'
class exponent(transformation):
    """Exponential transform onto the positive reals: f(x) = exp(x)."""
    def __init__(self):
        self.domain = 'positive'

    def f(self, x):
        return np.exp(x)

    def finv(self, x):
        return np.log(x)

    def gradfactor(self, f):
        """df_dx evaluated at self.f(x)=f"""
        # d exp(x)/dx = exp(x), which is exactly f itself.
        return f

    def initialize(self, f):
        # Fold any starting value into the positive domain.
        return np.abs(f)

    def __str__(self):
        return '(+ve)'
class negative_exponent(transformation):
    """Negated exponential transform onto the negative reals: f(x) = -exp(x)."""
    def __init__(self):
        self.domain = 'negative'

    def f(self, x):
        return -np.exp(x)

    def finv(self, x):
        return np.log(-x)

    def gradfactor(self, f):
        """df_dx evaluated at self.f(x)=f"""
        # d(-exp(x))/dx = -exp(x), which is exactly f itself.
        return f

    def initialize(self, f):
        # Fold any starting value into the negative domain.
        return -np.abs(f)

    def __str__(self):
        return '(-ve)'
class logistic(transformation):
    """Scaled sigmoid mapping the real line onto the open interval (lower, upper)."""
    def __init__(self, lower, upper):
        self.domain = 'bounded'
        assert lower < upper
        self.lower, self.upper = float(lower), float(upper)
        self.difference = self.upper - self.lower

    def f(self, x):
        # lower + (upper - lower) * sigmoid(x)
        return self.lower + self.difference / (1. + np.exp(-x))

    def finv(self, f):
        # Inverse sigmoid; clipping keeps the logs/divisions finite when f
        # sits on (or numerically outside) the interval boundaries.
        numerator = np.clip(f - self.lower, 1e-10, np.inf)
        denominator = np.clip(self.upper - f, 1e-10, np.inf)
        return np.log(numerator / denominator)

    def gradfactor(self, f):
        """df_dx evaluated at self.f(x)=f"""
        return (f - self.lower) * (self.upper - f) / self.difference

    def initialize(self, f):
        # Start at the midpoint of the interval (f's value is ignored,
        # only its shape is kept via f * 0).
        return self.f(f * 0.)

    def __str__(self):
        return '({},{})'.format(self.lower, self.upper)

View file

@ -182,7 +182,8 @@ def bgplvm_simulation_matlab_compare():
from GPy.models import mrd from GPy.models import mrd
from GPy import kern from GPy import kern
reload(mrd); reload(kern) reload(mrd); reload(kern)
k = kern.rbf(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2)) #k = kern.rbf(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
k = kern.linear(Q, ARD=True) + kern.bias(Q, np.exp(-2)) + kern.white(Q, np.exp(-2))
m = Bayesian_GPLVM(Y, Q, init="PCA", M=M, kernel=k, m = Bayesian_GPLVM(Y, Q, init="PCA", M=M, kernel=k,
# X=mu, # X=mu,
# X_variance=S, # X_variance=S,

View file

@ -48,11 +48,7 @@ class kern(parameterised):
def plot_ARD(self, ax=None): def plot_ARD(self, ax=None):
""" """If an ARD kernel is present, it bar-plots the ARD parameters"""
If an ARD kernel is present, it bar-plots the ARD parameters
"""
if ax is None: if ax is None:
ax = pb.gca() ax = pb.gca()
for p in self.parts: for p in self.parts:
@ -71,12 +67,10 @@ class kern(parameterised):
def _transform_gradients(self, g): def _transform_gradients(self, g):
x = self._get_params() x = self._get_params()
g[self.constrained_positive_indices] = g[self.constrained_positive_indices] * x[self.constrained_positive_indices] [np.put(x,i,x*t.gradfactor(x[i])) for i,t in zip(self.constrained_indices, self.constraints)]
g[self.constrained_negative_indices] = g[self.constrained_negative_indices] * x[self.constrained_negative_indices]
[np.put(g, i, g[i] * (x[i] - l) * (h - x[i]) / (h - l)) for i, l, h in zip(self.constrained_bounded_indices, self.constrained_bounded_lowers, self.constrained_bounded_uppers)]
[np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]] [np.put(g, i, v) for i, v in [(t[0], np.sum(g[t])) for t in self.tied_indices]]
if len(self.tied_indices) or len(self.constrained_fixed_indices): if len(self.tied_indices) or len(self.fixed_indices):
to_remove = np.hstack((self.constrained_fixed_indices + [t[1:] for t in self.tied_indices])) to_remove = np.hstack((self.fixed_indices + [t[1:] for t in self.tied_indices]))
return np.delete(g, to_remove) return np.delete(g, to_remove)
else: else:
return g return g
@ -93,13 +87,10 @@ class kern(parameterised):
assert self.D == other.D assert self.D == other.D
newkern = kern(self.D, self.parts + other.parts, self.input_slices + other.input_slices) newkern = kern(self.D, self.parts + other.parts, self.input_slices + other.input_slices)
# transfer constraints: # transfer constraints:
newkern.constrained_positive_indices = np.hstack((self.constrained_positive_indices, self.Nparam + other.constrained_positive_indices)) newkern.constrained_indices = self.constrained_indices + [i+self.Nparam for i in other.constrained_indices]
newkern.constrained_negative_indices = np.hstack((self.constrained_negative_indices, self.Nparam + other.constrained_negative_indices)) newkern.constraints = self.constraints + other.constraints
newkern.constrained_bounded_indices = self.constrained_bounded_indices + [self.Nparam + x for x in other.constrained_bounded_indices] newkern.fixed_indices = self.fixed_indices + [self.Nparam + x for x in other.fixed_indices]
newkern.constrained_bounded_lowers = self.constrained_bounded_lowers + other.constrained_bounded_lowers newkern.fixed_values = self.fixed_values + other.fixed_values
newkern.constrained_bounded_uppers = self.constrained_bounded_uppers + other.constrained_bounded_uppers
newkern.constrained_fixed_indices = self.constrained_fixed_indices + [self.Nparam + x for x in other.constrained_fixed_indices]
newkern.constrained_fixed_values = self.constrained_fixed_values + other.constrained_fixed_values
newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices] newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices]
return newkern return newkern
@ -126,13 +117,12 @@ class kern(parameterised):
newkern = kern(D, self.parts + other.parts, self_input_slices + other_input_slices) newkern = kern(D, self.parts + other.parts, self_input_slices + other_input_slices)
# transfer constraints: # transfer constraints:
newkern.constrained_positive_indices = np.hstack((self.constrained_positive_indices, self.Nparam + other.constrained_positive_indices)) newkern.constrained_indices = self.constrained_indices + [x+self.Nparam for x in other.constrained_indices]
newkern.constrained_negative_indices = np.hstack((self.constrained_negative_indices, self.Nparam + other.constrained_negative_indices)) newkern.constraints = self.constraints + other.constraints
newkern.constrained_bounded_indices = self.constrained_bounded_indices + [self.Nparam + x for x in other.constrained_bounded_indices] newkern.fixed_indices = self.fixed_indices + [self.Nparam + x for x in other.fixed_indices]
newkern.constrained_bounded_lowers = self.constrained_bounded_lowers + other.constrained_bounded_lowers newkern.fixed_values = self.fixed_values + other.fixed_values
newkern.constraints = self.constraints + other.constraints
newkern.constrained_bounded_uppers = self.constrained_bounded_uppers + other.constrained_bounded_uppers newkern.constrained_bounded_uppers = self.constrained_bounded_uppers + other.constrained_bounded_uppers
newkern.constrained_fixed_indices = self.constrained_fixed_indices + [self.Nparam + x for x in other.constrained_fixed_indices]
newkern.constrained_fixed_values = self.constrained_fixed_values + other.constrained_fixed_values
newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices] newkern.tied_indices = self.tied_indices + [self.Nparam + x for x in other.tied_indices]
return newkern return newkern
@ -208,15 +198,11 @@ class kern(parameterised):
# Get the ties and constrains of the kernels before the multiplication # Get the ties and constrains of the kernels before the multiplication
prev_ties = K1.tied_indices + [arr + K1.Nparam for arr in K2.tied_indices] prev_ties = K1.tied_indices + [arr + K1.Nparam for arr in K2.tied_indices]
prev_constr_pos = np.append(K1.constrained_positive_indices, K1.Nparam + K2.constrained_positive_indices) prev_constr_ind = [K1.constrained_indices] + [K1.Nparam + i for i in K2.constrained_indices]
prev_constr_neg = np.append(K1.constrained_negative_indices, K1.Nparam + K2.constrained_negative_indices) prev_constr = K1.constraints + K2.constraints
prev_constr_fix = K1.constrained_fixed_indices + [arr + K1.Nparam for arr in K2.constrained_fixed_indices] prev_constr_fix = K1.fixed_indices + [arr + K1.Nparam for arr in K2.fixed_indices]
prev_constr_fix_values = K1.constrained_fixed_values + K2.constrained_fixed_values prev_constr_fix_values = K1.fixed_values + K2.fixed_values
prev_constr_bou = K1.constrained_bounded_indices + [arr + K1.Nparam for arr in K2.constrained_bounded_indices]
prev_constr_bou_low = K1.constrained_bounded_lowers + K2.constrained_bounded_lowers
prev_constr_bou_upp = K1.constrained_bounded_uppers + K2.constrained_bounded_uppers
# follow the previous ties # follow the previous ties
for arr in prev_ties: for arr in prev_ties:
@ -228,14 +214,8 @@ class kern(parameterised):
index = np.where(index_param == i)[0] index = np.where(index_param == i)[0]
if index.size > 1: if index.size > 1:
self.tie_params(index) self.tie_params(index)
for i in prev_constr_pos: for i,t in zip(prev_constr_ind,prev_constr):
self.constrain_positive(np.where(index_param == i)[0]) self.constrain(np.where(index_param == i)[0],t)
for i in prev_constr_neg:
self.constrain_neg(np.where(index_param == i)[0])
for j, i in enumerate(prev_constr_fix):
self.constrain_fixed(np.where(index_param == i)[0], prev_constr_fix_values[j])
for j, i in enumerate(prev_constr_bou):
self.constrain_bounded(np.where(index_param == i)[0], prev_constr_bou_low[j], prev_constr_bou_upp[j])
def _get_params(self): def _get_params(self):
return np.hstack([p._get_params() for p in self.parts]) return np.hstack([p._get_params() for p in self.parts])

View file

@ -188,12 +188,12 @@ class rbf(kernpart):
self._X2 = None self._X2 = None
X = X/self.lengthscale X = X/self.lengthscale
Xsquare = np.sum(np.square(X),1) Xsquare = np.sum(np.square(X),1)
self._K_dist2 = (-2.*tdot(X) + Xsquare[:,None] + Xsquare[None,:]) self._K_dist2 = -2.*tdot(X) + (Xsquare[:,None] + Xsquare[None,:])
else: else:
self._X2 = X2.copy() self._X2 = X2.copy()
X = X/self.lengthscale X = X/self.lengthscale
X2 = X2/self.lengthscale X2 = X2/self.lengthscale
self._K_dist2 = (-2.*np.dot(X, X2.T) + np.sum(np.square(X),1)[:,None] + np.sum(np.square(X2),1)[None,:]) self._K_dist2 = -2.*np.dot(X, X2.T) + (np.sum(np.square(X),1)[:,None] + np.sum(np.square(X2),1)[None,:])
self._K_dvar = np.exp(-0.5*self._K_dist2) self._K_dvar = np.exp(-0.5*self._K_dist2)
def _psi_computations(self,Z,mu,S): def _psi_computations(self,Z,mu,S):

View file

@ -53,9 +53,11 @@ class probit(likelihood_function):
mu = mu.flatten() mu = mu.flatten()
var = var.flatten() var = var.flatten()
mean = stats.norm.cdf(mu/np.sqrt(1+var)) mean = stats.norm.cdf(mu/np.sqrt(1+var))
p_025 = np.zeros(mu.shape) norm_025 = [stats.norm.ppf(.025,m,v) for m,v in zip(mu,var)]
p_975 = np.ones(mu.shape) norm_975 = [stats.norm.ppf(.975,m,v) for m,v in zip(mu,var)]
return mean, np.nan*var, p_025, p_975 # TODO: better values here (mean is okay) p_025 = stats.norm.cdf(norm_025/np.sqrt(1+var))
p_975 = stats.norm.cdf(norm_975/np.sqrt(1+var))
return mean, np.nan*var, p_025, p_975 # TODO: var
class Poisson(likelihood_function): class Poisson(likelihood_function):
""" """

View file

@ -54,6 +54,7 @@ class Bayesian_GPLVM(sparse_GP, GPLVM):
self._savedgradients = [] self._savedgradients = []
self._savederrors = [] self._savederrors = []
self._savedpsiKmm = [] self._savedpsiKmm = []
sparse_GP.__init__(self, X, Gaussian(Y), kernel, Z=Z, X_variance=X_variance, **kwargs) sparse_GP.__init__(self, X, Gaussian(Y), kernel, Z=Z, X_variance=X_variance, **kwargs)
@property @property

View file

@ -35,6 +35,9 @@ class GP(model):
self.N, self.Q = self.X.shape self.N, self.Q = self.X.shape
assert isinstance(kernel, kern.kern) assert isinstance(kernel, kern.kern)
self.kern = kernel self.kern = kernel
self.likelihood = likelihood
assert self.X.shape[0] == self.likelihood.data.shape[0]
self.N, self.D = self.likelihood.data.shape
# here's some simple normalization for the inputs # here's some simple normalization for the inputs
if normalize_X: if normalize_X:
@ -47,12 +50,8 @@ class GP(model):
self._Xmean = np.zeros((1, self.X.shape[1])) self._Xmean = np.zeros((1, self.X.shape[1]))
self._Xstd = np.ones((1, self.X.shape[1])) self._Xstd = np.ones((1, self.X.shape[1]))
self.likelihood = likelihood if not hasattr(self,'has_uncertain_inputs'):
# assert self.X.shape[0] == self.likelihood.Y.shape[0] self.has_uncertain_inputs = False
# self.N, self.D = self.likelihood.Y.shape
assert self.X.shape[0] == self.likelihood.data.shape[0]
self.N, self.D = self.likelihood.data.shape
model.__init__(self) model.__init__(self)
def dL_dZ(self): def dL_dZ(self):
@ -232,7 +231,7 @@ class GP(model):
else: else:
raise NotImplementedError, "Cannot define a frame with more than two input dimensions" raise NotImplementedError, "Cannot define a frame with more than two input dimensions"
def plot(self, samples=0, plot_limits=None, which_data='all', which_functions='all', resolution=None, levels=20): def plot(self, samples=0, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20):
""" """
TODO: Docstrings! TODO: Docstrings!
:param levels: for 2D plotting, the number of contour levels to use :param levels: for 2D plotting, the number of contour levels to use

View file

@ -68,47 +68,60 @@ class sparse_GP(GP):
sf = self.scale_factor sf = self.scale_factor
sf2 = sf**2 sf2 = sf**2
#The rather complex computations of psi2_beta_scaled #invert Kmm
self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm)
#The rather complex computations of psi2_beta_scaled and self.A
if self.likelihood.is_heteroscedastic: if self.likelihood.is_heteroscedastic:
assert self.likelihood.D == 1 #TODO: what if the likelihood is heterscedatic and there are multiple independent outputs? assert self.likelihood.D == 1 #TODO: what if the likelihood is heterscedatic and there are multiple independent outputs?
if self.has_uncertain_inputs: if self.has_uncertain_inputs:
self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision.flatten().reshape(self.N,1,1)/sf2)).sum(0) self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision.flatten().reshape(self.N,1,1)/sf2)).sum(0)
evals, evecs = linalg.eigh(self.psi2_beta_scaled)
clipped_evals = np.clip(evals,0.,1e6) # TODO: make clipping configurable
if not np.allclose(evals, clipped_evals):
print "Warning: clipping posterior eigenvalues"
tmp = evecs*np.sqrt(clipped_evals)
tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
self.A = tdot(tmp)
else: else:
tmp = self.psi1*(np.sqrt(self.likelihood.precision.flatten().reshape(1,self.N))/sf) tmp = self.psi1*(np.sqrt(self.likelihood.precision.flatten().reshape(1,self.N))/sf)
#self.psi2_beta_scaled = np.dot(tmp,tmp.T)
self.psi2_beta_scaled = tdot(tmp) self.psi2_beta_scaled = tdot(tmp)
tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
self.A = tdot(tmp)
else: else:
if self.has_uncertain_inputs: if self.has_uncertain_inputs:
self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision/sf2)).sum(0) self.psi2_beta_scaled = (self.psi2*(self.likelihood.precision/sf2)).sum(0)
evals, evecs = linalg.eigh(self.psi2_beta_scaled)
clipped_evals = np.clip(evals,0.,1e6) # TODO: make clipping configurable
if not np.allclose(evals, clipped_evals):
print "Warning: clipping posterior eigenvalues"
tmp = evecs*np.sqrt(clipped_evals)
self.psi2_beta_scaled = tdot(tmp)
tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
self.A = tdot(tmp)
else: else:
tmp = self.psi1*(np.sqrt(self.likelihood.precision)/sf) tmp = self.psi1*(np.sqrt(self.likelihood.precision)/sf)
#self.psi2_beta_scaled = np.dot(tmp,tmp.T)
self.psi2_beta_scaled = tdot(tmp) self.psi2_beta_scaled = tdot(tmp)
tmp, _ = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp),lower=1)
self.A = tdot(tmp)
self.Kmmi, self.Lm, self.Lmi, self.Kmm_logdet = pdinv(self.Kmm) #invert B and compute C. C is the posterior covariance of u
self.V = (self.likelihood.precision/self.scale_factor)*self.likelihood.Y
#Compute A = L^-1 psi2 beta L^-T
#self. A = mdot(self.Lmi,self.psi2_beta_scaled,self.Lmi.T)
tmp = linalg.lapack.flapack.dtrtrs(self.Lm,self.psi2_beta_scaled.T,lower=1)[0]
self.A = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1)[0]
self.B = np.eye(self.M)/sf2 + self.A self.B = np.eye(self.M)/sf2 + self.A
self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B) self.Bi, self.LB, self.LBi, self.B_logdet = pdinv(self.B)
self.psi1V = np.dot(self.psi1, self.V)
tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.Bi),lower=1,trans=1)[0] tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.Bi),lower=1,trans=1)[0]
self.C = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0] self.C = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0]
self.V = (self.likelihood.precision/self.scale_factor)*self.likelihood.Y
self.psi1V = np.dot(self.psi1, self.V)
#back substutue C into psi1V #back substutue C into psi1V
tmp,info1 = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.psi1V),lower=1,trans=0) tmp,info1 = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.psi1V),lower=1,trans=0)
self._P = tdot(tmp)
tmp,info2 = linalg.lapack.flapack.dpotrs(self.LB,tmp,lower=1) tmp,info2 = linalg.lapack.flapack.dpotrs(self.LB,tmp,lower=1)
self.Cpsi1V,info3 = linalg.lapack.flapack.dtrtrs(self.Lm,tmp,lower=1,trans=1) self.Cpsi1V,info3 = linalg.lapack.flapack.dtrtrs(self.Lm,tmp,lower=1,trans=1)
#self.Cpsi1V = np.dot(self.C,self.psi1V) #self.Cpsi1V = np.dot(self.C,self.psi1V)
self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T) self.Cpsi1VVpsi1 = np.dot(self.Cpsi1V,self.psi1V.T) #TODO: this dot can be eliminated
self.E = tdot(self.Cpsi1V/sf) self.E = tdot(self.Cpsi1V/sf)
@ -130,24 +143,22 @@ class sparse_GP(GP):
self.dL_dpsi2 = None self.dL_dpsi2 = None
else: else:
#self.dL_dpsi2 = 0.5 * self.likelihood.precision * self.D * self.Kmmi # dB
#self.dL_dpsi2 += - 0.5 * self.likelihood.precision/sf2 * self.D * self.C # dC
#self.dL_dpsi2 += - 0.5 * self.likelihood.precision * self.E # dD
self.dL_dpsi2 = 0.5*self.likelihood.precision*(self.D*(self.Kmmi - self.C/sf2) -self.E) self.dL_dpsi2 = 0.5*self.likelihood.precision*(self.D*(self.Kmmi - self.C/sf2) -self.E)
if self.has_uncertain_inputs: if self.has_uncertain_inputs:
#repeat for each of the N psi_2 matrices #repeat for each of the N psi_2 matrices
self.dL_dpsi2 = np.repeat(self.dL_dpsi2[None,:,:],self.N,axis=0) self.dL_dpsi2 = np.repeat(self.dL_dpsi2[None,:,:],self.N,axis=0)
else: else:
#subsume back into psi1 (==Kmn)
self.dL_dpsi1 += 2.*np.dot(self.dL_dpsi2,self.psi1) self.dL_dpsi1 += 2.*np.dot(self.dL_dpsi2,self.psi1)
self.dL_dpsi2 = None self.dL_dpsi2 = None
# Compute dL_dKmm # Compute dL_dKmm
#self.dL_dKmm_old = -0.5 * self.D * mdot(self.Lmi.T, self.A, self.Lmi)*sf2 # dB #self.dL_dKmm = -0.5 * self.D * mdot(self.Lmi.T, self.A, self.Lmi)*sf2 # dB
#self.dL_dKmm += -0.5 * self.D * (- self.C/sf2 - 2.*mdot(self.C, self.psi2_beta_scaled, self.Kmmi) + self.Kmmi) # dC #self.dL_dKmm += -0.5 * self.D * (- self.C/sf2 - 2.*mdot(self.C, self.psi2_beta_scaled, self.Kmmi) + self.Kmmi) # dC
#self.dL_dKmm += np.dot(np.dot(self.E*sf2, self.psi2_beta_scaled) - self.Cpsi1VVpsi1, self.Kmmi) + 0.5*self.E # dD #self.dL_dKmm += np.dot(np.dot(self.E*sf2, self.psi2_beta_scaled) - self.Cpsi1VVpsi1, self.Kmmi) + 0.5*self.E # dD
tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.B),lower=1,trans=1)[0] tmp = linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(self.B),lower=1,trans=1)[0]
self.dL_dKmm = -0.5*self.D*sf2*linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0] #dA self.dL_dKmm = -0.5*self.D*sf2*linalg.lapack.flapack.dtrtrs(self.Lm,np.asfortranarray(tmp.T),lower=1,trans=1)[0]
tmp = np.dot(self.D*self.C + self.E*sf2,self.psi2_beta_scaled) - self.Cpsi1VVpsi1 tmp = np.dot(self.D*self.C + self.E*sf2,self.psi2_beta_scaled) - self.Cpsi1VVpsi1
tmp = linalg.lapack.flapack.dpotrs(self.Lm,np.asfortranarray(tmp.T),lower=1)[0].T tmp = linalg.lapack.flapack.dpotrs(self.Lm,np.asfortranarray(tmp.T),lower=1)[0].T
self.dL_dKmm += 0.5*(self.D*self.C/sf2 + self.E) +tmp # d(C+D) self.dL_dKmm += 0.5*(self.D*self.C/sf2 + self.E) +tmp # d(C+D)
@ -189,14 +200,13 @@ class sparse_GP(GP):
self.kern._set_params(p[self.Z.size:self.Z.size+self.kern.Nparam]) self.kern._set_params(p[self.Z.size:self.Z.size+self.kern.Nparam])
self.likelihood._set_params(p[self.Z.size+self.kern.Nparam:]) self.likelihood._set_params(p[self.Z.size+self.kern.Nparam:])
self._compute_kernel_matrices() self._compute_kernel_matrices()
if self.auto_scale_factor:
self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
#if self.auto_scale_factor: #if self.auto_scale_factor:
# if self.likelihood.is_heteroscedastic:
# self.scale_factor = max(1,np.sqrt(self.psi2_beta_scaled.sum(0).mean()))
# else:
# self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision) # self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
#self.scale_factor = 1. if self.auto_scale_factor:
if self.likelihood.is_heteroscedastic:
self.scale_factor = max(100,np.sqrt(self.psi2_beta_scaled.sum(0).mean()))
else:
self.scale_factor = np.sqrt(self.psi2.sum(0).mean()*self.likelihood.precision)
self._computations() self._computations()
def _get_params(self): def _get_params(self):

View file

@ -72,7 +72,7 @@ def jitchol(A,maxtries=5):
raise linalg.LinAlgError, "not pd: negative diagonal elements" raise linalg.LinAlgError, "not pd: negative diagonal elements"
jitter= diagA.mean()*1e-6 jitter= diagA.mean()*1e-6
for i in range(1,maxtries+1): for i in range(1,maxtries+1):
print '\rWarning: adding jitter of {:.10e} '.format(jitter), print 'Warning: adding jitter of {:.10e}'.format(jitter)
try: try:
return linalg.cholesky(A+np.eye(A.shape[0]).T*jitter, lower = True) return linalg.cholesky(A+np.eye(A.shape[0]).T*jitter, lower = True)
except: except: