mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-08 03:22:38 +02:00
UPD: Added testing, and bug fixing.
This commit is contained in:
parent
9c07bd167c
commit
93e8b71f60
9 changed files with 6084 additions and 4248 deletions
|
|
@ -2,36 +2,11 @@ import GPy
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import matplotlib.pyplot as plt
|
import matplotlib.pyplot as plt
|
||||||
|
|
||||||
import GPy.models.state_space_new as SS_new
|
import GPy.models.state_space_model as SS_model
|
||||||
|
|
||||||
X = np.linspace(0, 10, 2000)[:, None]
|
X = np.linspace(0, 10, 2000)[:, None]
|
||||||
Y = np.sin(X) + np.random.randn(*X.shape)*0.1
|
Y = np.sin(X) + np.random.randn(*X.shape)*0.1
|
||||||
|
|
||||||
# Need to run these lines when X and Y are imported ->
|
|
||||||
#X.shape = (X.shape[0],1)
|
|
||||||
#Y.shape = (Y.shape[0],1)
|
|
||||||
# Need to run these lines when X and Y are imported <-
|
|
||||||
|
|
||||||
## Generation of minimal example data ->
|
|
||||||
#X = np.random.rand(3)
|
|
||||||
#sort_index = np.argsort(X)
|
|
||||||
#X = X[sort_index]; X.shape = (X.shape[0],1)
|
|
||||||
#Y = np.sin(10*X) + np.random.randn(*X.shape)*0.1
|
|
||||||
## Generation of minimal example data <-
|
|
||||||
|
|
||||||
#plt.figure()
|
|
||||||
#plt.plot( X, Y)
|
|
||||||
#plt.show()
|
|
||||||
|
|
||||||
kernel = GPy.kern.Matern32(X.shape[1])
|
|
||||||
m = GPy.models.StateSpace(X,Y, kernel)
|
|
||||||
|
|
||||||
print m
|
|
||||||
#
|
|
||||||
m.optimize(optimizer='bfgs',messages=True)
|
|
||||||
#
|
|
||||||
print m
|
|
||||||
|
|
||||||
kernel1 = GPy.kern.Matern32(X.shape[1])
|
kernel1 = GPy.kern.Matern32(X.shape[1])
|
||||||
m1 = GPy.models.GPRegression(X,Y, kernel1)
|
m1 = GPy.models.GPRegression(X,Y, kernel1)
|
||||||
|
|
||||||
|
|
@ -40,9 +15,9 @@ m1.optimize(optimizer='bfgs',messages=True)
|
||||||
|
|
||||||
print m1
|
print m1
|
||||||
|
|
||||||
kernel2 = GPy.kern.Matern32(X.shape[1])
|
kernel2 = GPy.kern.sde_Matern32(X.shape[1])
|
||||||
m2 = SS_new.StateSpace(X,Y, kernel2)
|
#m2 = SS_model.StateSpace(X,Y, kernel2)
|
||||||
|
m2 = GPy.models.StateSpace(X,Y, kernel2)
|
||||||
print m2
|
print m2
|
||||||
|
|
||||||
m2.optimize(optimizer='bfgs',messages=True)
|
m2.optimize(optimizer='bfgs',messages=True)
|
||||||
|
|
|
||||||
|
|
@ -23,4 +23,4 @@ from .one_vs_all_classification import OneVsAllClassification
|
||||||
from .one_vs_all_sparse_classification import OneVsAllSparseClassification
|
from .one_vs_all_sparse_classification import OneVsAllSparseClassification
|
||||||
from .dpgplvm import DPBayesianGPLVM
|
from .dpgplvm import DPBayesianGPLVM
|
||||||
|
|
||||||
from state_space import StateSpace
|
from .state_space_model import StateSpace
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
|
|
@ -7,6 +7,10 @@ cimport numpy as np
|
||||||
import scipy as sp
|
import scipy as sp
|
||||||
cimport cython
|
cimport cython
|
||||||
|
|
||||||
|
#from libc.math cimport isnan # for nan checking in kalman filter cycle
|
||||||
|
cdef extern from "numpy/npy_math.h":
|
||||||
|
bint npy_isnan(double x)
|
||||||
|
|
||||||
DTYPE = np.float64
|
DTYPE = np.float64
|
||||||
DTYPE_int = np.int64
|
DTYPE_int = np.int64
|
||||||
|
|
||||||
|
|
@ -913,6 +917,8 @@ def _cont_discr_kalman_filter_raw_Cython(int state_dim, Dynamic_Callables_Cython
|
||||||
cdef np.ndarray[DTYPE_t, ndim=3] dm_pred, dP_pred
|
cdef np.ndarray[DTYPE_t, ndim=3] dm_pred, dP_pred
|
||||||
cdef np.ndarray[DTYPE_t, ndim=2] log_likelihood_update, d_log_likelihood_update
|
cdef np.ndarray[DTYPE_t, ndim=2] log_likelihood_update, d_log_likelihood_update
|
||||||
cdef int k
|
cdef int k
|
||||||
|
|
||||||
|
#print "Hi I am cython"
|
||||||
for k in range(0,steps_no):
|
for k in range(0,steps_no):
|
||||||
# In this loop index for new estimations is (k+1), old - (k)
|
# In this loop index for new estimations is (k+1), old - (k)
|
||||||
# This happened because initial values are stored at 0-th index.
|
# This happened because initial values are stored at 0-th index.
|
||||||
|
|
@ -925,16 +931,27 @@ def _cont_discr_kalman_filter_raw_Cython(int state_dim, Dynamic_Callables_Cython
|
||||||
calc_grad_log_likelihood, dm_upd, dP_upd)
|
calc_grad_log_likelihood, dm_upd, dP_upd)
|
||||||
|
|
||||||
k_measurment = Y[k,:,:]
|
k_measurment = Y[k,:,:]
|
||||||
|
if (np.any(np.isnan(k_measurment)) == False):
|
||||||
# if np.any(np.isnan(k_measurment)):
|
# if np.any(np.isnan(k_measurment)):
|
||||||
# raise ValueError("Nan measurements are currently not supported")
|
# raise ValueError("Nan measurements are currently not supported")
|
||||||
|
|
||||||
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
||||||
_kalman_update_step_SVD_Cython(k, m_pred , P_pred, p_measurement_callables,
|
_kalman_update_step_SVD_Cython(k, m_pred , P_pred, p_measurement_callables,
|
||||||
k_measurment, calc_log_likelihood=calc_log_likelihood,
|
k_measurment, calc_log_likelihood=calc_log_likelihood,
|
||||||
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
p_dm = dm_pred, p_dP = dP_pred)
|
p_dm = dm_pred, p_dP = dP_pred)
|
||||||
|
else:
|
||||||
|
if not np.all(np.isnan(k_measurment)):
|
||||||
|
raise ValueError("""Nan measurements are currently not supported if
|
||||||
|
they are intermixed with not NaN measurements""")
|
||||||
|
else:
|
||||||
|
m_upd = m_pred; P_upd = P_pred; dm_upd = dm_pred; dP_upd = dP_pred
|
||||||
|
if calc_log_likelihood:
|
||||||
|
log_likelihood_update = np.zeros((1,time_series_no))
|
||||||
|
if calc_grad_log_likelihood:
|
||||||
|
d_log_likelihood_update = np.zeros((grad_params_no,time_series_no))
|
||||||
|
|
||||||
|
|
||||||
if calc_log_likelihood:
|
if calc_log_likelihood:
|
||||||
log_likelihood += log_likelihood_update
|
log_likelihood += log_likelihood_update
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -12,21 +12,37 @@ import numpy as np
|
||||||
import scipy as sp
|
import scipy as sp
|
||||||
import scipy.linalg as linalg
|
import scipy.linalg as linalg
|
||||||
|
|
||||||
|
|
||||||
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
|
try:
|
||||||
|
import state_space_setup
|
||||||
|
setup_available = True
|
||||||
|
except ImportError as e:
|
||||||
|
setup_available = False
|
||||||
|
|
||||||
|
|
||||||
print_verbose = False
|
print_verbose = False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import state_space_cython
|
import state_space_cython
|
||||||
cython_code_available = True
|
cython_code_available = True
|
||||||
print("state_space: cython is available")
|
if print_verbose:
|
||||||
|
print("state_space: cython is available")
|
||||||
except ImportError as e:
|
except ImportError as e:
|
||||||
cython_code_available = False
|
cython_code_available = False
|
||||||
|
|
||||||
|
|
||||||
#cython_code_available = False
|
#cython_code_available = False
|
||||||
if cython_code_available:
|
# Use cython by default
|
||||||
print("state_space: cython is used")
|
use_cython = False
|
||||||
else:
|
if setup_available:
|
||||||
print("state_space: cython is NOT used")
|
use_cython = state_space_setup.use_cython
|
||||||
|
|
||||||
|
if print_verbose:
|
||||||
|
if use_cython:
|
||||||
|
print("state_space: cython is used")
|
||||||
|
else:
|
||||||
|
print("state_space: cython is NOT used")
|
||||||
|
|
||||||
class Dynamic_Callables_Python(object):
|
class Dynamic_Callables_Python(object):
|
||||||
|
|
||||||
|
|
@ -88,7 +104,7 @@ class Dynamic_Callables_Python(object):
|
||||||
|
|
||||||
raise NotImplemented("reset is not implemented!")
|
raise NotImplemented("reset is not implemented!")
|
||||||
|
|
||||||
if cython_code_available:
|
if use_cython:
|
||||||
Dynamic_Callables_Class = state_space_cython.Dynamic_Callables_Cython
|
Dynamic_Callables_Class = state_space_cython.Dynamic_Callables_Cython
|
||||||
else:
|
else:
|
||||||
Dynamic_Callables_Class = Dynamic_Callables_Python
|
Dynamic_Callables_Class = Dynamic_Callables_Python
|
||||||
|
|
@ -157,7 +173,7 @@ class Measurement_Callables_Python(object):
|
||||||
|
|
||||||
raise NotImplemented("reset is not implemented!")
|
raise NotImplemented("reset is not implemented!")
|
||||||
|
|
||||||
if cython_code_available:
|
if use_cython:
|
||||||
Measurement_Callables_Class = state_space_cython.Measurement_Callables_Cython
|
Measurement_Callables_Class = state_space_cython.Measurement_Callables_Cython
|
||||||
else:
|
else:
|
||||||
Measurement_Callables_Class = Measurement_Callables_Python
|
Measurement_Callables_Class = Measurement_Callables_Python
|
||||||
|
|
@ -241,7 +257,7 @@ class R_handling_Python(Measurement_Callables_Class):
|
||||||
|
|
||||||
return inv_square_root
|
return inv_square_root
|
||||||
|
|
||||||
if cython_code_available:
|
if use_cython:
|
||||||
R_handling_Class = state_space_cython.R_handling_Cython
|
R_handling_Class = state_space_cython.R_handling_Cython
|
||||||
else:
|
else:
|
||||||
R_handling_Class = R_handling_Python
|
R_handling_Class = R_handling_Python
|
||||||
|
|
@ -281,7 +297,7 @@ class Std_Measurement_Callables_Python(R_handling_Class):
|
||||||
|
|
||||||
return self.dH # the same dirivative on each iteration
|
return self.dH # the same dirivative on each iteration
|
||||||
|
|
||||||
if cython_code_available:
|
if use_cython:
|
||||||
Std_Measurement_Callables_Class = state_space_cython.Std_Measurement_Callables_Cython
|
Std_Measurement_Callables_Class = state_space_cython.Std_Measurement_Callables_Cython
|
||||||
else:
|
else:
|
||||||
Std_Measurement_Callables_Class = Std_Measurement_Callables_Python
|
Std_Measurement_Callables_Class = Std_Measurement_Callables_Python
|
||||||
|
|
@ -374,7 +390,7 @@ class Q_handling_Python(Dynamic_Callables_Class):
|
||||||
|
|
||||||
return square_root
|
return square_root
|
||||||
|
|
||||||
if cython_code_available:
|
if use_cython:
|
||||||
Q_handling_Class = state_space_cython.Q_handling_Cython
|
Q_handling_Class = state_space_cython.Q_handling_Cython
|
||||||
else:
|
else:
|
||||||
Q_handling_Class = Q_handling_Python
|
Q_handling_Class = Q_handling_Python
|
||||||
|
|
@ -382,7 +398,7 @@ else:
|
||||||
class Std_Dynamic_Callables_Python(Q_handling_Class):
|
class Std_Dynamic_Callables_Python(Q_handling_Class):
|
||||||
|
|
||||||
def __init__(self, A, A_time_var_index, Q, index, Q_time_var_index, unique_Q_number, dA = None, dQ=None):
|
def __init__(self, A, A_time_var_index, Q, index, Q_time_var_index, unique_Q_number, dA = None, dQ=None):
|
||||||
super(Std_Measurement_Callables_Python,self).__init__(Q, index, Q_time_var_index, unique_Q_number,dQ)
|
super(Std_Dynamic_Callables_Python,self).__init__(Q, index, Q_time_var_index, unique_Q_number,dQ)
|
||||||
|
|
||||||
self.A = A
|
self.A = A
|
||||||
self.A_time_var_index = A_time_var_index
|
self.A_time_var_index = A_time_var_index
|
||||||
|
|
@ -414,8 +430,15 @@ class Std_Dynamic_Callables_Python(Q_handling_Class):
|
||||||
raise ValueError("dA derivative is None")
|
raise ValueError("dA derivative is None")
|
||||||
|
|
||||||
return self.dA # the same dirivative on each iteration
|
return self.dA # the same dirivative on each iteration
|
||||||
|
|
||||||
if cython_code_available:
|
def reset(self,compute_derivatives = False):
|
||||||
|
"""
|
||||||
|
Return the state of this object to the beginning of iteration (to k eq. 0)
|
||||||
|
"""
|
||||||
|
|
||||||
|
return self
|
||||||
|
|
||||||
|
if use_cython:
|
||||||
Std_Dynamic_Callables_Class = state_space_cython.Std_Dynamic_Callables_Cython
|
Std_Dynamic_Callables_Class = state_space_cython.Std_Dynamic_Callables_Cython
|
||||||
else:
|
else:
|
||||||
Std_Dynamic_Callables_Class = Std_Dynamic_Callables_Python
|
Std_Dynamic_Callables_Class = Std_Dynamic_Callables_Python
|
||||||
|
|
@ -460,7 +483,7 @@ class DescreteStateSpaceMeta(type):
|
||||||
After thos method the class object is created
|
After thos method the class object is created
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if cython_code_available:
|
if use_cython:
|
||||||
if '_kalman_prediction_step_SVD' in attributes:
|
if '_kalman_prediction_step_SVD' in attributes:
|
||||||
attributes['_kalman_prediction_step_SVD'] = AddMethodToClass(state_space_cython._kalman_prediction_step_SVD_Cython)
|
attributes['_kalman_prediction_step_SVD'] = AddMethodToClass(state_space_cython._kalman_prediction_step_SVD_Cython)
|
||||||
|
|
||||||
|
|
@ -542,7 +565,8 @@ class DescreteStateSpace(object):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def kalman_filter(cls,p_A, p_Q, p_H, p_R, Y, index = None, m_init=None,
|
def kalman_filter(cls,p_A, p_Q, p_H, p_R, Y, index = None, m_init=None,
|
||||||
P_init=None, calc_log_likelihood=False,
|
P_init=None, p_kalman_filter_type='regular',
|
||||||
|
calc_log_likelihood=False,
|
||||||
calc_grad_log_likelihood=False, grad_params_no=None,
|
calc_grad_log_likelihood=False, grad_params_no=None,
|
||||||
grad_calc_params=None):
|
grad_calc_params=None):
|
||||||
"""
|
"""
|
||||||
|
|
@ -670,7 +694,9 @@ class DescreteStateSpace(object):
|
||||||
use in smoother for convenience. They are: 'p_a', 'p_f_A', 'p_f_Q'
|
use in smoother for convenience. They are: 'p_a', 'p_f_A', 'p_f_Q'
|
||||||
The dictionary contains the same fields.
|
The dictionary contains the same fields.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
# Parameters checking ->
|
# Parameters checking ->
|
||||||
# index
|
# index
|
||||||
p_A = np.atleast_1d(p_A)
|
p_A = np.atleast_1d(p_A)
|
||||||
|
|
@ -735,6 +761,9 @@ class DescreteStateSpace(object):
|
||||||
P_init = np.eye(state_dim)
|
P_init = np.eye(state_dim)
|
||||||
elif not isinstance(P_init, collections.Iterable): #scalar
|
elif not isinstance(P_init, collections.Iterable): #scalar
|
||||||
P_init = P_init*np.eye(state_dim)
|
P_init = P_init*np.eye(state_dim)
|
||||||
|
|
||||||
|
if p_kalman_filter_type not in ('regular', 'svd'):
|
||||||
|
raise ValueError("Kalman filer type neither 'regular nor 'svd'.")
|
||||||
|
|
||||||
# Functions to pass to the kalman_filter algorithm:
|
# Functions to pass to the kalman_filter algorithm:
|
||||||
# Parameters:
|
# Parameters:
|
||||||
|
|
@ -745,7 +774,6 @@ class DescreteStateSpace(object):
|
||||||
c_p_Q = p_A.copy() # create a copy because this object is passed to the smoother
|
c_p_Q = p_A.copy() # create a copy because this object is passed to the smoother
|
||||||
c_index = index.copy() # create a copy because this object is passed to the smoother
|
c_index = index.copy() # create a copy because this object is passed to the smoother
|
||||||
|
|
||||||
grad_calc_params_pass_further = None
|
|
||||||
if calc_grad_log_likelihood:
|
if calc_grad_log_likelihood:
|
||||||
if model_matrices_chage_with_time:
|
if model_matrices_chage_with_time:
|
||||||
raise ValueError("When computing likelihood gradient A and Q can not change over time.")
|
raise ValueError("When computing likelihood gradient A and Q can not change over time.")
|
||||||
|
|
@ -757,25 +785,32 @@ class DescreteStateSpace(object):
|
||||||
|
|
||||||
dm_init = grad_calc_params.get('dm_init')
|
dm_init = grad_calc_params.get('dm_init')
|
||||||
if dm_init is None:
|
if dm_init is None:
|
||||||
|
# multiple time series mode. Keep grad_params always as a last dimension
|
||||||
dm_init = np.zeros((state_dim, time_series_no, grad_params_no))
|
dm_init = np.zeros((state_dim, time_series_no, grad_params_no))
|
||||||
|
|
||||||
dP_init = grad_calc_params.get('dP_init')
|
dP_init = grad_calc_params.get('dP_init')
|
||||||
if dP_init is None:
|
if dP_init is None:
|
||||||
dP_init = np.zeros((state_dim,state_dim,grad_params_no))
|
dP_init = np.zeros((state_dim,state_dim,grad_params_no))
|
||||||
|
else:
|
||||||
|
dA = None
|
||||||
|
dQ = None
|
||||||
|
dH = None
|
||||||
|
dR = None
|
||||||
|
dm_init = None
|
||||||
|
dP_init = None
|
||||||
|
|
||||||
grad_calc_params_pass_further = {}
|
|
||||||
grad_calc_params_pass_further['dm_init'] = dm_init
|
|
||||||
grad_calc_params_pass_further['dP_init'] = dP_init
|
|
||||||
|
|
||||||
dynamic_callables = Std_Dynamic_Callables_Class(c_p_A, A_time_var_index, c_p_Q, c_index, Q_time_var_index, 20, dA, dQ)
|
dynamic_callables = Std_Dynamic_Callables_Class(c_p_A, A_time_var_index, c_p_Q, c_index, Q_time_var_index, 20, dA, dQ)
|
||||||
measurement_callables = Std_Measurement_Callables_Class(p_H, H_time_var_index, p_R, index, R_time_var_index, 20, dH, dR)
|
measurement_callables = Std_Measurement_Callables_Class(p_H, H_time_var_index, p_R, index, R_time_var_index, 20, dH, dR)
|
||||||
|
|
||||||
(M, P,log_likelihood, grad_log_likelihood) = cls._kalman_algorithm_raw(state_dim, dynamic_callables,
|
(M, P,log_likelihood, grad_log_likelihood, dynamic_callables) = \
|
||||||
|
cls._kalman_algorithm_raw(state_dim, dynamic_callables,
|
||||||
measurement_callables, Y, m_init,
|
measurement_callables, Y, m_init,
|
||||||
P_init, calc_log_likelihood=calc_log_likelihood,
|
P_init, p_kalman_filter_type = p_kalman_filter_type,
|
||||||
|
calc_log_likelihood=calc_log_likelihood,
|
||||||
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
grad_calc_params=grad_calc_params_pass_further)
|
grad_params_no=grad_params_no,
|
||||||
|
dm_init=dm_init, dP_init=dP_init)
|
||||||
|
|
||||||
# restore shapes so that input parameters are unchenged
|
# restore shapes so that input parameters are unchenged
|
||||||
if old_index_shape is not None:
|
if old_index_shape is not None:
|
||||||
index.shape = old_index_shape
|
index.shape = old_index_shape
|
||||||
|
|
@ -968,8 +1003,10 @@ class DescreteStateSpace(object):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _kalman_algorithm_raw(cls,state_dim, p_dynamic_callables, p_measurement_callables, Y, m_init,
|
def _kalman_algorithm_raw(cls,state_dim, p_dynamic_callables, p_measurement_callables, Y, m_init,
|
||||||
P_init, calc_log_likelihood=False,
|
P_init, p_kalman_filter_type='regular',
|
||||||
calc_grad_log_likelihood=False, grad_calc_params=None):
|
calc_log_likelihood=False,
|
||||||
|
calc_grad_log_likelihood=False, grad_params_no=None,
|
||||||
|
dm_init=None, dP_init=None):
|
||||||
"""
|
"""
|
||||||
General nonlinear filtering algorithm for inference in the state-space
|
General nonlinear filtering algorithm for inference in the state-space
|
||||||
model:
|
model:
|
||||||
|
|
@ -1040,6 +1077,9 @@ class DescreteStateSpace(object):
|
||||||
"multiple time series mode" does not affect it, since it does not
|
"multiple time series mode" does not affect it, since it does not
|
||||||
affect anything related to state variaces.
|
affect anything related to state variaces.
|
||||||
|
|
||||||
|
p_kalman_filter_type: string
|
||||||
|
|
||||||
|
|
||||||
calc_log_likelihood: boolean
|
calc_log_likelihood: boolean
|
||||||
Whether to calculate marginal likelihood of the state-space model.
|
Whether to calculate marginal likelihood of the state-space model.
|
||||||
|
|
||||||
|
|
@ -1081,19 +1121,21 @@ class DescreteStateSpace(object):
|
||||||
steps_no = Y.shape[0] # number of steps in the Kalman Filter
|
steps_no = Y.shape[0] # number of steps in the Kalman Filter
|
||||||
time_series_no = Y.shape[2] # multiple time series mode
|
time_series_no = Y.shape[2] # multiple time series mode
|
||||||
|
|
||||||
if calc_grad_log_likelihood:
|
|
||||||
dm_init = grad_calc_params['dm_init']; dP_init = grad_calc_params['dP_init']
|
|
||||||
else:
|
|
||||||
dm_init = None; dP_init = None
|
|
||||||
|
|
||||||
# Allocate space for results
|
# Allocate space for results
|
||||||
# Mean estimations. Initial values will be included
|
# Mean estimations. Initial values will be included
|
||||||
M = np.empty(((steps_no+1),state_dim,time_series_no))
|
M = np.empty(((steps_no+1),state_dim,time_series_no))
|
||||||
M[0,:,:] = m_init # Initialize mean values
|
M[0,:,:] = m_init # Initialize mean values
|
||||||
# Variance estimations. Initial values will be included
|
# Variance estimations. Initial values will be included
|
||||||
P = np.empty(((steps_no+1),state_dim,state_dim))
|
P = np.empty(((steps_no+1),state_dim,state_dim))
|
||||||
|
P_init = 0.5*( P_init + P_init.T) # symmetrize initial covariance. In some ustable cases this is uiseful
|
||||||
P[0,:,:] = P_init # Initialize initial covariance matrix
|
P[0,:,:] = P_init # Initialize initial covariance matrix
|
||||||
|
|
||||||
|
if p_kalman_filter_type == 'svd':
|
||||||
|
(U,S,Vh) = sp.linalg.svd( P_init,full_matrices=False, compute_uv=True,
|
||||||
|
overwrite_a=False,check_finite=True)
|
||||||
|
S[ (S==0) ] = 1e-17 # allows to run algorithm for singular initial variance
|
||||||
|
P_upd = (P_init, S,U)
|
||||||
|
|
||||||
log_likelihood = 0 if calc_log_likelihood else None
|
log_likelihood = 0 if calc_log_likelihood else None
|
||||||
grad_log_likelihood = 0 if calc_grad_log_likelihood else None
|
grad_log_likelihood = 0 if calc_grad_log_likelihood else None
|
||||||
|
|
||||||
|
|
@ -1107,21 +1149,63 @@ class DescreteStateSpace(object):
|
||||||
|
|
||||||
prev_mean = M[k,:,:] # mean from the previous step
|
prev_mean = M[k,:,:] # mean from the previous step
|
||||||
|
|
||||||
m_pred, P_pred, dm_pred, dP_pred = \
|
if p_kalman_filter_type == 'svd':
|
||||||
cls._kalman_prediction_step(k, prev_mean ,P[k,:,:], p_dynamic_callables,
|
m_pred, P_pred, dm_pred, dP_pred = \
|
||||||
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
cls._kalman_prediction_step_SVD(k, prev_mean ,P_upd, p_dynamic_callables,
|
||||||
p_dm = dm_upd, p_dP = dP_upd)
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
|
p_dm = dm_upd, p_dP = dP_upd)
|
||||||
|
else:
|
||||||
|
m_pred, P_pred, dm_pred, dP_pred = \
|
||||||
|
cls._kalman_prediction_step(k, prev_mean ,P[k,:,:], p_dynamic_callables,
|
||||||
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
|
p_dm = dm_upd, p_dP = dP_upd )
|
||||||
|
|
||||||
k_measurment = Y[k,:,:]
|
k_measurment = Y[k,:,:]
|
||||||
|
|
||||||
if np.any(np.isnan(k_measurment)):
|
if (np.any(np.isnan(k_measurment)) == False):
|
||||||
raise ValueError("Nan measurements are currently not supported")
|
if p_kalman_filter_type == 'svd':
|
||||||
|
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
||||||
|
cls._kalman_update_step_SVD(k, m_pred , P_pred, p_measurement_callables,
|
||||||
|
k_measurment, calc_log_likelihood=calc_log_likelihood,
|
||||||
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
|
p_dm = dm_pred, p_dP = dP_pred )
|
||||||
|
|
||||||
|
|
||||||
|
# m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
||||||
|
# cls._kalman_update_step(k, m_pred , P_pred[0], f_h, f_H, p_R.f_R, k_measurment,
|
||||||
|
# calc_log_likelihood=calc_log_likelihood,
|
||||||
|
# calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
|
# p_dm = dm_pred, p_dP = dP_pred, grad_calc_params_2 = (dH, dR))
|
||||||
|
#
|
||||||
|
# (U,S,Vh) = sp.linalg.svd( P_upd,full_matrices=False, compute_uv=True,
|
||||||
|
# overwrite_a=False,check_finite=True)
|
||||||
|
# P_upd = (P_upd, S,U)
|
||||||
|
else:
|
||||||
|
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
||||||
|
cls._kalman_update_step(k, m_pred , P_pred, p_measurement_callables, k_measurment,
|
||||||
|
calc_log_likelihood=calc_log_likelihood,
|
||||||
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
|
p_dm = dm_pred, p_dP = dP_pred )
|
||||||
|
|
||||||
|
else:
|
||||||
|
# if k_measurment.shape != (1,1):
|
||||||
|
# raise ValueError("Nan measurements are currently not supported for \
|
||||||
|
# multidimensional output and multiple time series.")
|
||||||
|
# else:
|
||||||
|
# m_upd = m_pred; P_upd = P_pred; dm_upd = dm_pred; dP_upd = dP_pred
|
||||||
|
# log_likelihood_update = 0.0;
|
||||||
|
# d_log_likelihood_update = 0.0;
|
||||||
|
|
||||||
|
if not np.all(np.isnan(k_measurment)):
|
||||||
|
raise ValueError("""Nan measurements are currently not supported if
|
||||||
|
they are intermixed with not NaN measurements""")
|
||||||
|
else:
|
||||||
|
m_upd = m_pred; P_upd = P_pred; dm_upd = dm_pred; dP_upd = dP_pred
|
||||||
|
if calc_log_likelihood:
|
||||||
|
log_likelihood_update = np.zeros((time_series_no,))
|
||||||
|
if calc_grad_log_likelihood:
|
||||||
|
d_log_likelihood_update = np.zeros((grad_params_no,time_series_no))
|
||||||
|
|
||||||
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
|
||||||
cls._kalman_update_step(k, m_pred , P_pred, p_measurement_callables, k_measurment,
|
|
||||||
calc_log_likelihood=calc_log_likelihood,
|
|
||||||
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
|
||||||
p_dm = dm_pred, p_dP = dP_pred)
|
|
||||||
|
|
||||||
if calc_log_likelihood:
|
if calc_log_likelihood:
|
||||||
log_likelihood += log_likelihood_update
|
log_likelihood += log_likelihood_update
|
||||||
|
|
@ -1131,7 +1215,10 @@ class DescreteStateSpace(object):
|
||||||
|
|
||||||
M[k+1,:,:] = m_upd # separate mean value for each time series
|
M[k+1,:,:] = m_upd # separate mean value for each time series
|
||||||
|
|
||||||
P[k+1,:,:] = P_upd
|
if p_kalman_filter_type == 'svd':
|
||||||
|
P[k+1,:,:] = P_upd[0]
|
||||||
|
else:
|
||||||
|
P[k+1,:,:] = P_upd
|
||||||
|
|
||||||
# !!!Print statistics! Print sizes of matrices
|
# !!!Print statistics! Print sizes of matrices
|
||||||
# !!!Print statistics! Print iteration time base on another boolean variable
|
# !!!Print statistics! Print iteration time base on another boolean variable
|
||||||
|
|
@ -1372,6 +1459,7 @@ class DescreteStateSpace(object):
|
||||||
adds extra columns to the gradient.
|
adds extra columns to the gradient.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
m_pred = p_m # from prediction step
|
m_pred = p_m # from prediction step
|
||||||
P_pred = p_P # from prediction step
|
P_pred = p_P # from prediction step
|
||||||
|
|
@ -1596,19 +1684,27 @@ class DescreteStateSpace(object):
|
||||||
S = H.dot(P_pred).dot(H.T) + R
|
S = H.dot(P_pred).dot(H.T) + R
|
||||||
if measurement.shape[0]==1: # measurements are one dimensional
|
if measurement.shape[0]==1: # measurements are one dimensional
|
||||||
if (S < 0):
|
if (S < 0):
|
||||||
raise ValueError("Kalman Filter Update SVD: S is negative step %i" % k )
|
raise ValueError("Kalman Filter Update: S is negative step %i" % k )
|
||||||
#import pdb; pdb.set_trace()
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
K = P_pred.dot(H.T) / S
|
K = P_pred.dot(H.T) / S
|
||||||
if calc_log_likelihood:
|
if calc_log_likelihood:
|
||||||
log_likelihood_update = -0.5 * ( np.log(2*np.pi) + np.log(S) +
|
log_likelihood_update = -0.5 * ( np.log(2*np.pi) + np.log(S) +
|
||||||
v*v / S)
|
v*v / S)
|
||||||
|
#log_likelihood_update = log_likelihood_update[0,0] # to make int
|
||||||
if np.any(np.isnan(log_likelihood_update)): # some member in P_pred is None.
|
if np.any(np.isnan(log_likelihood_update)): # some member in P_pred is None.
|
||||||
raise ValueError("Nan values in likelihood update!")
|
raise ValueError("Nan values in likelihood update!")
|
||||||
LL = None; islower = None
|
LL = None; islower = None
|
||||||
else:
|
else:
|
||||||
raise ValueError("""Measurement dimension larger then 1 is currently not supported""")
|
LL,islower = linalg.cho_factor(S)
|
||||||
|
K = linalg.cho_solve((LL,islower), H.dot(P_pred.T)).T
|
||||||
|
|
||||||
|
if calc_log_likelihood:
|
||||||
|
log_likelihood_update = -0.5 * ( v.shape[0]*np.log(2*np.pi) +
|
||||||
|
2*np.sum( np.log(np.diag(LL)) ) +\
|
||||||
|
np.sum((linalg.cho_solve((LL,islower),v)) * v, axis = 0) ) # diagonal of v.T*S^{-1}*v
|
||||||
|
|
||||||
|
|
||||||
# Old method of computing updated covariance (for testing) ->
|
# Old method of computing updated covariance (for testing) ->
|
||||||
#P_upd_tst = K.dot(S).dot(K.T)
|
#P_upd_tst = K.dot(S).dot(K.T)
|
||||||
#P_upd_tst = 0.5*(P_upd_tst + P_upd_tst.T)
|
#P_upd_tst = 0.5*(P_upd_tst + P_upd_tst.T)
|
||||||
|
|
@ -1816,7 +1912,7 @@ class DescreteStateSpace(object):
|
||||||
p_m ,filter_covars[k,:,:],
|
p_m ,filter_covars[k,:,:],
|
||||||
m_pred, P_pred, p_m_prev_step ,P[k+1,:,:], p_dynamic_callables)
|
m_pred, P_pred, p_m_prev_step ,P[k+1,:,:], p_dynamic_callables)
|
||||||
|
|
||||||
M[k,:] = np.squeeze(m_upd)
|
M[k,:] = m_upd#np.squeeze(m_upd)
|
||||||
P[k,:,:] = P_upd
|
P[k,:,:] = P_upd
|
||||||
#G[k,:,:] = G_upd.T # store transposed G.
|
#G[k,:,:] = G_upd.T # store transposed G.
|
||||||
# Return values
|
# Return values
|
||||||
|
|
@ -2545,13 +2641,10 @@ class ContDescrStateSpace(DescreteStateSpace):
|
||||||
raise ValueError("Only one dimensional X data is supported.")
|
raise ValueError("Only one dimensional X data is supported.")
|
||||||
|
|
||||||
Y.shape, old_Y_shape = cls._reshape_input_data(Y.shape) # represent as column
|
Y.shape, old_Y_shape = cls._reshape_input_data(Y.shape) # represent as column
|
||||||
|
|
||||||
state_dim = F.shape[0]
|
state_dim = F.shape[0]
|
||||||
measurement_dim = Y.shape[1]
|
measurement_dim = Y.shape[1]
|
||||||
|
time_series_no = Y.shape[2] # multiple time series mode
|
||||||
if len(Y.shape) == 2:
|
|
||||||
time_series_no = 1 # regular case
|
|
||||||
elif len(Y.shape) == 3:
|
|
||||||
time_series_no = Y.shape[2] # multiple time series mode
|
|
||||||
|
|
||||||
if ((len(p_H.shape) == 3) and (len(p_H.shape[2]) != 1)) or\
|
if ((len(p_H.shape) == 3) and (len(p_H.shape[2]) != 1)) or\
|
||||||
((len(p_R.shape) == 3) and (len(p_R.shape[2]) != 1)):
|
((len(p_R.shape) == 3) and (len(p_R.shape[2]) != 1)):
|
||||||
|
|
@ -2592,8 +2685,6 @@ class ContDescrStateSpace(DescreteStateSpace):
|
||||||
if P_init is None:
|
if P_init is None:
|
||||||
P_init = P_inf.copy()
|
P_init = P_inf.copy()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if p_kalman_filter_type not in ('regular', 'svd'):
|
if p_kalman_filter_type not in ('regular', 'svd'):
|
||||||
raise ValueError("Kalman filer type neither 'regular nor 'svd'.")
|
raise ValueError("Kalman filer type neither 'regular nor 'svd'.")
|
||||||
|
|
||||||
|
|
@ -2608,8 +2699,8 @@ class ContDescrStateSpace(DescreteStateSpace):
|
||||||
|
|
||||||
if calc_grad_log_likelihood:
|
if calc_grad_log_likelihood:
|
||||||
|
|
||||||
dF = cls._check_grad_state_matrices( grad_calc_params.get('dF'), state_dim, grad_params_no, which = 'dA')
|
dF = cls._check_grad_state_matrices(grad_calc_params.get('dF'), state_dim, grad_params_no, which = 'dA')
|
||||||
dQc = cls._check_grad_state_matrices( grad_calc_params.get('dQc'), state_dim, grad_params_no, which = 'dQ')
|
dQc = cls._check_grad_state_matrices(grad_calc_params.get('dQc'), state_dim, grad_params_no, which = 'dQ')
|
||||||
dP_inf = cls._check_grad_state_matrices(grad_calc_params.get('dP_inf'), state_dim, grad_params_no, which = 'dA')
|
dP_inf = cls._check_grad_state_matrices(grad_calc_params.get('dP_inf'), state_dim, grad_params_no, which = 'dA')
|
||||||
|
|
||||||
dH = cls._check_grad_measurement_matrices(grad_calc_params.get('dH'), state_dim, grad_params_no, measurement_dim, which = 'dH')
|
dH = cls._check_grad_measurement_matrices(grad_calc_params.get('dH'), state_dim, grad_params_no, measurement_dim, which = 'dH')
|
||||||
|
|
@ -2670,10 +2761,11 @@ class ContDescrStateSpace(DescreteStateSpace):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _cont_discr_kalman_filter_raw(cls,state_dim, p_dynamic_callables, p_measurement_callables, X, Y,
|
def _cont_discr_kalman_filter_raw(cls,state_dim, p_dynamic_callables, p_measurement_callables, X, Y,
|
||||||
m_init=None, P_init=None,
|
m_init, P_init,
|
||||||
p_kalman_filter_type='regular',
|
p_kalman_filter_type='regular',
|
||||||
calc_log_likelihood=False,
|
calc_log_likelihood=False,
|
||||||
calc_grad_log_likelihood=False, grad_params_no=None, dm_init=None, dP_init=None):
|
calc_grad_log_likelihood=False, grad_params_no=None,
|
||||||
|
dm_init=None, dP_init=None):
|
||||||
"""
|
"""
|
||||||
General filtering algorithm for inference in the continuos-discrete
|
General filtering algorithm for inference in the continuos-discrete
|
||||||
state-space model:
|
state-space model:
|
||||||
|
|
@ -2772,7 +2864,7 @@ class ContDescrStateSpace(DescreteStateSpace):
|
||||||
P_init = 0.5*( P_init + P_init.T) # symmetrize initial covariance. In some ustable cases this is uiseful
|
P_init = 0.5*( P_init + P_init.T) # symmetrize initial covariance. In some ustable cases this is uiseful
|
||||||
P[0,:,:] = P_init # Initialize initial covariance matrix
|
P[0,:,:] = P_init # Initialize initial covariance matrix
|
||||||
|
|
||||||
#import pdb; pdb.set_trace()
|
#import pdb;pdb.set_trace()
|
||||||
if p_kalman_filter_type == 'svd':
|
if p_kalman_filter_type == 'svd':
|
||||||
(U,S,Vh) = sp.linalg.svd( P_init,full_matrices=False, compute_uv=True,
|
(U,S,Vh) = sp.linalg.svd( P_init,full_matrices=False, compute_uv=True,
|
||||||
overwrite_a=False,check_finite=True)
|
overwrite_a=False,check_finite=True)
|
||||||
|
|
@ -2798,43 +2890,51 @@ class ContDescrStateSpace(DescreteStateSpace):
|
||||||
m_pred, P_pred, dm_pred, dP_pred = \
|
m_pred, P_pred, dm_pred, dP_pred = \
|
||||||
cls._kalman_prediction_step_SVD(k, prev_mean ,P_upd, p_dynamic_callables,
|
cls._kalman_prediction_step_SVD(k, prev_mean ,P_upd, p_dynamic_callables,
|
||||||
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
p_dm = dm_upd, p_dP = dP_upd )
|
p_dm = dm_upd, p_dP = dP_upd)
|
||||||
else:
|
else:
|
||||||
m_pred, P_pred, dm_pred, dP_pred = \
|
m_pred, P_pred, dm_pred, dP_pred = \
|
||||||
cls._kalman_prediction_step(k, M[k,:] ,P[k,:,:], p_dynamic_callables,
|
cls._kalman_prediction_step(k, prev_mean ,P[k,:,:], p_dynamic_callables,
|
||||||
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
p_dm = dm_upd, p_dP = dP_upd )
|
p_dm = dm_upd, p_dP = dP_upd )
|
||||||
|
|
||||||
#import pdb; pdb.set_trace()
|
#import pdb; pdb.set_trace()
|
||||||
k_measurment = Y[k,:,:]
|
k_measurment = Y[k,:,:]
|
||||||
|
|
||||||
if np.any(np.isnan(k_measurment)):
|
if (np.any(np.isnan(k_measurment)) == False):
|
||||||
raise ValueError("Nan measurements are currently not supported")
|
|
||||||
|
|
||||||
if p_kalman_filter_type == 'svd':
|
if p_kalman_filter_type == 'svd':
|
||||||
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
||||||
cls._kalman_update_step_SVD(k, m_pred , P_pred, p_measurement_callables,
|
cls._kalman_update_step_SVD(k, m_pred , P_pred, p_measurement_callables,
|
||||||
k_measurment, calc_log_likelihood=calc_log_likelihood,
|
k_measurment, calc_log_likelihood=calc_log_likelihood,
|
||||||
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
p_dm = dm_pred, p_dP = dP_pred )
|
p_dm = dm_pred, p_dP = dP_pred )
|
||||||
|
|
||||||
|
|
||||||
# m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
# m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
||||||
# cls._kalman_update_step(k, m_pred , P_pred[0], f_h, f_H, p_R.f_R, k_measurment,
|
# cls._kalman_update_step(k, m_pred , P_pred[0], f_h, f_H, p_R.f_R, k_measurment,
|
||||||
# calc_log_likelihood=calc_log_likelihood,
|
# calc_log_likelihood=calc_log_likelihood,
|
||||||
# calc_grad_log_likelihood=calc_grad_log_likelihood,
|
# calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
# p_dm = dm_pred, p_dP = dP_pred, grad_calc_params_2 = (dH, dR))
|
# p_dm = dm_pred, p_dP = dP_pred, grad_calc_params_2 = (dH, dR))
|
||||||
#
|
#
|
||||||
# (U,S,Vh) = sp.linalg.svd( P_upd,full_matrices=False, compute_uv=True,
|
# (U,S,Vh) = sp.linalg.svd( P_upd,full_matrices=False, compute_uv=True,
|
||||||
# overwrite_a=False,check_finite=True)
|
# overwrite_a=False,check_finite=True)
|
||||||
# P_upd = (P_upd, S,U)
|
# P_upd = (P_upd, S,U)
|
||||||
|
else:
|
||||||
|
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
||||||
|
cls._kalman_update_step(k, m_pred , P_pred, p_measurement_callables, k_measurment,
|
||||||
|
calc_log_likelihood=calc_log_likelihood,
|
||||||
|
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
||||||
|
p_dm = dm_pred, p_dP = dP_pred )
|
||||||
else:
|
else:
|
||||||
m_upd, P_upd, log_likelihood_update, dm_upd, dP_upd, d_log_likelihood_update = \
|
if k_measurment.shape != (1,1):
|
||||||
cls._kalman_update_step(k, m_pred , P_pred, p_measurement_callables, k_measurment,
|
raise ValueError("Nan measurements are currently not supported for \
|
||||||
calc_log_likelihood=calc_log_likelihood,
|
multidimensional output and multiple tiem series.")
|
||||||
calc_grad_log_likelihood=calc_grad_log_likelihood,
|
else:
|
||||||
p_dm = dm_pred, p_dP = dP_pred )
|
m_upd = m_pred; P_upd = P_pred; dm_upd = dm_pred; dP_upd = dP_pred
|
||||||
|
log_likelihood_update = 0.0;
|
||||||
|
d_log_likelihood_update = 0.0;
|
||||||
|
|
||||||
|
|
||||||
if calc_log_likelihood:
|
if calc_log_likelihood:
|
||||||
log_likelihood += log_likelihood_update
|
log_likelihood += log_likelihood_update
|
||||||
|
|
||||||
|
|
@ -2882,7 +2982,7 @@ class ContDescrStateSpace(DescreteStateSpace):
|
||||||
A, Q, dA, dQ fro discrete model from the continuos model.
|
A, Q, dA, dQ fro discrete model from the continuos model.
|
||||||
|
|
||||||
X, F, L, Qc: matrices
|
X, F, L, Qc: matrices
|
||||||
If AQcomp thiese matrices are used to create this object from scratch.
|
If AQcomp is None, these matrices are used to create this object from scratch.
|
||||||
|
|
||||||
Output:
|
Output:
|
||||||
-------------
|
-------------
|
||||||
|
|
@ -2973,7 +3073,7 @@ class ContDescrStateSpace(DescreteStateSpace):
|
||||||
number_unique_indices = len(unique_indices)
|
number_unique_indices = len(unique_indices)
|
||||||
|
|
||||||
#import pdb; pdb.set_trace()
|
#import pdb; pdb.set_trace()
|
||||||
if cython_code_available:
|
if use_cython:
|
||||||
class AQcompute_batch(state_space_cython.AQcompute_batch_Cython):
|
class AQcompute_batch(state_space_cython.AQcompute_batch_Cython):
|
||||||
def __init__(self, F,L,Qc,dt,compute_derivatives=False, grad_params_no=None, P_inf=None, dP_inf=None, dF = None, dQc=None):
|
def __init__(self, F,L,Qc,dt,compute_derivatives=False, grad_params_no=None, P_inf=None, dP_inf=None, dF = None, dQc=None):
|
||||||
As, Qs, reconstruct_indices, dAs, dQs = ContDescrStateSpace.lti_sde_to_descrete(F,
|
As, Qs, reconstruct_indices, dAs, dQs = ContDescrStateSpace.lti_sde_to_descrete(F,
|
||||||
|
|
|
||||||
|
|
@ -29,9 +29,10 @@ import GPy
|
||||||
from .. import likelihoods
|
from .. import likelihoods
|
||||||
|
|
||||||
from . import state_space_main as ssm
|
from . import state_space_main as ssm
|
||||||
|
from . import state_space_setup as ss_setup
|
||||||
|
|
||||||
class StateSpace(Model):
|
class StateSpace(Model):
|
||||||
def __init__(self, X, Y, kernel=None, noise_var=1.0, kalman_filter_type = 'regular', name='StateSpace'):
|
def __init__(self, X, Y, kernel=None, noise_var=1.0, kalman_filter_type = 'regular', use_cython = False, name='StateSpace'):
|
||||||
super(StateSpace, self).__init__(name=name)
|
super(StateSpace, self).__init__(name=name)
|
||||||
self.num_data, input_dim = X.shape
|
self.num_data, input_dim = X.shape
|
||||||
assert input_dim==1, "State space methods for time only"
|
assert input_dim==1, "State space methods for time only"
|
||||||
|
|
@ -43,9 +44,15 @@ class StateSpace(Model):
|
||||||
assert self.output_dim == 1, "State space methods for single outputs only"
|
assert self.output_dim == 1, "State space methods for single outputs only"
|
||||||
|
|
||||||
self.kalman_filter_type = kalman_filter_type
|
self.kalman_filter_type = kalman_filter_type
|
||||||
self.kalman_filter_type = 'svd' # temp test
|
#self.kalman_filter_type = 'svd' # temp test
|
||||||
|
ss_setup.use_cython = use_cython
|
||||||
|
|
||||||
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
|
global ssm
|
||||||
|
#from . import state_space_main as ssm
|
||||||
|
if (ssm.cython_code_available) and (ssm.use_cython != ss_setup.use_cython):
|
||||||
|
reload(ssm)
|
||||||
# Make sure the observations are ordered in time
|
# Make sure the observations are ordered in time
|
||||||
sort_index = np.argsort(X[:,0])
|
sort_index = np.argsort(X[:,0])
|
||||||
self.X = X[sort_index]
|
self.X = X[sort_index]
|
||||||
|
|
@ -73,7 +80,7 @@ class StateSpace(Model):
|
||||||
Parameters have now changed
|
Parameters have now changed
|
||||||
"""
|
"""
|
||||||
np.set_printoptions(16)
|
np.set_printoptions(16)
|
||||||
print(self.param_array)
|
#print(self.param_array)
|
||||||
#import pdb; pdb.set_trace()
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
# Get the model matrices from the kernel
|
# Get the model matrices from the kernel
|
||||||
|
|
@ -111,6 +118,10 @@ class StateSpace(Model):
|
||||||
grad_calc_params['dP_init'] = dP0
|
grad_calc_params['dP_init'] = dP0
|
||||||
|
|
||||||
kalman_filter_type = self.kalman_filter_type
|
kalman_filter_type = self.kalman_filter_type
|
||||||
|
|
||||||
|
# if ss_use_cython:
|
||||||
|
# reload(ssm)
|
||||||
|
# from . import state_space_main as ssm
|
||||||
|
|
||||||
(filter_means, filter_covs, log_likelihood,
|
(filter_means, filter_covs, log_likelihood,
|
||||||
grad_log_likelihood,SmootherMatrObject) = ssm.ContDescrStateSpace.cont_discr_kalman_filter(F,L,Qc,H,
|
grad_log_likelihood,SmootherMatrObject) = ssm.ContDescrStateSpace.cont_discr_kalman_filter(F,L,Qc,H,
|
||||||
|
|
@ -123,10 +134,12 @@ class StateSpace(Model):
|
||||||
#import pdb; pdb.set_trace()
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
if np.any( np.isfinite(log_likelihood) == False):
|
if np.any( np.isfinite(log_likelihood) == False):
|
||||||
import pdb; pdb.set_trace()
|
#import pdb; pdb.set_trace()
|
||||||
|
print("State-Space: NaN valkues in the log_likelihood")
|
||||||
|
|
||||||
if np.any( np.isfinite(grad_log_likelihood) == False):
|
if np.any( np.isfinite(grad_log_likelihood) == False):
|
||||||
import pdb; pdb.set_trace()
|
#import pdb; pdb.set_trace()
|
||||||
|
print("State-Space: NaN valkues in the grad_log_likelihood")
|
||||||
#print(grad_log_likelihood)
|
#print(grad_log_likelihood)
|
||||||
|
|
||||||
grad_log_likelihood_sum = np.sum(grad_log_likelihood,axis=1)
|
grad_log_likelihood_sum = np.sum(grad_log_likelihood,axis=1)
|
||||||
|
|
@ -191,16 +204,14 @@ class StateSpace(Model):
|
||||||
(F,L,Qc,H,P_inf, P0, dF,dQc,dP_inf,dP0) = self.kern.sde()
|
(F,L,Qc,H,P_inf, P0, dF,dQc,dP_inf,dP0) = self.kern.sde()
|
||||||
state_dim = F.shape[0]
|
state_dim = F.shape[0]
|
||||||
|
|
||||||
#import pdb; pdb.set_trace()
|
|
||||||
#Y = self.Y[:, 0,0]
|
#Y = self.Y[:, 0,0]
|
||||||
# Run the Kalman filter
|
# Run the Kalman filter
|
||||||
#import pdb; pdb.set_trace()
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
kalman_filter_type = self.kalman_filter_type
|
kalman_filter_type = self.kalman_filter_type
|
||||||
|
|
||||||
(M, P, log_likelihood,
|
(M, P, log_likelihood,
|
||||||
grad_log_likelihood,SmootherMatrObject) = ssm.ContDescrStateSpace.cont_discr_kalman_filter(
|
grad_log_likelihood,SmootherMatrObject) = ssm.ContDescrStateSpace.cont_discr_kalman_filter(
|
||||||
F,L,Qc,H,float(self.Gaussian_noise.variance),P_inf,self.X,Y,m_init=None,
|
F,L,Qc,H,float(self.Gaussian_noise.variance),P_inf,X,Y,m_init=None,
|
||||||
P_init=P0, p_kalman_filter_type = kalman_filter_type,
|
P_init=P0, p_kalman_filter_type = kalman_filter_type,
|
||||||
calc_log_likelihood=False,
|
calc_log_likelihood=False,
|
||||||
calc_grad_log_likelihood=False)
|
calc_grad_log_likelihood=False)
|
||||||
8
GPy/models/state_space_setup.py
Normal file
8
GPy/models/state_space_setup.py
Normal file
|
|
@ -0,0 +1,8 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
This module is intended for the setup of state_space_main module.
|
||||||
|
The need of this module appeared because of the way state_space_main module
|
||||||
|
connected with cython code.
|
||||||
|
"""
|
||||||
|
|
||||||
|
use_cython = False
|
||||||
330
GPy/testing/gpy_kernels_state_space_tests.py
Normal file
330
GPy/testing/gpy_kernels_state_space_tests.py
Normal file
|
|
@ -0,0 +1,330 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
Testing state space related functions.
|
||||||
|
"""
|
||||||
|
import unittest
|
||||||
|
import numpy as np
|
||||||
|
import GPy
|
||||||
|
import GPy.models.state_space_model as SS_model
|
||||||
|
from state_space_main_tests import generate_x_points, generate_sine_data, \
|
||||||
|
generate_linear_data, generate_brownian_data, generate_linear_plus_sin
|
||||||
|
|
||||||
|
|
||||||
|
class StateSpaceKernelsTests(np.testing.TestCase):
|
||||||
|
def setUp(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def run_for_model(self, X, Y, ss_kernel, kalman_filter_type = 'regular',
|
||||||
|
use_cython=False, check_gradients=True,
|
||||||
|
optimize = True, predict_X=None,
|
||||||
|
compare_with_GP=True, gp_kernel=None,
|
||||||
|
mean_compare_decimal=10, var_compare_decimal=7):
|
||||||
|
|
||||||
|
m1 = SS_model.StateSpace(X,Y, ss_kernel,
|
||||||
|
kalman_filter_type=kalman_filter_type,
|
||||||
|
use_cython=use_cython)
|
||||||
|
|
||||||
|
if check_gradients:
|
||||||
|
self.assertTrue(m1.checkgrad())
|
||||||
|
|
||||||
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
|
if optimize:
|
||||||
|
m1.optimize(optimizer='bfgs')
|
||||||
|
|
||||||
|
if compare_with_GP and (predict_X is None):
|
||||||
|
predict_X = X
|
||||||
|
|
||||||
|
if (predict_X is not None):
|
||||||
|
x_pred_reg_1 = m1.predict(predict_X)
|
||||||
|
x_quant_reg_1 = m1.predict_quantiles(predict_X)
|
||||||
|
|
||||||
|
if compare_with_GP:
|
||||||
|
m2 = GPy.models.GPRegression(X,Y, gp_kernel)
|
||||||
|
m2.optimize(optimizer='bfgs')
|
||||||
|
#print(m2)
|
||||||
|
|
||||||
|
x_pred_reg_2 = m2.predict(predict_X)
|
||||||
|
x_quant_reg_2 = m2.predict_quantiles(predict_X)
|
||||||
|
|
||||||
|
# Test values
|
||||||
|
#print np.max(np.abs(x_pred_reg_1[0]-x_pred_reg_2[0]))
|
||||||
|
np.testing.assert_almost_equal(np.max(np.abs(x_pred_reg_1[0]- \
|
||||||
|
x_pred_reg_2[0])), 0, decimal=mean_compare_decimal)
|
||||||
|
|
||||||
|
# Test variances
|
||||||
|
#print np.max(np.abs(x_pred_reg_1[1]-x_pred_reg_2[1]))
|
||||||
|
|
||||||
|
np.testing.assert_almost_equal(np.max(np.abs(x_pred_reg_1[1]- \
|
||||||
|
x_pred_reg_2[1])), 0, decimal=var_compare_decimal)
|
||||||
|
|
||||||
|
def test_Matern32_kernel(self,):
|
||||||
|
np.random.seed(234) # seed the random number generator
|
||||||
|
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
ss_kernel = GPy.kern.sde_Matern32(1,active_dims=[0,])
|
||||||
|
gp_kernel = GPy.kern.Matern32(1,active_dims=[0,])
|
||||||
|
|
||||||
|
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
compare_with_GP=True,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=10, var_compare_decimal=7)
|
||||||
|
|
||||||
|
def test_Matern52_kernel(self,):
|
||||||
|
np.random.seed(234) # seed the random number generator
|
||||||
|
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
ss_kernel = GPy.kern.sde_Matern52(1,active_dims=[0,])
|
||||||
|
gp_kernel = GPy.kern.Matern52(1,active_dims=[0,])
|
||||||
|
|
||||||
|
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
|
||||||
|
optimize = True, predict_X=X,
|
||||||
|
compare_with_GP=True, gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=8, var_compare_decimal=7)
|
||||||
|
|
||||||
|
def test_RBF_kernel(self,):
|
||||||
|
np.random.seed(234) # seed the random number generator
|
||||||
|
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
ss_kernel = GPy.kern.sde_RBF(1,active_dims=[0,])
|
||||||
|
gp_kernel = GPy.kern.RBF(1,active_dims=[0,])
|
||||||
|
|
||||||
|
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=1, var_compare_decimal=1)
|
||||||
|
|
||||||
|
def test_periodic_kernel(self,):
|
||||||
|
np.random.seed(322) # seed the random number generator
|
||||||
|
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
ss_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
|
||||||
|
ss_kernel.lengthscales.constrain_bounded(0.25, 1000)
|
||||||
|
ss_kernel.wavelengths.constrain_bounded(0.15, 100)
|
||||||
|
|
||||||
|
gp_kernel = GPy.kern.StdPeriodic(1,active_dims=[0,])
|
||||||
|
gp_kernel.lengthscales.constrain_bounded(0.25, 1000)
|
||||||
|
gp_kernel.wavelengths.constrain_bounded(0.15, 100)
|
||||||
|
|
||||||
|
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=4, var_compare_decimal=4)
|
||||||
|
|
||||||
|
def test_quasi_periodic_kernel(self,):
|
||||||
|
np.random.seed(329) # seed the random number generator
|
||||||
|
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
ss_kernel = GPy.kern.sde_Matern32(1)*GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
|
||||||
|
ss_kernel.std_periodic.lengthscales.constrain_bounded(0.25, 1000)
|
||||||
|
ss_kernel.std_periodic.wavelengths.constrain_bounded(0.15, 100)
|
||||||
|
|
||||||
|
gp_kernel = GPy.kern.Matern32(1)*GPy.kern.StdPeriodic(1,active_dims=[0,])
|
||||||
|
gp_kernel.std_periodic.lengthscales.constrain_bounded(0.25, 1000)
|
||||||
|
gp_kernel.std_periodic.wavelengths.constrain_bounded(0.15, 100)
|
||||||
|
|
||||||
|
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=1, var_compare_decimal=2)
|
||||||
|
|
||||||
|
def test_linear_kernel(self,):
|
||||||
|
|
||||||
|
np.random.seed(234) # seed the random number generator
|
||||||
|
(X,Y) = generate_linear_data(x_points=None, tangent=2.0, add_term=20.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
ss_kernel = GPy.kern.sde_Linear(1,X,active_dims=[0,]) + GPy.kern.sde_Bias(1, active_dims=[0,])
|
||||||
|
gp_kernel = GPy.kern.Linear(1, active_dims=[0,]) + GPy.kern.Bias(1, active_dims=[0,])
|
||||||
|
|
||||||
|
self.run_for_model(X, Y, ss_kernel, check_gradients= False,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=5, var_compare_decimal=5)
|
||||||
|
|
||||||
|
def test_brownian_kernel(self,):
|
||||||
|
np.random.seed(234) # seed the random number generator
|
||||||
|
(X,Y) = generate_brownian_data(x_points=None, kernel_var=2.0, noise_var = 0.1,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
ss_kernel = GPy.kern.sde_Brownian()
|
||||||
|
gp_kernel = GPy.kern.Brownian()
|
||||||
|
|
||||||
|
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=10, var_compare_decimal=7)
|
||||||
|
|
||||||
|
def test_exponential_kernel(self,):
|
||||||
|
np.random.seed(234) # seed the random number generator
|
||||||
|
(X,Y) = generate_linear_data(x_points=None, tangent=1.0, add_term=20.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
ss_kernel = GPy.kern.sde_Exponential(1, active_dims=[0,])
|
||||||
|
gp_kernel = GPy.kern.Exponential(1, active_dims=[0,])
|
||||||
|
|
||||||
|
self.run_for_model(X, Y, ss_kernel, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=5, var_compare_decimal=6)
|
||||||
|
|
||||||
|
def test_kernel_addition(self,):
|
||||||
|
np.random.seed(329) # seed the random number generator
|
||||||
|
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
def get_new_kernels():
|
||||||
|
ss_kernel = GPy.kern.sde_Matern32(1) + GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
|
||||||
|
ss_kernel.std_periodic.lengthscales.constrain_bounded(0.25, 1000)
|
||||||
|
ss_kernel.std_periodic.wavelengths.constrain_bounded(0.15, 100)
|
||||||
|
|
||||||
|
gp_kernel = GPy.kern.Matern32(1) + GPy.kern.StdPeriodic(1,active_dims=[0,])
|
||||||
|
gp_kernel.std_periodic.lengthscales.constrain_bounded(0.25, 1000)
|
||||||
|
gp_kernel.std_periodic.wavelengths.constrain_bounded(0.15, 100)
|
||||||
|
|
||||||
|
return ss_kernel, gp_kernel
|
||||||
|
|
||||||
|
# Cython is available only with svd.
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'svd',
|
||||||
|
use_cython=True, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=0, var_compare_decimal=-1)
|
||||||
|
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'regular',
|
||||||
|
use_cython=False, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=4, var_compare_decimal=3)
|
||||||
|
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'svd',
|
||||||
|
use_cython=False, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=0, var_compare_decimal=-1)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def test_kernel_multiplication(self,):
|
||||||
|
np.random.seed(329) # seed the random number generator
|
||||||
|
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=10.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=50, x_interval = (0, 20), random=True)
|
||||||
|
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
|
||||||
|
def get_new_kernels():
|
||||||
|
ss_kernel = GPy.kern.sde_Matern32(1)*GPy.kern.sde_Matern52(1)
|
||||||
|
gp_kernel = GPy.kern.Matern32(1)*GPy.kern.sde_Matern52(1)
|
||||||
|
|
||||||
|
return ss_kernel, gp_kernel
|
||||||
|
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'svd',
|
||||||
|
use_cython=True, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=-1, var_compare_decimal=0)
|
||||||
|
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'regular',
|
||||||
|
use_cython=False, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=-1, var_compare_decimal=0)
|
||||||
|
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X, Y, ss_kernel, kalman_filter_type = 'svd',
|
||||||
|
use_cython=False, check_gradients=True,
|
||||||
|
predict_X=X,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=-1, var_compare_decimal=0)
|
||||||
|
|
||||||
|
def test_forecast(self,):
|
||||||
|
"""
|
||||||
|
Test time series forecaasting.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Generate data ->
|
||||||
|
np.random.seed(339) # seed the random number generator
|
||||||
|
#import pdb; pdb.set_trace()
|
||||||
|
(X,Y) = generate_sine_data(x_points=None, sin_period=5.0, sin_ampl=5.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=100, x_interval = (0, 40), random=True)
|
||||||
|
|
||||||
|
(X1,Y1) = generate_linear_data(x_points=X, tangent=1.0, add_term=20.0, noise_var=0.0,
|
||||||
|
plot = False, points_num=100, x_interval = (0, 40), random=True)
|
||||||
|
|
||||||
|
Y = Y + Y1
|
||||||
|
|
||||||
|
X_train = X[X <= 20]
|
||||||
|
Y_train = Y[X <= 20]
|
||||||
|
X_test = X[X > 20]
|
||||||
|
Y_test = Y[X > 20]
|
||||||
|
|
||||||
|
X.shape = (X.shape[0],1); Y.shape = (Y.shape[0],1)
|
||||||
|
X_train.shape = (X_train.shape[0],1); Y_train.shape = (Y_train.shape[0],1)
|
||||||
|
X_test.shape = (X_test.shape[0],1); Y_test.shape = (Y_test.shape[0],1)
|
||||||
|
# Generate data <-
|
||||||
|
|
||||||
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
|
def get_new_kernels():
|
||||||
|
periodic_kernel = GPy.kern.StdPeriodic(1,active_dims=[0,])
|
||||||
|
gp_kernel = GPy.kern.Linear(1, active_dims=[0,]) + GPy.kern.Bias(1, active_dims=[0,]) + periodic_kernel
|
||||||
|
gp_kernel.std_periodic.lengthscales.constrain_bounded(0.25, 1000)
|
||||||
|
gp_kernel.std_periodic.wavelengths.constrain_bounded(0.15, 100)
|
||||||
|
|
||||||
|
periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
|
||||||
|
ss_kernel = GPy.kern.sde_Linear(1,X,active_dims=[0,]) + \
|
||||||
|
GPy.kern.sde_Bias(1, active_dims=[0,]) + periodic_kernel
|
||||||
|
|
||||||
|
ss_kernel.std_periodic.lengthscales.constrain_bounded(0.25, 1000)
|
||||||
|
ss_kernel.std_periodic.wavelengths.constrain_bounded(0.15, 100)
|
||||||
|
|
||||||
|
return ss_kernel, gp_kernel
|
||||||
|
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X_train, Y_train, ss_kernel, kalman_filter_type = 'regular',
|
||||||
|
use_cython=False, check_gradients=True,
|
||||||
|
predict_X=X_test,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=0, var_compare_decimal=0)
|
||||||
|
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X_train, Y_train, ss_kernel, kalman_filter_type = 'svd',
|
||||||
|
use_cython=False, check_gradients=False,
|
||||||
|
predict_X=X_test,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=0, var_compare_decimal=-1)
|
||||||
|
|
||||||
|
ss_kernel, gp_kernel = get_new_kernels()
|
||||||
|
self.run_for_model(X_train, Y_train, ss_kernel, kalman_filter_type = 'svd',
|
||||||
|
use_cython=True, check_gradients=False,
|
||||||
|
predict_X=X_test,
|
||||||
|
gp_kernel=gp_kernel,
|
||||||
|
mean_compare_decimal=0, var_compare_decimal=-1)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
print("Running state-space inference tests...")
|
||||||
|
unittest.main()
|
||||||
975
GPy/testing/state_space_main_tests.py
Normal file
975
GPy/testing/state_space_main_tests.py
Normal file
|
|
@ -0,0 +1,975 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
Test module for state_space_main.py
|
||||||
|
"""
|
||||||
|
|
||||||
|
import unittest
|
||||||
|
import numpy as np
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from scipy.stats import norm
|
||||||
|
|
||||||
|
import GPy.models.state_space_setup as ss_setup
|
||||||
|
import GPy.models.state_space_main as ssm
|
||||||
|
|
||||||
|
def generate_x_points(points_num=100, x_interval = (0, 20), random=True):
|
||||||
|
"""
|
||||||
|
Function generates (sorted) points on the x axis.
|
||||||
|
|
||||||
|
Input:
|
||||||
|
---------------------------
|
||||||
|
points_num: int
|
||||||
|
How many points to generate
|
||||||
|
x_interval: tuple (a,b)
|
||||||
|
On which interval to generate points
|
||||||
|
random: bool
|
||||||
|
Regular points or random
|
||||||
|
|
||||||
|
Output:
|
||||||
|
---------------------------
|
||||||
|
x_points: np.array
|
||||||
|
Generated points
|
||||||
|
"""
|
||||||
|
|
||||||
|
x_interval = np.asarray( x_interval )
|
||||||
|
|
||||||
|
if random:
|
||||||
|
x_points = np.random.rand(points_num) * ( x_interval[1] - x_interval[0] ) + x_interval[0]
|
||||||
|
x_points = np.sort( x_points )
|
||||||
|
else:
|
||||||
|
x_points = np.linspace(x_interval[0], x_interval[1], num=points_num )
|
||||||
|
|
||||||
|
return x_points
|
||||||
|
|
||||||
|
def generate_sine_data(x_points=None, sin_period=2.0, sin_ampl=10.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=100, x_interval = (0, 20), random=True):
|
||||||
|
"""
|
||||||
|
Function generates sinusoidal data.
|
||||||
|
|
||||||
|
Input:
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
x_points: np.array
|
||||||
|
Previously generated X points
|
||||||
|
sin_period: float
|
||||||
|
Sine period
|
||||||
|
sin_ampl: float
|
||||||
|
Sine amplitude
|
||||||
|
noise_var: float
|
||||||
|
Gaussian noise variance added to the sine function
|
||||||
|
plot: bool
|
||||||
|
Whether to plot generated data
|
||||||
|
|
||||||
|
(if x_points is None, the the following parameters are used to generate
|
||||||
|
those. They are the same as in 'generate_x_points' function)
|
||||||
|
|
||||||
|
points_num: int
|
||||||
|
|
||||||
|
x_interval: tuple (a,b)
|
||||||
|
|
||||||
|
random: bool
|
||||||
|
"""
|
||||||
|
|
||||||
|
sin_function = lambda xx: sin_ampl * np.sin( 2*np.pi/sin_period * xx )
|
||||||
|
|
||||||
|
if x_points is None:
|
||||||
|
x_points = generate_x_points(points_num, x_interval, random)
|
||||||
|
|
||||||
|
y_points = sin_function( x_points ) + np.random.randn( len(x_points) ) * np.sqrt(noise_var)
|
||||||
|
|
||||||
|
if plot:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return x_points, y_points
|
||||||
|
|
||||||
|
def generate_linear_data(x_points=None, tangent=2.0, add_term=1.0, noise_var=2.0,
|
||||||
|
plot = False, points_num=100, x_interval = (0, 20), random=True):
|
||||||
|
"""
|
||||||
|
Function generates linear data.
|
||||||
|
|
||||||
|
Input:
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
x_points: np.array
|
||||||
|
Previously generated X points
|
||||||
|
tangent: float
|
||||||
|
Factor with which independent variable is multiplied in linear equation.
|
||||||
|
add_term: float
|
||||||
|
Additive term in linear equation.
|
||||||
|
noise_var: float
|
||||||
|
Gaussian noise variance added to the sine function
|
||||||
|
plot: bool
|
||||||
|
Whether to plot generated data
|
||||||
|
|
||||||
|
(if x_points is None, the the following parameters are used to generate
|
||||||
|
those. They are the same as in 'generate_x_points' function)
|
||||||
|
|
||||||
|
points_num: int
|
||||||
|
|
||||||
|
x_interval: tuple (a,b)
|
||||||
|
|
||||||
|
random: bool
|
||||||
|
"""
|
||||||
|
|
||||||
|
linear_function = lambda xx: tangent*xx + add_term
|
||||||
|
|
||||||
|
if x_points is None:
|
||||||
|
x_points = generate_x_points(points_num, x_interval, random)
|
||||||
|
|
||||||
|
y_points = linear_function( x_points ) + np.random.randn( len(x_points) ) * np.sqrt(noise_var)
|
||||||
|
|
||||||
|
if plot:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return x_points, y_points
|
||||||
|
|
||||||
|
def generate_brownian_data(x_points=None, kernel_var = 2.0, noise_var = 2.0,
                           plot = False, points_num=100, x_interval = (0, 20), random=True):
    """
    Generate brownian data - data from Brownian motion.

    First point is always 0, and B(0) = 0 - standard conditions for
    Brownian motion.

    Input:
    --------------------------------

    x_points: np.array
        Previously generated X points. NOTE: modified in place when its
        first element is not 0. Assumed sorted in increasing order (a
        negative time increment would give sqrt of a negative number) -
        TODO confirm at call sites.
    kernel_var: float
        Diffusion (process) variance of the Brownian motion.
    noise_var: float
        Gaussian observation-noise variance added to every sample.
    plot: bool
        Whether to plot generated data (currently unused).

    (if x_points is None, the following parameters are used to generate
    those. They are the same as in 'generate_x_points' function)

    points_num: int
    x_interval: tuple (a,b)
    random: bool

    Output:
    --------------------------------
    x_points, y_points: np.arrays of equal length.
    """

    if x_points is None:
        x_points = generate_x_points(points_num, x_interval, random)
    if x_points[0] != 0:
        x_points[0] = 0

    # Bug fix: size everything from the actual number of x points, not the
    # 'points_num' default, so externally supplied x_points of any length
    # work correctly (previously a 10-point x_points with the default
    # points_num=100 raised an IndexError).
    n = len(x_points)

    y_points = np.zeros( (n,) )
    for i in range(1, n):
        # Brownian increments are independent Gaussians with variance
        # proportional to the time step.
        noise = np.random.randn() * np.sqrt(kernel_var * (x_points[i] - x_points[i-1]))
        y_points[i] = y_points[i-1] + noise

    # Add i.i.d. observation noise on top of the latent path.
    y_points += np.random.randn( n ) * np.sqrt(noise_var)

    return x_points, y_points
|
||||||
|
|
||||||
|
def generate_linear_plus_sin(x_points=None, tangent=2.0, add_term=1.0, noise_var=2.0,
                             sin_period=2.0, sin_ampl=10.0, plot = False,
                             points_num=100, x_interval = (0, 20), random=True):
    """
    Generate data that is the sum of a linear trend and a sine wave.

    Parameters are those of 'generate_linear_data' and 'generate_sine_data';
    see those functions for details.

    Note: the Gaussian observation noise is added only once (through the
    linear component); the sine component is generated noise-free.
    """

    # Linear trend carries the observation noise.
    x_points, trend = generate_linear_data(x_points, tangent, add_term, noise_var,
                                           False, points_num, x_interval, random)

    # Sine component is generated on the same x grid with zero noise.
    x_points, wave = generate_sine_data(x_points, sin_period, sin_ampl, 0.0,
                                        False, points_num, x_interval, random)

    y_points = trend + wave

    if plot:
        pass  # plotting not implemented

    return x_points, y_points
|
||||||
|
|
||||||
|
def generate_random_y_data(samples, dim, ts_no):
    """
    Draw i.i.d. standard-normal measurement data.

    Input:
    ------------------

    samples: int
        How many samples (time steps).
    dim: int
        Dimensionality of each measurement.
    ts_no: int
        Number of time series.

    Output:
    --------------------------
    Y: np.array((samples, dim, ts_no)); when ts_no == 1 the trailing
       axis is dropped and the shape is (samples, dim).
    """

    Y = np.empty((samples, dim, ts_no))

    # One randn(dim) draw per (sample, time-series) pair; this exact draw
    # order is kept so seeded runs stay reproducible.
    for s in range(samples):
        for t in range(ts_no):
            Y[s, :, t] = np.random.randn(dim)

    # A single time series is returned as a 2D array.
    if ts_no == 1:
        Y = Y.reshape(samples, dim)

    return Y
|
||||||
|
|
||||||
|
|
||||||
|
class StateSpaceKernelsTests(np.testing.TestCase):
|
||||||
|
    def setUp(self):
        # No shared fixtures are needed; tests that require determinism
        # seed numpy's RNG themselves.
        pass
|
||||||
|
|
||||||
|
    def run_descr_model(self, measurements, A,Q,H,R, true_states=None,
                        mean_compare_decimal=8,
                        m_init=None, P_init=None, dA=None,dQ=None,
                        dH=None,dR=None, use_cython=False,
                        kalman_filter_type='regular',
                        calc_log_likelihood=True,
                        calc_grad_log_likelihood=True):
        """
        Run the discrete-time Kalman filter and RTS smoother on the model
        (A, Q, H, R) and check the outputs.

        Asserts that the filtered mean/covariance arrays have the expected
        shapes and, when 'true_states' is given, that the filter means agree
        with it to 'mean_compare_decimal' decimals.

        Parameters:
        -----------------
        measurements: np.array
            Measurement data; a third axis (if present) indexes time series.
        A, Q, H, R:
            Discrete state-space model matrices (scalars are allowed for a
            one-dimensional state).
        true_states: np.array or None
            Reference state sequence to compare the filter means against.
        mean_compare_decimal: int
            Decimal precision for the comparison with 'true_states'.
        m_init, P_init:
            Initial state mean and covariance passed to the filter.
        dA, dQ, dH, dR: np.array or None
            Parameter derivatives of the model matrices (third axis indexes
            parameters); used only when calc_grad_log_likelihood is True.
        use_cython: bool
            Which back-end to request from the state-space module.
        kalman_filter_type: str
            Filter implementation to use (e.g. 'regular', 'svd').
        calc_log_likelihood, calc_grad_log_likelihood: bool
            Whether to compute the (gradient of the) marginal likelihood.

        Returns:
        -----------------
        f_mean, f_var:
            Filtered state means and covariances; position 0 holds the
            initial value.
        """

        #import pdb; pdb.set_trace()

        # Scalars denote a 1D state; otherwise the state dimension is A's size.
        state_dim = 1 if not isinstance(A,np.ndarray) else A.shape[0]
        ts_no = 1 if (len(measurements.shape) < 3) else measurements.shape[2]
        # Number of parameters is taken from the third axis of dA.
        grad_params_no = None if dA is None else dA.shape[2]

        # Switch the back-end and reload the state-space module if the
        # currently loaded one does not match the request.
        ss_setup.use_cython = use_cython
        global ssm
        if (ssm.cython_code_available) and (ssm.use_cython != use_cython):
            reload(ssm)

        grad_calc_params = None
        if calc_grad_log_likelihood:
            grad_calc_params = {}
            grad_calc_params['dA'] = dA
            grad_calc_params['dQ'] = dQ
            grad_calc_params['dH'] = dH
            grad_calc_params['dR'] = dR

        (f_mean, f_var, loglikelhood, g_loglikelhood, \
         dynamic_callables_smoother) = ssm.DescreteStateSpace.kalman_filter(A, Q, H, R, measurements, index=None,
                                    m_init=m_init, P_init=P_init, p_kalman_filter_type = kalman_filter_type,
                                    calc_log_likelihood=calc_log_likelihood,
                                    calc_grad_log_likelihood=calc_grad_log_likelihood,
                                    grad_params_no=grad_params_no,
                                    grad_calc_params=grad_calc_params)

        f_mean_squeezed = np.squeeze(f_mean[1:,:]) # exclude initial value
        f_var_squeezed = np.squeeze(f_var[1:,:]) # exclude initial value

        if true_states is not None:
            #print np.max(np.abs(f_mean_squeezed-true_states))
            # Maximum absolute deviation from the reference must be ~0.
            np.testing.assert_almost_equal(np.max(np.abs(f_mean_squeezed- \
                true_states)), 0, decimal=mean_compare_decimal)

        # Filter output includes the initial value, hence the +1.
        np.testing.assert_equal(f_mean.shape, (measurements.shape[0]+1,state_dim,ts_no) )
        np.testing.assert_equal(f_var.shape, (measurements.shape[0]+1,state_dim,state_dim) )

        # The smoother is run only to check that it executes without errors.
        (M_smooth, P_smooth) = ssm.DescreteStateSpace.rts_smoother(state_dim, dynamic_callables_smoother, f_mean,
                                                                   f_var)

        return f_mean, f_var
|
||||||
|
|
||||||
|
def run_continuous_model(self, F, L, Qc, p_H, p_R, P_inf, X_data, Y_data, index = None,
|
||||||
|
m_init=None, P_init=None, use_cython=False,
|
||||||
|
kalman_filter_type='regular',
|
||||||
|
calc_log_likelihood=True,
|
||||||
|
calc_grad_log_likelihood=True,
|
||||||
|
grad_params_no=0, grad_calc_params=None):
|
||||||
|
|
||||||
|
#import pdb; pdb.set_trace()
|
||||||
|
|
||||||
|
state_dim = 1 if not isinstance(F,np.ndarray) else F.shape[0]
|
||||||
|
ts_no = 1 if (len(Y_data.shape) < 3) else Y_data.shape[2]
|
||||||
|
|
||||||
|
ss_setup.use_cython = use_cython
|
||||||
|
global ssm
|
||||||
|
if (ssm.cython_code_available) and (ssm.use_cython != use_cython):
|
||||||
|
reload(ssm)
|
||||||
|
|
||||||
|
(f_mean, f_var, loglikelhood, g_loglikelhood, \
|
||||||
|
dynamic_callables_smoother) = ssm.ContDescrStateSpace.cont_discr_kalman_filter(F, L, Qc, p_H, p_R,
|
||||||
|
P_inf, X_data, Y_data, index = None,
|
||||||
|
m_init=None, P_init=None,
|
||||||
|
p_kalman_filter_type='regular',
|
||||||
|
calc_log_likelihood=False,
|
||||||
|
calc_grad_log_likelihood=False,
|
||||||
|
grad_params_no=0, grad_calc_params=grad_calc_params)
|
||||||
|
|
||||||
|
f_mean_squeezed = np.squeeze(f_mean[1:,:]) # exclude initial value
|
||||||
|
f_var_squeezed = np.squeeze(f_var[1:,:]) # exclude initial value
|
||||||
|
|
||||||
|
np.testing.assert_equal(f_mean.shape, (Y_data.shape[0]+1,state_dim,ts_no))
|
||||||
|
np.testing.assert_equal(f_var.shape, (Y_data.shape[0]+1,state_dim,state_dim))
|
||||||
|
|
||||||
|
(M_smooth, P_smooth) = ssm.ContDescrStateSpace.cont_discr_rts_smoother(state_dim, f_mean, \
|
||||||
|
f_var,dynamic_callables_smoother)
|
||||||
|
|
||||||
|
return f_mean, f_var
|
||||||
|
|
||||||
|
    def test_discrete_ss_first(self,plot=False):
        """
        Tests discrete State-Space model with different data dimensions.

        A scalar random-walk model is simulated, filtered with a brute-force
        grid discretization of the Bayesian filtering recursion, and the
        Kalman filter ('regular' and 'svd' variants, python and cython
        back-ends) is required to agree with the grid estimates to 1 decimal.
        """
        np.random.seed(235) # seed the random number generator

        A = 1.0 # For cython code to run properly need float input
        H = 1.0
        Q = 1.0
        R = 1.0

        steps_num = 100

        # generate data ->
        true_states = np.zeros((steps_num,))
        init_state = 0
        measurements = np.zeros((steps_num,))

        for s in range(0, steps_num):
            if s== 0:
                true_states[0] = init_state + np.sqrt(Q)*np.random.randn()
            else:
                # NOTE(review): uses R here where Q (process noise) seems
                # intended; numerically harmless since Q == R == 1.0 - confirm.
                true_states[s] = true_states[s-1] + np.sqrt(R)*np.random.randn()
            measurements[s] = true_states[s] + np.sqrt(R)*np.random.randn()
        # generate data <-

        # grid-discretized Bayesian filter (reference solution) ->
        m_init = 0; P_init = 1
        d_num = 1000
        state_discr = np.linspace(-10,10,d_num)

        # Column i holds the transition density from grid point i.
        state_trans_matrix = np.empty((d_num,d_num))
        for i in range(d_num):
            state_trans_matrix[:,i] = norm.pdf(state_discr, loc=A*state_discr[i], scale=np.sqrt(Q))

        m_prev = norm.pdf(state_discr, loc = m_init, scale = np.sqrt(P_init)); #m_prev / np.sum(m_prev)
        m = np.zeros((d_num, steps_num))
        i_mean = np.zeros((steps_num,))

        for s in range(0, steps_num):
            # Prediction step:
            if (s==0):
                m[:,s] = np.dot(state_trans_matrix, m_prev)
            else:
                m[:,s] = np.dot(state_trans_matrix, m[:,s-1])
            # Update step:
            #meas_ind = np.argmin(np.abs(state_discr - measurements[s])
            y_vec = np.zeros( (d_num,))
            for i in range(d_num):
                y_vec[i] = norm.pdf(measurements[s], loc=H*state_discr[i], scale=np.sqrt(R))
            norm_const = np.dot( y_vec, m[:,s] )
            m[:,s] = y_vec * m[:,s] / norm_const

            # Grid point with the maximal posterior probability.
            i_mean[s] = state_discr[ np.argmax(m[:,s]) ]
        # grid-discretized Bayesian filter <-

        # Kalman filter must match the grid reference to 1 decimal for all
        # filter variants and back-ends.
        (f_mean, f_var) = self.run_descr_model(measurements, A,Q,H,R, true_states=i_mean,
                                               mean_compare_decimal=1,
                                               m_init=m_init, P_init=P_init,use_cython=False,
                                               kalman_filter_type='regular',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=False)

        (f_mean, f_var) = self.run_descr_model(measurements, A,Q,H,R, true_states=i_mean,
                                               mean_compare_decimal=1,
                                               m_init=m_init, P_init=P_init,use_cython=False,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=False)

        (f_mean, f_var) = self.run_descr_model(measurements, A,Q,H,R, true_states=i_mean,
                                               mean_compare_decimal=1,
                                               m_init=m_init, P_init=P_init,use_cython=True,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=False)

        if plot:
            # plotting ->
            plt.figure()
            plt.plot( true_states, 'g.-',label='true states')
            #plt.plot( measurements, 'b.-', label='measurements')
            plt.plot( f_mean, 'r.-',label='Kalman filter estimates')
            plt.plot( i_mean, 'k.-', label='Discretization')

            plt.plot( f_mean + 2*np.sqrt(f_var), 'r.--')
            plt.plot( f_mean - 2*np.sqrt(f_var), 'r.--')
            plt.legend()
            plt.show()
            # plotting <-
        return None
|
||||||
|
|
||||||
|
    def test_discrete_ss_1D(self,plot=False):
        """
        This function tests Kalman filter and smoothing when the state
        dimensionality is one dimensional.

        Three data configurations are exercised: 1D measurement with one
        time series, 1D measurement with three time series, and 2D
        measurement with three time series; each is run with the 'regular'
        and 'svd' filters (and, where enabled, the cython back-end).
        """

        np.random.seed(234) # seed the random number generator

        # 1D ss model
        state_dim = 1;
        param_num = 2 # sigma_Q, sigma_R - parameters
        measurement_dim = 1 # dimensionality of measurement

        A = 1.0
        Q = 2.0
        # Parameter derivatives: only dQ/d(sigma_Q) is nonzero for the state.
        dA= np.zeros((state_dim,state_dim,param_num))
        dQ = np.zeros((state_dim,state_dim,param_num)); dQ[0,0,0] = 1.0

        # measurement related parameters (subject to change) ->
        H = np.ones((measurement_dim,state_dim ))
        R = 0.5 * np.eye(measurement_dim)
        dH = np.zeros((measurement_dim,state_dim,param_num))
        # Only dR/d(sigma_R) (parameter index 1) is nonzero.
        dR = np.zeros((measurement_dim,measurement_dim,param_num)); dR[:,:,1] = np.eye(measurement_dim)
        # measurement related parameters (subject to change) <-

        # 1D measurement, 1 ts_no ->
        data = generate_random_y_data(10, 1, 1) # np.array((samples, dim, ts_no))

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='regular',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=True,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        if plot:
            # plotting ->
            plt.figure()
            plt.plot( np.squeeze(data), 'g.-', label='measurements')
            plt.plot( np.squeeze(f_mean[1:]), 'b.-',label='Kalman filter estimates')
            plt.plot( np.squeeze(f_mean[1:]+H*f_var[1:]*H), 'b--')
            plt.plot( np.squeeze(f_mean[1:]-H*f_var[1:]*H), 'b--')
            # plt.plot( np.squeeze(M_sm[1:]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:]+H*P_sm[1:]*H), 'r--')
            # plt.plot( np.squeeze(M_sm[1:]-H*P_sm[1:]*H), 'r--')
            plt.legend()
            plt.title("1D state-space, 1D measurements, 1 ts_no")
            plt.show()
            # plotting <-
        # 1D measurement, 1 ts_no <-


        # 1D measurement, 3 ts_no ->
        data = generate_random_y_data(10, 1, 3) # np.array((samples, dim, ts_no))

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='regular',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=True,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        #import pdb; pdb.set_trace()
        if plot:
            # plotting ->
            plt.figure()
            plt.plot( np.squeeze(data[:,:,1]), 'g.-', label='measurements')
            plt.plot( np.squeeze(f_mean[1:,0,1]), 'b.-',label='Kalman filter estimates')
            plt.plot( np.squeeze(f_mean[1:,0,1])+np.squeeze(H*f_var[1:]*H), 'b--')
            plt.plot( np.squeeze(f_mean[1:,0,1])-np.squeeze(H*f_var[1:]*H), 'b--')
            # plt.plot( np.squeeze(M_sm[1:,0,1]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:,0,1])+H*np.squeeze(P_sm[1:])*H, 'r--')
            # plt.plot( np.squeeze(M_sm[1:,0,1])-H*np.squeeze(P_sm[1:])*H, 'r--')
            plt.legend()
            plt.title("1D state-space, 1D measurements, 3 ts_no. 2-nd ts ploted")
            plt.show()
            # plotting <-
        # 1D measurement, 3 ts_no <-

        # 2D measurement, 3 ts_no ->
        # measurement related parameters (subject to change) ->
        measurement_dim = 2 # dimensionality of measurement

        H = np.ones((measurement_dim,state_dim))
        R = 0.5 * np.eye(measurement_dim)
        dH = np.zeros((measurement_dim,state_dim,param_num))
        dR = np.zeros((measurement_dim,measurement_dim,param_num)); dR[:,:,1] = np.eye(measurement_dim)
        # measurement related parameters (subject to change) <-

        data = generate_random_y_data(10, 2, 3) # np.array((samples, dim, ts_no))

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='regular',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        # Cython back-end for 2D measurements is currently disabled.
        # (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
        #                                        mean_compare_decimal=16,
        #                                        m_init=None, P_init=None, dA=dA,dQ=dQ,
        #                                        dH=dH,dR=dR, use_cython=True,
        #                                        kalman_filter_type='svd',
        #                                        calc_log_likelihood=True,
        #                                        calc_grad_log_likelihood=True)

        if plot:
            # plotting ->
            plt.figure()
            plt.plot( np.squeeze(data[:,0,1]), 'g.-', label='measurements')
            plt.plot( np.squeeze(f_mean[1:,0,1]), 'b.-',label='Kalman filter estimates')
            plt.plot( np.squeeze(f_mean[1:,0,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            plt.plot( np.squeeze(f_mean[1:,0,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            # plt.plot( np.squeeze(M_sm[1:,0,1]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:,0,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            # plt.plot( np.squeeze(M_sm[1:,0,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            plt.legend()
            plt.title("1D state-space, 2D measurements, 3 ts_no. 1-st measurement, 2-nd ts ploted")
            plt.show()
            # plotting <-
        # 2D measurement, 3 ts_no <-
|
||||||
|
|
||||||
|
    def test_discrete_ss_2D(self,plot=False):
        """
        This function tests Kalman filter and smoothing when the state
        dimensionality is two dimensional.

        Three data configurations are exercised: 1D measurement with one
        time series, 1D measurement with three time series, and 2D
        measurement with three time series; each is run with the 'regular'
        and 'svd' filters (and, where enabled, the cython back-end).
        """

        np.random.seed(234) # seed the random number generator

        # 2D ss model
        state_dim = 2;
        param_num = 3 # sigma_Q, sigma_R, one parameter in A - parameters
        measurement_dim = 1 # dimensionality of measurement

        A = np.eye(state_dim); A[0,0] = 0.5
        Q = np.ones((state_dim,state_dim));
        # Parameter derivatives: A depends on parameter 2, Q on parameter 1.
        dA = np.zeros((state_dim,state_dim,param_num)); dA[1,1,2] = 1
        dQ = np.zeros((state_dim,state_dim,param_num)); dQ[:,:,1] = np.eye(measurement_dim)

        # measurement related parameters (subject to change) ->
        H = np.ones((measurement_dim,state_dim))
        R = 0.5 * np.eye(measurement_dim)
        dH = np.zeros((measurement_dim,state_dim,param_num))
        dR = np.zeros((measurement_dim,measurement_dim,param_num)); dR[:,:,1] = np.eye(measurement_dim)
        # measurement related parameters (subject to change) <-

        # 1D measurement, 1 ts_no ->
        data = generate_random_y_data(10, 1, 1) # np.array((samples, dim, ts_no))

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='regular',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=True,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)
        if plot:
            # plotting ->
            plt.figure()
            plt.plot( np.squeeze(data), 'g.-', label='measurements')
            plt.plot( np.squeeze(f_mean[1:,0]), 'b.-',label='Kalman filter estimates')
            plt.plot( np.squeeze(f_mean[1:,0])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            plt.plot( np.squeeze(f_mean[1:,0])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            # plt.plot( np.squeeze(M_sm[1:,0]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:,0])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            # plt.plot( np.squeeze(M_sm[1:,0])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            plt.legend()
            plt.title("2D state-space, 1D measurements, 1 ts_no")
            plt.show()
            # plotting <-
        # 1D measurement, 1 ts_no <-

        # 1D measurement, 3 ts_no ->
        data = generate_random_y_data(10, 1, 3) # np.array((samples, dim, ts_no))
        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='regular',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=True,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)
        if plot:
            # plotting ->
            plt.figure()
            plt.plot( np.squeeze(data[:,:,1]), 'g.-', label='measurements')
            plt.plot( np.squeeze(f_mean[1:,0,1]), 'b.-',label='Kalman filter estimates')
            plt.plot( np.squeeze(f_mean[1:,0,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            plt.plot( np.squeeze(f_mean[1:,0,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            # plt.plot( np.squeeze(M_sm[1:,0,1]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:,0,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            # plt.plot( np.squeeze(M_sm[1:,0,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            plt.legend()
            plt.title("2D state-space, 1D measurements, 3 ts_no. 2-nd ts ploted")
            plt.show()
            # plotting <-
        # 1D measurement, 3 ts_no <-

        # 2D measurement, 3 ts_no ->
        # measurement related parameters (subject to change) ->
        measurement_dim = 2 # dimensionality of measurement

        H = np.ones((measurement_dim,state_dim))
        R = 0.5 * np.eye(measurement_dim)
        dH = np.zeros((measurement_dim,state_dim,param_num))
        dR = np.zeros((measurement_dim,measurement_dim,param_num)); dR[:,:,1] = np.eye(measurement_dim)
        # measurement related parameters (subject to change) <-

        data = generate_random_y_data(10, 2, 3) # np.array((samples, dim, ts_no))
        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='regular',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
                                               mean_compare_decimal=16,
                                               m_init=None, P_init=None, dA=dA,dQ=dQ,
                                               dH=dH,dR=dR, use_cython=False,
                                               kalman_filter_type='svd',
                                               calc_log_likelihood=True,
                                               calc_grad_log_likelihood=True)

        # Cython back-end for 2D measurements is currently disabled.
        # (f_mean, f_var) = self.run_descr_model(data, A,Q,H,R, true_states=None,
        #                                        mean_compare_decimal=16,
        #                                        m_init=None, P_init=None, dA=dA,dQ=dQ,
        #                                        dH=dH,dR=dR, use_cython=True,
        #                                        kalman_filter_type='svd',
        #                                        calc_log_likelihood=True,
        #                                        calc_grad_log_likelihood=True)

        if plot:
            # plotting ->
            plt.figure()
            plt.plot( np.squeeze(data[:,0,1]), 'g.-', label='measurements')
            plt.plot( np.squeeze(f_mean[1:,0,1]), 'b.-',label='Kalman filter estimates')
            plt.plot( np.squeeze(f_mean[1:,0,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            plt.plot( np.squeeze(f_mean[1:,0,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            # plt.plot( np.squeeze(M_sm[1:,0,1]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:,0,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            # plt.plot( np.squeeze(M_sm[1:,0,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            plt.legend()
            plt.title("2D state-space, 2D measurements, 3 ts_no. 1-st measurement, 2-nd ts ploted")
            plt.show()
            # plotting <-
        # 2D measurement, 3 ts_no <-
|
||||||
|
|
||||||
|
    def test_continuos_ss(self,plot=False):
        """
        This function tests the continuos state-space model.

        A periodic SDE kernel from GPy supplies the continuous model
        matrices; the continuous-discrete Kalman filter is then run for
        1D/2D measurements and one/three time series with the 'regular'
        and 'rbc' filter variants. The test is skipped silently when GPy
        is not importable.
        """

        # 1D measurements, 1 ts_no ->
        measurement_dim = 1 # dimensionality of measurement

        X_data = generate_x_points(points_num=10, x_interval = (0, 20), random=True)
        Y_data = generate_random_y_data(10, 1, 1) # np.array((samples, dim, ts_no))

        try:
            import GPy
        except ImportError as e:
            # GPy not available - nothing to test against.
            return None

        periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
        # sde() returns the continuous model matrices and their parameter
        # derivatives.
        (F,L,Qc,H,P_inf,P0, dFt,dQct,dP_inft,dP0) = periodic_kernel.sde()

        state_dim = dFt.shape[0];
        param_num = dFt.shape[2]

        grad_calc_params = {}
        grad_calc_params['dP_inf'] = dP_inft
        grad_calc_params['dF'] = dFt
        grad_calc_params['dQc'] = dQct
        grad_calc_params['dR'] = np.zeros((measurement_dim,measurement_dim,param_num))
        grad_calc_params['dP_init'] = dP0
        # dH matrix is None

        (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
                                                    m_init=None, P_init=P0, use_cython=False,
                                                    kalman_filter_type='regular',
                                                    calc_log_likelihood=True,
                                                    calc_grad_log_likelihood=True,
                                                    grad_params_no=param_num, grad_calc_params=grad_calc_params)

        (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
                                                    m_init=None, P_init=P0, use_cython=False,
                                                    kalman_filter_type='rbc',
                                                    calc_log_likelihood=True,
                                                    calc_grad_log_likelihood=True,
                                                    grad_params_no=param_num, grad_calc_params=grad_calc_params)

        (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
                                                    m_init=None, P_init=P0, use_cython=True,
                                                    kalman_filter_type='rbc',
                                                    calc_log_likelihood=True,
                                                    calc_grad_log_likelihood=True,
                                                    grad_params_no=param_num, grad_calc_params=grad_calc_params)

        if plot:
            # plotting ->
            plt.figure()
            plt.plot( X_data, np.squeeze(Y_data[:,0]), 'g.-', label='measurements')
            plt.plot( X_data, np.squeeze(f_mean[1:,15]), 'b.-',label='Kalman filter estimates')
            plt.plot( X_data, np.squeeze(f_mean[1:,15])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            plt.plot( X_data, np.squeeze(f_mean[1:,15])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            # plt.plot( np.squeeze(M_sm[1:,15]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:,15])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            # plt.plot( np.squeeze(M_sm[1:,15])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            plt.legend()
            plt.title("1D measurements, 1 ts_no")
            plt.show()
            # plotting <-
        # 1D measurements, 1 ts_no <-

        # 1D measurements, 3 ts_no ->
        measurement_dim = 1 # dimensionality of measurement

        X_data = generate_x_points(points_num=10, x_interval = (0, 20), random=True)
        Y_data = generate_random_y_data(10, 1, 3) # np.array((samples, dim, ts_no))

        periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
        (F,L,Qc,H,P_inf,P0, dFt,dQct,dP_inft,dP0) = periodic_kernel.sde()

        state_dim = dFt.shape[0];
        param_num = dFt.shape[2]

        grad_calc_params = {}
        grad_calc_params['dP_inf'] = dP_inft
        grad_calc_params['dF'] = dFt
        grad_calc_params['dQc'] = dQct
        grad_calc_params['dR'] = np.zeros((measurement_dim,measurement_dim,param_num))
        grad_calc_params['dP_init'] = dP0
        # dH matrix is None

        (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
                                                    m_init=None, P_init=P0, use_cython=False,
                                                    kalman_filter_type='regular',
                                                    calc_log_likelihood=True,
                                                    calc_grad_log_likelihood=True,
                                                    grad_params_no=param_num, grad_calc_params=grad_calc_params)

        (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
                                                    m_init=None, P_init=P0, use_cython=False,
                                                    kalman_filter_type='rbc',
                                                    calc_log_likelihood=True,
                                                    calc_grad_log_likelihood=True,
                                                    grad_params_no=param_num, grad_calc_params=grad_calc_params)

        (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, 1.5, P_inf, X_data, Y_data, index = None,
                                                    m_init=None, P_init=P0, use_cython=True,
                                                    kalman_filter_type='rbc',
                                                    calc_log_likelihood=True,
                                                    calc_grad_log_likelihood=True,
                                                    grad_params_no=param_num, grad_calc_params=grad_calc_params)

        if plot:
            # plotting ->
            plt.figure()
            plt.plot(X_data, np.squeeze(Y_data[:,0,1]), 'g.-', label='measurements')
            plt.plot(X_data, np.squeeze(f_mean[1:,15,1]), 'b.-',label='Kalman filter estimates')
            plt.plot(X_data, np.squeeze(f_mean[1:,15,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            plt.plot(X_data, np.squeeze(f_mean[1:,15,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            # plt.plot( np.squeeze(M_sm[1:,15,1]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:,15,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            # plt.plot( np.squeeze(M_sm[1:,15,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            plt.legend()
            plt.title("1D measurements, 3 ts_no. 2-nd ts ploted")
            plt.show()
            # plotting <-
        # 1D measurements, 3 ts_no <-


        # 2D measurements, 3 ts_no ->
        measurement_dim = 2 # dimensionality of measurement

        X_data = generate_x_points(points_num=10, x_interval = (0, 20), random=True)
        Y_data = generate_random_y_data(10, 2, 3) # np.array((samples, dim, ts_no))

        periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
        (F,L,Qc,H,P_inf,P0, dFt,dQct,dP_inft,dP0) = periodic_kernel.sde()
        H = np.vstack((H,H)) # make 2D measurements
        R = 1.5 * np.eye(measurement_dim)

        state_dim = dFt.shape[0];
        param_num = dFt.shape[2]

        grad_calc_params = {}
        grad_calc_params['dP_inf'] = dP_inft
        grad_calc_params['dF'] = dFt
        grad_calc_params['dQc'] = dQct
        grad_calc_params['dR'] = np.zeros((measurement_dim,measurement_dim,param_num))
        grad_calc_params['dP_init'] = dP0
        # dH matrix is None

        (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, R, P_inf, X_data, Y_data, index = None,
                                                    m_init=None, P_init=P0, use_cython=False,
                                                    kalman_filter_type='regular',
                                                    calc_log_likelihood=True,
                                                    calc_grad_log_likelihood=True,
                                                    grad_params_no=param_num, grad_calc_params=grad_calc_params)

        (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, R, P_inf, X_data, Y_data, index = None,
                                                    m_init=None, P_init=P0, use_cython=False,
                                                    kalman_filter_type='rbc',
                                                    calc_log_likelihood=True,
                                                    calc_grad_log_likelihood=True,
                                                    grad_params_no=param_num, grad_calc_params=grad_calc_params)

        # Cython back-end for 2D measurements is currently disabled.
        # (f_mean, f_var) = self.run_continuous_model(F, L, Qc, H, R, P_inf, X_data, Y_data, index = None,
        #                                             m_init=None, P_init=P0, use_cython=True,
        #                                             kalman_filter_type='rbc',
        #                                             calc_log_likelihood=True,
        #                                             calc_grad_log_likelihood=True,
        #                                             grad_params_no=param_num, grad_calc_params=grad_calc_params)

        if plot:
            # plotting ->
            plt.figure()
            plt.plot(X_data, np.squeeze(Y_data[:,0,1]), 'g.-', label='measurements')
            plt.plot(X_data, np.squeeze(f_mean[1:,15,1]), 'b.-',label='Kalman filter estimates')
            plt.plot(X_data, np.squeeze(f_mean[1:,15,1])+np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            plt.plot(X_data, np.squeeze(f_mean[1:,15,1])-np.einsum('ij,ajk,kl', H, f_var[1:], H.T)[:,0,0], 'b--')
            # plt.plot( np.squeeze(M_sm[1:,15,1]), 'r.-',label='Smoother Estimates')
            # plt.plot( np.squeeze(M_sm[1:,15,1])+np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            # plt.plot( np.squeeze(M_sm[1:,15,1])-np.einsum('ij,ajk,kl', H, P_sm[1:], H.T)[:,0,0], 'r--')
            plt.legend()
            plt.title("1D measurements, 3 ts_no. 2-nd ts ploted")
            plt.show()
            # plotting <-
        # 2D measurements, 3 ts_no <-
||||||
|
#def test_EM_gradient(plot=False):
|
||||||
|
#    """
#    Test the EM gradient calculation. The formulas used here are valid only
#    for time-invariant matrices A, Q, H, R; for the continuous-time model
#    this means that all time intervals must be equal.
#    """
|
||||||
|
#
|
||||||
|
# np.random.seed(234) # seed the random number generator
|
||||||
|
#
|
||||||
|
# # 1D measurements, 1 ts_no ->
|
||||||
|
# measurement_dim = 1 # dimensionality of measurement
|
||||||
|
#
|
||||||
|
# x_data = generate_x_points(points_num=10, x_interval = (0, 20), random=False)
|
||||||
|
# data = generate_random_y_data(10, 1, 1) # np.array((samples, dim, ts_no))
|
||||||
|
#
|
||||||
|
# import GPy
|
||||||
|
# #periodic_kernel = GPy.kern.sde_Matern32(1,active_dims=[0,])
|
||||||
|
# periodic_kernel = GPy.kern.sde_StdPeriodic(1,active_dims=[0,])
|
||||||
|
# (F,L,Qc,H,P_inf,P0, dFt,dQct,dP_inft,dP0t) = periodic_kernel.sde()
|
||||||
|
#
|
||||||
|
# state_dim = dFt.shape[0];
|
||||||
|
# param_num = dFt.shape[2]
|
||||||
|
#
|
||||||
|
# grad_calc_params = {}
|
||||||
|
# grad_calc_params['dP_inf'] = dP_inft
|
||||||
|
# grad_calc_params['dF'] = dFt
|
||||||
|
# grad_calc_params['dQc'] = dQct
|
||||||
|
# grad_calc_params['dR'] = np.zeros((measurement_dim,measurement_dim,param_num))
|
||||||
|
# grad_calc_params['dP_init'] = dP0t
|
||||||
|
# # dH matrix is None
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# #(F,L,Qc,H,P_inf,dF,dQc,dP_inf) = ssm.balance_ss_model(F,L,Qc,H,P_inf,dF,dQc,dP_inf)
|
||||||
|
# # Use the Kalman filter to evaluate the likelihood
|
||||||
|
#
|
||||||
|
# #import pdb; pdb.set_trace()
|
||||||
|
# (M_kf, P_kf, log_likelihood,
|
||||||
|
# grad_log_likelihood,SmootherMatrObject) = ss.ContDescrStateSpace.cont_discr_kalman_filter(F,
|
||||||
|
# L, Qc, H, 1.5, P_inf, x_data, data, m_init=None,
|
||||||
|
# P_init=P0, calc_log_likelihood=True,
|
||||||
|
# calc_grad_log_likelihood=True,
|
||||||
|
# grad_params_no=param_num,
|
||||||
|
# grad_calc_params=grad_calc_params)
|
||||||
|
#
|
||||||
|
# if plot:
|
||||||
|
# # plotting ->
|
||||||
|
# plt.figure()
|
||||||
|
# plt.plot( np.squeeze(data[:,0]), 'g.-', label='measurements')
|
||||||
|
# plt.plot( np.squeeze(M_kf[1:,15]), 'b.-',label='Kalman filter estimates')
|
||||||
|
# plt.plot( np.squeeze(M_kf[1:,15])+np.einsum('ij,ajk,kl', H, P_kf[1:], H.T)[:,0,0], 'b--')
|
||||||
|
# plt.plot( np.squeeze(M_kf[1:,15])-np.einsum('ij,ajk,kl', H, P_kf[1:], H.T)[:,0,0], 'b--')
|
||||||
|
# plt.title("1D measurements, 1 ts_no")
|
||||||
|
# plt.show()
|
||||||
|
# # plotting <-
|
||||||
|
# # 1D measurements, 1 ts_no <-
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Script entry point: announce the run, then hand control to the
    # unittest discovery/runner machinery (which also parses CLI args).
    print("Running state-space inference tests...")
    unittest.main()
|
||||||
|
|
||||||
|
#tt = StateSpaceKernelsTests('test_discrete_ss_first')
|
||||||
|
#res = tt.test_discrete_ss_first(plot=True)
|
||||||
|
#res = tt.test_discrete_ss_1D(plot=True)
|
||||||
|
#res = tt.test_discrete_ss_2D(plot=False)
|
||||||
|
#res = tt.test_continuos_ss(plot=True)
|
||||||
|
|
||||||
|
|
||||||