mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-05-08 03:22:38 +02:00
added Brownian motion
This commit is contained in:
parent
0dc9a32ba3
commit
365bc42140
6 changed files with 65 additions and 83 deletions
|
|
@ -2,8 +2,8 @@ from _src.rbf import RBF
|
||||||
from _src.white import White
|
from _src.white import White
|
||||||
from _src.kern import Kern
|
from _src.kern import Kern
|
||||||
from _src.linear import Linear
|
from _src.linear import Linear
|
||||||
#import bias
|
from _src.brownian import Brownian
|
||||||
#import Brownian
|
#from _src.bias import Bias
|
||||||
#import coregionalize
|
#import coregionalize
|
||||||
#import exponential
|
#import exponential
|
||||||
#import eq_ode1
|
#import eq_ode1
|
||||||
|
|
|
||||||
|
|
@ -1,65 +0,0 @@
|
||||||
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
|
|
||||||
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
|
||||||
|
|
||||||
|
|
||||||
from kernpart import Kernpart
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
def theta(x):
    """Heaviside step function: elementwise 1.0 where x >= 0, else 0.0.

    (Fixes docstring typo "Heavisdie".)
    """
    return np.where(x >= 0., 1., 0.)
|
|
||||||
|
|
||||||
class Brownian(Kernpart):
    """
    Brownian Motion kernel: k(x, x') = variance * min(x, x').

    :param input_dim: the number of input dimensions (must be 1)
    :type input_dim: int
    :param variance: scale of the kernel
    :type variance: float
    """
    def __init__(self, input_dim, variance=1.):
        self.input_dim = input_dim
        assert self.input_dim == 1, "Brownian motion in 1D only"
        # Exactly one trainable parameter: the variance.
        self.num_params = 1
        self.name = 'Brownian'
        self._set_params(np.array([variance]).flatten())

    def _get_params(self):
        # Return the current parameter vector (just the variance).
        return self.variance

    def _set_params(self, x):
        assert x.shape == (1,)
        self.variance = x

    def _get_param_names(self):
        return ['variance']

    def K(self, X, X2, target):
        """Add variance*min(X, X2^T) to `target` in place (X2=None means X2=X)."""
        if X2 is None:
            X2 = X
        target += self.variance*np.fmin(X, X2.T)

    def Kdiag(self, X, target):
        """Add the covariance diagonal, variance*X, to `target` in place."""
        target += self.variance*X.flatten()

    def _param_grad_helper(self, dL_dK, X, X2, target):
        """Accumulate dL/d(variance) = sum(min(X, X2^T) * dL_dK) into `target`."""
        if X2 is None:
            X2 = X
        target += np.sum(np.fmin(X, X2.T)*dL_dK)

    def dKdiag_dtheta(self, dL_dKdiag, X, target):
        # dKdiag/d(variance) = X, chained with dL_dKdiag.
        target += np.dot(X.flatten(), dL_dKdiag)

    def gradients_X(self, dL_dK, X, X2, target):
        # BUGFIX: use the call form of raise; the original comma form
        # (`raise NotImplementedError, "TODO"`) is a SyntaxError on Python 3.
        raise NotImplementedError("TODO")
        #target += self.variance
        #target -= self.variance*theta(X-X2.T)
        #if X.shape==X2.shape:
            #if np.all(X==X2):
                #np.add(target[:,:,0],self.variance*np.diag(X2.flatten()-X.flatten()),target[:,:,0])

    def dKdiag_dX(self, dL_dKdiag, X, target):
        target += self.variance*dL_dKdiag[:,None]
|
|
||||||
|
|
||||||
50
GPy/kern/_src/brownian.py
Normal file
50
GPy/kern/_src/brownian.py
Normal file
|
|
@ -0,0 +1,50 @@
|
||||||
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
|
||||||
|
# Licensed under the BSD 3-clause license (see LICENSE.txt)
|
||||||
|
|
||||||
|
from kern import Kern
|
||||||
|
from ...core.parameterization import Param
|
||||||
|
from ...core.parameterization.transformations import Logexp
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
class Brownian(Kern):
    """
    Brownian motion in 1D only.

    Negative times are treated as a separate (backwards!) Brownian motion:
    k(x, x') = variance * min(|x|, |x'|) if sign(x) == sign(x'), else 0.

    :param input_dim: the number of input dimensions (must be 1)
    :type input_dim: int
    :param variance: scale of the kernel
    :type variance: float
    """
    def __init__(self, input_dim=1, variance=1., name='Brownian'):
        assert input_dim==1, "Brownian motion in 1D only"
        super(Brownian, self).__init__(input_dim, name)

        # Keep the variance positive via the Logexp transformation.
        self.variance = Param('variance', variance, Logexp())
        self.add_parameters(self.variance)

    def K(self,X,X2=None):
        if X2 is None:
            X2 = X
        # Zero covariance across the origin: the forward and backward
        # motions are independent.
        return self.variance*np.where(np.sign(X)==np.sign(X2.T),np.fmin(np.abs(X),np.abs(X2.T)), 0.)

    def Kdiag(self,X):
        return self.variance*np.abs(X.flatten())

    def update_gradients_full(self, dL_dK, X, X2=None):
        if X2 is None:
            X2 = X
        # dK/d(variance) is K/variance, chained with dL_dK and summed.
        self.variance.gradient = np.sum(dL_dK * np.where(np.sign(X)==np.sign(X2.T),np.fmin(np.abs(X),np.abs(X2.T)), 0.))

    def update_gradients_diag(self, dL_dKdiag, X):
        # BUGFIX: this method was commented out, but the generic sparse
        # inference path in Kern.update_gradients_sparse calls
        # update_gradients_diag on every kernel, so Brownian would fail
        # there. dKdiag/d(variance) = |X|.
        self.variance.gradient = np.dot(np.abs(X.flatten()), dL_dKdiag)

    #def gradients_X(self, dL_dK, X, X2=None):
        # NOTE(review): left disabled as in the original; the sign/min
        # derivative below has not been verified — confirm before enabling.
        #if X2 is None:
            #return np.sum(self.variance*dL_dK*np.abs(X),1)[:,None]
        #else:
            #return np.sum(np.where(np.logical_and(np.abs(X)<np.abs(X2.T), np.sign(X)==np.sign(X2)), self.variance*dL_dK,0.),1)[:,None]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -40,8 +40,15 @@ class Kern(Parameterized):
|
||||||
"""Set the gradients of all parameters when doing full (N) inference."""
|
"""Set the gradients of all parameters when doing full (N) inference."""
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
    """Set the gradients of all parameters when doing sparse (M) inference.

    Sums the gradient contributions of the three covariance terms —
    diag(Knn), Knm and Kmm — into one accumulator, then writes it back.

    NOTE(review): assumes each update_gradients_* call overwrites the
    per-parameter gradients and _collect_gradient adds them into `target`,
    with _set_gradient writing the total back — confirm against the
    parameterization framework.
    """
    target = np.zeros(self.size)
    # Contribution of the diagonal of Knn.
    self.update_gradients_diag(dL_dKdiag, X)
    self._collect_gradient(target)
    # Contribution of the cross-covariance Knm.
    self.update_gradients_full(dL_dKnm, X, Z)
    self._collect_gradient(target)
    # Contribution of the inducing-point covariance Kmm.
    self.update_gradients_full(dL_dKmm, Z, None)
    self._collect_gradient(target)
    self._set_gradient(target)
|
||||||
|
|
||||||
def update_gradients_variational(self, dL_dKmm, dL_dpsi0, dL_dpsi1, dL_dpsi2, mu, S, Z):
    """Set the gradients of all parameters when doing variational (M) inference with uncertain inputs."""
    # Abstract: must be overridden by kernels that support uncertain inputs.
    raise NotImplementedError
|
||||||
|
|
|
||||||
|
|
@ -74,16 +74,6 @@ class Linear(Kern):
|
||||||
def Kdiag(self, X):
    """Diagonal of the linear kernel: sum over dimensions of variances * X**2."""
    weighted_squares = self.variances * np.square(X)
    return weighted_squares.sum(axis=-1)
|
||||||
|
|
||||||
def update_gradients_sparse(self, dL_dKmm, dL_dKnm, dL_dKdiag, X, Z):
    """Set the gradients of all parameters when doing sparse (M) inference.

    Accumulates the gradient contributions of diag(Knn), Knm and Kmm into
    one array and writes the total back.

    NOTE(review): relies on _collect_gradient adding the freshly computed
    per-parameter gradients into `target` and _set_gradient storing the
    total — confirm against the parameterization framework.
    """
    target = np.zeros(self.size)
    # diag(Knn) term.
    self.update_gradients_diag(dL_dKdiag, X)
    self._collect_gradient(target)
    # Cross-covariance Knm term.
    self.update_gradients_full(dL_dKnm, X, Z)
    self._collect_gradient(target)
    # Inducing-point covariance Kmm term.
    self.update_gradients_full(dL_dKmm, Z, None)
    self._collect_gradient(target)
    self._set_gradient(target)
|
|
||||||
|
|
||||||
def update_gradients_full(self, dL_dK, X, X2=None):
|
def update_gradients_full(self, dL_dK, X, X2=None):
|
||||||
if self.ARD:
|
if self.ARD:
|
||||||
if X2 is None:
|
if X2 is None:
|
||||||
|
|
|
||||||
|
|
@ -92,11 +92,11 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
|
||||||
ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25)
|
ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25)
|
||||||
#ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.
|
#ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.
|
||||||
|
|
||||||
|
|
||||||
#add error bars for uncertain (if input uncertainty is being modelled)
|
#add error bars for uncertain (if input uncertainty is being modelled)
|
||||||
if hasattr(model,"has_uncertain_inputs"):
|
if hasattr(model,"has_uncertain_inputs") and model.has_uncertain_inputs():
|
||||||
ax.errorbar(model.X[which_data, free_dims], model.likelihood.data[which_data, 0],
|
ax.errorbar(model.X[which_data_rows, free_dims], model.Y[which_data_rows, which_data_ycols],
|
||||||
xerr=2 * np.sqrt(model.X_variance[which_data, free_dims]),
|
xerr=2 * np.sqrt(model.X_variance[which_data_rows, free_dims]),
|
||||||
ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
|
ecolor='k', fmt=None, elinewidth=.5, alpha=.5)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue