diff --git a/GPy/kern/__init__.py b/GPy/kern/__init__.py
index f479c387..6f70033a 100644
--- a/GPy/kern/__init__.py
+++ b/GPy/kern/__init__.py
@@ -17,7 +17,7 @@ from ._src.eq_ode2 import EQ_ODE2
 from ._src.trunclinear import TruncLinear,TruncLinear_inf
 from ._src.splitKern import SplitKern,DEtime
 from ._src.splitKern import DEtime as DiffGenomeKern
-
-
+from ._src.spline import Spline
+from ._src.eq_ode2 import EQ_ODE2
 from ._src.basis_funcs import LinearSlopeBasisFuncKernel, BasisFuncKernel, ChangePointBasisFuncKernel, DomainKernel
diff --git a/GPy/kern/_src/spline.py b/GPy/kern/_src/spline.py
new file mode 100644
index 00000000..bcef8548
--- /dev/null
+++ b/GPy/kern/_src/spline.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2015, Thomas Hornung
+# Licensed under the BSD 3-clause license (see LICENSE.txt)
+
+import numpy as np
+from .kern import Kern
+from ...core.parameterization import Param
+from ...core.parameterization.transformations import Logexp
+
+
+class Spline(Kern):
+    """
+    Linear spline kernel. You need to specify two parameters: the variance and c.
+    The variance is defined in powers of 10, so specifying -2 means 10^-2.
+    The parameter c controls the stiffness of the spline fit; a very stiff
+    spline is equivalent to linear regression.
+    See https://www.youtube.com/watch?v=50Vgw11qn0o starting at minute 1:17:28.
+    Lit.: Wahba, 1990.
+    """
+
+    def __init__(self, input_dim, variance=1., c=1., active_dims=None, name='spline'):
+        super(Spline, self).__init__(input_dim, active_dims, name)
+        self.variance = Param('variance', variance, Logexp())
+        self.c = Param('c', c)
+        self.link_parameters(self.variance, self.c)
+
+    def K(self, X, X2=None):
+        if X2 is None: X2 = X
+        term1 = (X+8.)*(X2.T+8.)/16.
+        term2 = abs((X-X2.T)/16.)**3
+        term3 = ((X+8.)/16.)**3 + ((X2.T+8.)/16.)**3
+        return self.variance**2 * (1. + (1.+self.c)*term1 + self.c/3.*(term2 - term3))
+
+    def Kdiag(self, X):
+        term1 = np.square(X+8.)/16.
+        term3 = 2.*((X+8.)/16.)**3
+        return (self.variance**2 * (1. + (1.+self.c)*term1 - self.c/3.*term3))[:, 0]
+
+    def update_gradients_full(self, dL_dK, X, X2=None):
+        if X2 is None: X2 = X
+        term1 = (X+8.)*(X2.T+8.)/16.
+        term2 = abs((X-X2.T)/16.)**3
+        term3 = ((X+8.)/16.)**3 + ((X2.T+8.)/16.)**3
+        self.variance.gradient = np.sum(dL_dK * (2.*self.variance * (1. + (1.+self.c)*term1 + self.c/3.*(term2 - term3))))
+        self.c.gradient = np.sum(dL_dK * (self.variance**2 * (term1 + 1./3.*(term2 - term3))))
+
+    def update_gradients_diag(self, dL_dKdiag, X):
+        raise NotImplementedError
+
+    def gradients_X(self, dL_dK, X, X2=None):
+        raise NotImplementedError
+
+    def gradients_X_diag(self, dL_dKdiag, X):
+        raise NotImplementedError
diff --git a/GPy/plotting/matplot_dep/base_plots.py b/GPy/plotting/matplot_dep/base_plots.py
index f25aee49..dade87cf 100644
--- a/GPy/plotting/matplot_dep/base_plots.py
+++ b/GPy/plotting/matplot_dep/base_plots.py
@@ -3,7 +3,7 @@
 try:
-    import Tango
+    #import Tango
     import pylab as pb
 except:
     pass
@@ -17,11 +17,11 @@ def ax_default(fignum, ax):
         fig = ax.figure
     return fig, ax
 
-def meanplot(x, mu, color=Tango.colorsHex['darkBlue'], ax=None, fignum=None, linewidth=2,**kw):
+def meanplot(x, mu, color='#3300FF', ax=None, fignum=None, linewidth=2,**kw):
     _, axes = ax_default(fignum, ax)
     return axes.plot(x,mu,color=color,linewidth=linewidth,**kw)
 
-def gpplot(x, mu, lower, upper, edgecol=Tango.colorsHex['darkBlue'], fillcol=Tango.colorsHex['lightBlue'], ax=None, fignum=None, **kwargs):
+def gpplot(x, mu, lower, upper, edgecol='#3300FF', fillcol='#33CCFF', ax=None, fignum=None, **kwargs):
     _, axes = ax_default(fignum, ax)
 
     mu = mu.flatten()
diff --git a/GPy/plotting/matplot_dep/models_plots.py b/GPy/plotting/matplot_dep/models_plots.py
index 9f841372..a198b443 100644
--- a/GPy/plotting/matplot_dep/models_plots.py
+++ b/GPy/plotting/matplot_dep/models_plots.py
@@ -2,7 +2,7 @@
 # Licensed under the BSD 3-clause license (see LICENSE.txt)
 
 try:
-    import Tango
+#    import Tango
     import pylab as pb
 except:
     pass
@@ -17,8 +17,8 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
         which_data_ycols='all', fixed_inputs=[],
         levels=20, samples=0, fignum=None, ax=None, resolution=None,
         plot_raw=False,
-        linecol=Tango.colorsHex['darkBlue'],fillcol=Tango.colorsHex['lightBlue'], Y_metadata=None, data_symbol='kx',
+        linecol='#3300FF',fillcol='#00FFFF', Y_metadata=None, data_symbol='kx',
         apply_link=False, samples_f=0, plot_uncertain_inputs=True, predict_kw=None):
     """
     Plot the posterior of the GP.
       - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
@@ -126,7 +126,7 @@ def plot_fit(model, plot_limits=None, which_data_rows='all',
             print Ysim.shape
             print Xnew.shape
             for yi in Ysim.T:
-                plots['posterior_samples'] = ax.plot(Xnew, yi[:,None], Tango.colorsHex['darkBlue'], linewidth=0.25)
+                plots['posterior_samples'] = ax.plot(Xnew, yi[:,None], '#3300FF', linewidth=0.25)
                 #ax.plot(Xnew, yi[:,None], marker='x', linestyle='--',color=Tango.colorsHex['darkBlue']) #TODO apply this line for discrete outputs.
     if samples_f:
         #NOTE not tested with fixed_inputs
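Usage sketch (not part of the patch): once this diff is applied, the new kernel is exported as GPy.kern.Spline, so it can be dropped into a standard GPy regression model. The toy data and parameter values below are purely illustrative; GPy.models.GPRegression is the library's existing API.

    import numpy as np
    import GPy

    # Toy 1-D data (illustrative only).
    np.random.seed(0)
    X = np.linspace(-8., 8., 50)[:, None]
    Y = np.sin(X) + 0.1 * np.random.randn(50, 1)

    # 'variance' and 'c' are the two parameters linked in Spline.__init__ above;
    # a larger c gives a stiffer fit (closer to linear regression).
    kernel = GPy.kern.Spline(input_dim=1, variance=1., c=1.)
    model = GPy.models.GPRegression(X, Y, kernel=kernel)
    model.optimize()  # relies on update_gradients_full implemented by the new kernel
    print(model)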