Partial changes to symbolic, including adding mapping covariance and beginning to unify code generation.

This commit is contained in:
Neil Lawrence 2014-04-07 10:31:13 +02:00
parent 19b3784389
commit 9b5a1edb23
9 changed files with 252 additions and 132 deletions

View file

@@ -1,7 +1,17 @@
# Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernel import Kernel
from linear import Linear
from mlp import MLP
#from rbf import RBF
# TODO need to fix this in a config file.
try:
    import sympy as sym
    sympy_available=True
except ImportError:
    sympy_available=False
if sympy_available:
    # These are likelihoods that rely on symbolic.
    from symbolic import Symbolic
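The guarded import above only exposes the Symbolic class when sympy can be imported. As a minimal standalone sketch (illustrative only, not GPy code; the toy expression and variable names are assumptions), this is the kind of symbolic differentiation the sympy dependency enables and that code generation from symbolic expressions builds on:

try:
    import sympy as sym
    sympy_available = True
except ImportError:
    sympy_available = False

if sympy_available:
    # Build a toy squared-exponential-style expression and differentiate it
    # symbolically; gradients of this kind are what a symbolic covariance
    # would generate code from.
    x, lengthscale = sym.symbols('x lengthscale', positive=True)
    k = sym.exp(-x**2 / (2 * lengthscale**2))
    dk_dx = sym.diff(k, x)
    print(sym.simplify(dk_dx))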

View file

@@ -1,11 +1,11 @@
# Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Copyright (c) 2013, 2014 GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core.mapping import Mapping
from ..core.mapping import Bijective_mapping
from ..core.parameterization import Param
class Linear(Mapping):
class Linear(Bijective_mapping):
"""
Mapping based on a linear model.
@ -20,8 +20,8 @@ class Linear(Mapping):
"""
    def __init__(self, input_dim=1, output_dim=1, name='linear_map'):
        Mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name)
    def __init__(self, input_dim=1, output_dim=1, name='linear'):
        Bijective_mapping.__init__(self, input_dim=input_dim, output_dim=output_dim, name=name)
        self.W = Param('W', np.zeros((self.input_dim, self.output_dim)))
        self.bias = Param('bias', np.zeros(self.output_dim))
        self.add_parameters(self.W, self.bias)
@@ -29,10 +29,15 @@
    def f(self, X):
        return np.dot(X,self.W) + self.bias
    def g(self, f):
        V = np.linalg.solve(np.dot(self.W.T, self.W), self.W.T)
        return np.dot(f-self.bias, V)
    def df_dtheta(self, dL_df, X):
        df_dW = (dL_df[:, :, None]*X[:, None, :]).sum(0).T
        df_dbias = (dL_df.sum(0))
        return np.hstack((df_dW.flatten(), df_dbias))
    def dL_dX(self, dL_df, X):
        return (dL_df[:, None, :]*self.W[None, :, :]).sum(2)
    def dL_dX(self, partial, X):
        """Gradient of L with respect to the inputs of the mapping, where L is a function of the mapping's output f."""
        return (partial[:, None, :]*self.W[None, :, :]).sum(2)
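A standalone numpy sketch (illustrative only, not the GPy classes above; the square W, the random seed and the scalar objective L = f.sum() are assumptions for the example) checking that the forward map f, the inverse map g and the gradients defined above behave consistently when W is square and the mapping is therefore bijective:

import numpy as np

input_dim = output_dim = 3
rng = np.random.RandomState(0)
W = rng.randn(input_dim, output_dim)
bias = rng.randn(output_dim)
X = rng.randn(5, input_dim)

# forward map: f(X) = XW + bias
f = np.dot(X, W) + bias

# inverse map: g(f) = (f - bias) (W^T W)^{-1} W^T, which recovers X for square, invertible W
V = np.linalg.solve(np.dot(W.T, W), W.T)
assert np.allclose(np.dot(f - bias, V), X)

# gradient of L = f.sum() with respect to the inputs; matches dL_df W^T
dL_df = np.ones_like(f)
dL_dX = (dL_df[:, None, :] * W[None, :, :]).sum(2)
assert np.allclose(dL_dX, np.dot(dL_df, W.T))

# gradient with respect to W, as in df_dtheta above; matches X^T dL_df
df_dW = (dL_df[:, :, None] * X[:, None, :]).sum(0).T
assert np.allclose(df_dW, np.dot(X.T, dL_df))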