From c3d8ea2e0d04b5befe4e658519201778689fc109 Mon Sep 17 00:00:00 2001
From: Chris Tomaszewski
Date: Wed, 13 Sep 2017 04:54:25 -0400
Subject: [PATCH] fix: replacing np.power with np.square for efficiency

---
 GPy/mappings/mlp.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPy/mappings/mlp.py b/GPy/mappings/mlp.py
index d56c475e..5a1f7aaa 100644
--- a/GPy/mappings/mlp.py
+++ b/GPy/mappings/mlp.py
@@ -35,7 +35,7 @@ class MLP(Mapping):
 
         # Backpropagation to hidden layer.
         dL_dact = np.dot(dL_dF, self.W2.T)
-        dL_dlayer1 = dL_dact * (1 - np.power(activations, 2))
+        dL_dlayer1 = dL_dact * (1 - np.square(activations))
 
         # Finally, evaluate the first-layer gradients.
         self.W1.gradient = np.dot(X.T,dL_dlayer1)
@@ -47,7 +47,7 @@ class MLP(Mapping):
 
         # Backpropagation to hidden layer.
         dL_dact = np.dot(dL_dF, self.W2.T)
-        dL_dlayer1 = dL_dact * (1 - np.power(activations, 2))
+        dL_dlayer1 = dL_dact * (1 - np.square(activations))
 
         return np.dot(dL_dlayer1, self.W1.T)
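
For context, the factor `(1 - activations**2)` being backpropagated here is the derivative of tanh, so both hunks rewrite the same tanh-gradient expression. Below is a minimal sketch, not part of the patch, that checks `np.power(x, 2)` and `np.square(x)` agree numerically and roughly compares their speed; the array shape, seed, and repeat count are arbitrary choices for illustration, and the size of the gap depends on the NumPy version, since recent releases special-case small integer exponents in `np.power`.

```python
import timeit

import numpy as np

# Representative tanh activations in (-1, 1); shape is an arbitrary choice.
rng = np.random.default_rng(0)
activations = np.tanh(rng.standard_normal((1000, 256)))

# The two expressions compute the same values.
assert np.allclose(np.power(activations, 2), np.square(activations))

# Rough timing: np.square is a dedicated elementwise-multiply ufunc,
# while np.power goes through the generic power ufunc.
t_power = timeit.timeit(lambda: np.power(activations, 2), number=1000)
t_square = timeit.timeit(lambda: np.square(activations), number=1000)
print(f"np.power(x, 2): {t_power:.3f} s")
print(f"np.square(x):   {t_square:.3f} s")
```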