fix: replace np.power with np.square for efficiency

Chris Tomaszewski 2017-09-13 04:54:25 -04:00
parent cb4f05296f
commit c3d8ea2e0d


@@ -35,7 +35,7 @@ class MLP(Mapping):
         # Backpropagation to hidden layer.
         dL_dact = np.dot(dL_dF, self.W2.T)
-        dL_dlayer1 = dL_dact * (1 - np.power(activations, 2))
+        dL_dlayer1 = dL_dact * (1 - np.square(activations))
         # Finally, evaluate the first-layer gradients.
         self.W1.gradient = np.dot(X.T,dL_dlayer1)
@@ -47,7 +47,7 @@ class MLP(Mapping):
         # Backpropagation to hidden layer.
         dL_dact = np.dot(dL_dF, self.W2.T)
-        dL_dlayer1 = dL_dact * (1 - np.power(activations, 2))
+        dL_dlayer1 = dL_dact * (1 - np.square(activations))
         return np.dot(dL_dlayer1, self.W1.T)
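
For context, the changed expression is the tanh derivative (1 - tanh(x)^2) used in the backward pass. A minimal sketch, not part of the commit, checking that np.square gives identical values to np.power(x, 2) and roughly comparing their cost; the array name and shape are assumptions for illustration only, and exact timings depend on NumPy version and hardware:

    import numpy as np
    from timeit import timeit

    # Hypothetical hidden-layer activations (tanh outputs), shape chosen arbitrarily.
    activations = np.tanh(np.random.randn(256, 128))

    # The two forms of the tanh-derivative term are numerically identical.
    old = 1 - np.power(activations, 2)
    new = 1 - np.square(activations)
    assert np.allclose(old, new)

    # Rough timing comparison; np.square avoids the general-exponent path of np.power.
    t_power = timeit(lambda: 1 - np.power(activations, 2), number=10000)
    t_square = timeit(lambda: 1 - np.square(activations), number=10000)
    print(f"np.power: {t_power:.3f}s  np.square: {t_square:.3f}s")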