Implementing the Recurrent Neural Network RNNCell and Its Backpropagation in Pure Python, Compared Against PyTorch
Abstract
This article implements the RNNCell unit of a recurrent neural network and its backpropagation in pure Python (with NumPy), and verifies the results against PyTorch's built-in torch.nn.RNNCell.
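For reference, the forward step implemented below follows the standard RNNCell update (the same form used by torch.nn.RNNCell), written here in the batched row-vector convention of the NumPy code:

h_t = \tanh\left( x_t W_{ih}^{T} + b_{ih} + h_{t-1} W_{hh}^{T} + b_{hh} \right)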
Related
For the underlying theory and a detailed derivation, please refer to:
Detailed Explanation of the RNNCell Unit and the Gradient Derivation of Its Backpropagation
https://blog.csdn.net/oBrightLamp/article/details/85015325
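As a brief sketch of the gradients that the backward method below computes (see the linked article for the full derivation), write \delta for the incoming gradient at h_t:

\delta_{\tanh} = \delta \odot (1 - h_t^{2})
\partial L / \partial x_t = \delta_{\tanh} W_{ih}, \qquad \partial L / \partial h_{t-1} = \delta_{\tanh} W_{hh}
\partial L / \partial W_{ih} = \delta_{\tanh}^{T} x_t, \qquad \partial L / \partial W_{hh} = \delta_{\tanh}^{T} h_{t-1}
\partial L / \partial b_{ih} = \partial L / \partial b_{hh} = \textstyle\sum_{\text{batch}} \delta_{\tanh}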
Main Text
import torch
import numpy as np

np.random.seed(12)
torch.random.manual_seed(12)
np.set_printoptions(precision=6, suppress=True)


class RNNCell:
    """A NumPy re-implementation of torch.nn.RNNCell with manual backpropagation."""

    def __init__(self, weight_ih, weight_hh,
                 bias_ih, bias_hh):
        self.weight_ih = weight_ih
        self.weight_hh = weight_hh
        self.bias_ih = bias_ih
        self.bias_hh = bias_hh

        # caches filled during the forward passes, consumed during backward
        self.x_stack = []
        self.dx_list = []
        self.dw_ih_stack = []
        self.dw_hh_stack = []
        self.db_ih_stack = []
        self.db_hh_stack = []

        self.prev_hidden_stack = []
        self.next_hidden_stack = []

        # temporary cache: gradient flowing back into the previous hidden state
        self.prev_dh = None

    def __call__(self, x, prev_hidden):
        self.x_stack.append(x)

        # h_t = tanh(x W_ih^T + h_{t-1} W_hh^T + b_ih + b_hh)
        next_h = np.tanh(
            np.dot(x, self.weight_ih.T)
            + np.dot(prev_hidden, self.weight_hh.T)
            + self.bias_ih + self.bias_hh)

        self.prev_hidden_stack.append(prev_hidden)
        self.next_hidden_stack.append(next_h)

        # clean cache
        self.prev_dh = np.zeros(next_h.shape)
        return next_h

    def backward(self, dh):
        x = self.x_stack.pop()
        prev_hidden = self.prev_hidden_stack.pop()
        next_hidden = self.next_hidden_stack.pop()

        # gradient through tanh: (dh + gradient arriving from the later time step) * (1 - h_t^2)
        d_tanh = (dh + self.prev_dh) * (1 - next_hidden ** 2)
        self.prev_dh = np.dot(d_tanh, self.weight_hh)

        dx = np.dot(d_tanh, self.weight_ih)
        self.dx_list.insert(0, dx)

        dw_ih = np.dot(d_tanh.T, x)
        self.dw_ih_stack.append(dw_ih)

        dw_hh = np.dot(d_tanh.T, prev_hidden)
        self.dw_hh_stack.append(dw_hh)

        # bias gradients are d_tanh summed over the batch (the summation is done when printing)
        self.db_ih_stack.append(d_tanh)
        self.db_hh_stack.append(d_tanh)

        return self.dx_list
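The stack-based caches and the prev_dh accumulator are designed so that the cell can also be unrolled over a sequence: call it once per time step during the forward pass, then call backward once per time step in reverse order. A hypothetical two-step sketch (all array names here are illustrative, not from the script below):

    # hypothetical two-step unroll of the NumPy RNNCell (shapes: batch=3, input=4, hidden=5)
    # w_ih, w_hh, b_ih, b_hh are assumed NumPy arrays of shapes (5, 4), (5, 5), (5,), (5,)
    cell = RNNCell(w_ih, w_hh, b_ih, b_hh)
    h1 = cell(x1, h0)            # forward, step 1
    h2 = cell(x2, h1)            # forward, step 2
    cell.backward(dh2)           # backward, last step first
    cell.backward(dh1)           # prev_dh carries dL/dh1 from step 2 into this call
    # cell.dx_list now holds [dL/dx1, dL/dx2]; the dw_* stacks are summed over steps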
if __name__ == '__main__':
    # build a PyTorch RNNCell and a NumPy RNNCell sharing the same parameters
    rnn_cell_tensor = torch.nn.RNNCell(4, 5).double()
    rnn_cell_numpy = RNNCell(
        rnn_cell_tensor.weight_ih.data.numpy(),
        rnn_cell_tensor.weight_hh.data.numpy(),
        rnn_cell_tensor.bias_ih.data.numpy(),
        rnn_cell_tensor.bias_hh.data.numpy())

    x_numpy = np.random.random((3, 4))
    x_tensor = torch.tensor(x_numpy, requires_grad=True)

    h_numpy = np.random.random((3, 5))
    h_tensor = torch.tensor(h_numpy, requires_grad=True)

    dh_numpy = np.random.random((3, 5))
    dh_tensor = torch.tensor(dh_numpy, requires_grad=True)

    # forward pass in both implementations
    next_h_numpy = rnn_cell_numpy(x_numpy, h_numpy)
    next_h_tensor = rnn_cell_tensor(x_tensor, h_tensor)

    # backward pass with the same upstream gradient dh
    rnn_cell_numpy.backward(dh_numpy)
    next_h_tensor.backward(dh_tensor)

    # hidden-state inputs fed to both implementations (identical by construction)
    print("numpy_hidden :\n", h_numpy)
    print("tensor_hidden :\n", h_tensor.data.numpy())
    print("------")

    print("dx_numpy :\n", np.array(rnn_cell_numpy.dx_list))
    print("dx_tensor :\n", x_tensor.grad.data.numpy())
    print("------")

    print("dw_ih_numpy :\n",
          np.sum(rnn_cell_numpy.dw_ih_stack, axis=0))
    print("dw_ih_tensor :\n",
          rnn_cell_tensor.weight_ih.grad.data.numpy())
    print("------")

    print("dw_hh_numpy :\n",
          np.sum(rnn_cell_numpy.dw_hh_stack, axis=0))
    print("dw_hh_tensor :\n",
          rnn_cell_tensor.weight_hh.grad.data.numpy())
    print("------")

    print("db_ih_numpy :\n",
          np.sum(rnn_cell_numpy.db_ih_stack, axis=(0, 1)))
    print("db_hh_numpy :\n",
          np.sum(rnn_cell_numpy.db_hh_stack, axis=(0, 1)))
    print("------")

    print("db_ih_tensor :\n",
          rnn_cell_tensor.bias_ih.grad.data.numpy())
    print("db_hh_tensor :\n",
          rnn_cell_tensor.bias_hh.grad.data.numpy())
"""
程式碼輸出
numpy_hidden :
[[ 0.944225 0.852736 0.002259 0.521226 0.552038]
[ 0.485377 0.768134 0.160717 0.76456 0.02081 ]
[ 0.13521 0.116273 0.309898 0.671453 0.47123 ]]
tensor_hidden :
[[ 0.944225 0.852736 0.002259 0.521226 0.552038]
[ 0.485377 0.768134 0.160717 0.76456 0.02081 ]
[ 0.13521 0.116273 0.309898 0.671453 0.47123 ]]
------
dx_numpy :
[[[ 0.234823 0.001947 -0.221488 -0.120629]
[ 0.399758 0.061028 -0.244361 -0.42483 ]
[ 0.28308 0.016405 -0.252444 -0.098564]]]
dx_tensor :
[[ 0.234823 0.001947 -0.221488 -0.120629]
[ 0.399758 0.061028 -0.244361 -0.42483 ]
[ 0.28308 0.016405 -0.252444 -0.098564]]
------
dw_ih_numpy :
[[ 0.778769 0.979517 0.700974 0.842186]
[ 0.358268 1.077404 0.969949 0.37424 ]
[ 0.540533 1.158021 0.862288 0.676237]
[ 0.498534 1.444171 1.151646 0.643482]
[ 0.507196 0.819969 0.791703 0.417976]]
dw_ih_tensor :
[[ 0.778769 0.979517 0.700974 0.842186]
[ 0.358268 1.077404 0.969949 0.37424 ]
[ 0.540533 1.158021 0.862288 0.676237]
[ 0.498534 1.444171 1.151646 0.643482]
[ 0.507196 0.819969 0.791703 0.417976]]
------
dw_hh_numpy :
[[ 0.992737 1.002905 0.267135 1.12167 0.760192]
[ 0.748401 0.968729 0.24167 1.044483 0.325857]
[ 1.044248 1.140167 0.234594 1.138324 0.622972]
[ 1.174287 1.371561 0.27636 1.355196 0.591165]
[ 0.566084 0.729519 0.260056 0.93798 0.346813]]
dw_hh_tensor :
[[ 0.992737 1.002905 0.267135 1.12167 0.760192]
[ 0.748401 0.968729 0.24167 1.044483 0.325857]
[ 1.044248 1.140167 0.234594 1.138324 0.622972]
[ 1.174287 1.371561 0.27636 1.355196 0.591165]
[ 0.566084 0.729519 0.260056 0.93798 0.346813]]
------
db_ih_numpy :
[ 1.798989 1.496149 1.77515 2.042895 1.345267]
db_hh_numpy :
[ 1.798989 1.496149 1.77515 2.042895 1.345267]
------
db_ih_tensor :
[ 1.798989 1.496149 1.77515 2.042895 1.345267]
db_hh_tensor :
[ 1.798989 1.496149 1.77515 2.042895 1.345267]
"""