Implementing the RNN Mechanism from Scratch in PyTorch


The RNN recurrence is:

$h_t = W_{hh} h_{t-1} + W_{ih} X_t$

where $X_t$ is the input at time step t (in the code below, a tanh nonlinearity is applied to the right-hand side, as PyTorch's nn.RNN does). In PyTorch, the RNN input $X$ has shape [seq, batch_size, embedding], so $X_t$ has shape [batch_size, embedding]. The embedding dimension is the one that takes part in the matrix multiplication, while batch_size is the dimension that must be preserved, so $X_t$ is usually written in transposed form as [embedding, batch_size].

The product $W_{ih} X_t$ therefore has shape [hidden_size, batch_size]. Viewed as a matrix, it is batch_size column vectors of dimension hidden_size, each produced entirely from its own input sequence; every column depends only on that sequence and is independent of the other sentences in the batch.
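As a quick sanity check of this shape bookkeeping, here is a minimal sketch; the concrete sizes are arbitrary and chosen only for illustration:

import torch

batch_size, embedding, hidden_size = 3, 7, 5

X_t = torch.randn(batch_size, embedding)    # input at one time step
W_ih = torch.randn(hidden_size, embedding)  # input-to-hidden weight

# [hidden_size, embedding] @ [embedding, batch_size] -> [hidden_size, batch_size]
h_contrib = W_ih @ X_t.T
print(h_contrib.shape)  # torch.Size([5, 3]): one hidden column vector per batch element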

The previous version still had quite a few small mistakes, which are corrected below.

import numpy as np
import torch
from torch import nn
from torch.nn.parameter import Parameter


class Rnn(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, bidirectional=False):
        super(Rnn, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        # input-to-hidden weights for layers 1..num_layers-1: their input is the
        # hidden state of the layer below, so the shape is [hidden_size, hidden_size]
        Wih = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(1, self.num_layers)]
        # hidden-to-hidden weights, one per layer
        Whh = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(self.num_layers)]
        self.Wih, self.Whh = Parameter(torch.tensor(Wih)), Parameter(torch.tensor(Whh))
        # the first layer's input-to-hidden weight acts on the raw input,
        # so its shape is [hidden_size, input_size]
        Wih0 = np.random.random((self.hidden_size, self.input_size))
        self.Wih0 = Parameter(torch.tensor(Wih0))

    def forward(self, x):
        '''
        :param x: [seq, batch_size, embedding]
        :return: out, hidden
        '''
        # x.shape      [seq, batch, feature]
        # hidden.shape [hidden_size, batch]
        # Whh0.shape [hidden_size, hidden_size]  Wih0.shape [hidden_size, feature]
        # Whh1.shape [hidden_size, hidden_size]  Wih1.shape [hidden_size, hidden_size]
        if not isinstance(x, torch.Tensor):
            raise TypeError('x is not a tensor')
        out = []
        # initial hidden state of every layer: zeros of shape [hidden_size, batch]
        hidden = torch.tensor([np.zeros((self.hidden_size, x.shape[1])) for i in range(self.num_layers)])
        time = x.shape[0]
        for i in range(time):
            # the first layer reads the input at time step i
            hidden[0] = torch.tanh(torch.matmul(self.Wih0, torch.transpose(x[i, ...], 1, 0)) +
                                   torch.matmul(self.Whh[0], hidden[0].clone().detach()))
            # deeper layers read the hidden state of the layer below at the same time step
            for j in range(1, self.num_layers):
                hidden[j] = torch.tanh(torch.matmul(self.Wih[j - 1], hidden[j - 1].clone().detach()) +
                                       torch.matmul(self.Whh[j], hidden[j].clone().detach()))
            # the output at each step is the top layer's hidden state; clone() is needed
            # because hidden is modified in place at the next step
            out.append(hidden[self.num_layers - 1].clone())
        # if the elements of a list are tensors, torch.tensor() cannot convert it and raises
        # an error, so torch.stack() is used; permute to [seq / num_layers, batch, hidden]
        return (torch.stack([i for i in out]).permute(0, 2, 1).contiguous(),
                hidden.permute(0, 2, 1).contiguous())


if __name__ == '__main__':
    a = torch.tensor([1, 2, 3])
    print(torch.cuda.is_available(), type(a))
    rnn = Rnn(1, 5, 4)
    rnn_office = nn.RNN(1, 5, 4)   # PyTorch's built-in RNN, used for shape comparison
    optimizer = torch.optim.Adam(params=rnn.parameters(), lr=0.1)
    # print(list(rnn.parameters()))
    input = torch.tensor(np.random.random((6, 3, 1)))
    for _ in range(10):
        out, h = rnn(input)
        # pred = torch.softmax(h.mean(dim=0), dim=1).argmax(dim=1).to(torch.float64)
        pred = h.mean(dim=0)          # [batch, hidden_size], used as class scores
        label = torch.tensor([1, 2, 3])
        criterion = nn.CrossEntropyLoss()
        loss = criterion(pred, label)
        print(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(rnn_office(input.to(torch.float32))[1].shape)
    print(f'seq is {input.shape[0]}, batch_size is {input.shape[1]} ',
          'out.shape ', out.shape, ' h.shape ', h.shape)
    # print(sigmoid(np.random.random((2, 3))))
    # element-wise multiplication
    # print(np.array([1, 2])*np.array([2, 1]))
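For a cross-check, the same recurrence can be unrolled by hand using the weights that PyTorch's nn.RNN creates for itself (nn.RNN also adds bias terms, which the formula at the top omits). The following is a minimal sketch, assuming nn.RNN's defaults (tanh nonlinearity, single direction, batch_first=False); names such as ref and out_manual are only for illustration:

import torch
from torch import nn

torch.manual_seed(0)
seq, batch, input_size, hidden_size, num_layers = 6, 2, 1, 5, 4

ref = nn.RNN(input_size, hidden_size, num_layers)   # built-in reference
x = torch.randn(seq, batch, input_size)
out_ref, h_ref = ref(x)

# manual unrolling with the same weights: h_t = tanh(x_t W_ih^T + b_ih + h_{t-1} W_hh^T + b_hh)
h = [torch.zeros(batch, hidden_size) for _ in range(num_layers)]
outs = []
for t in range(seq):
    inp = x[t]                                       # [batch, input_size]
    for l in range(num_layers):
        w_ih = getattr(ref, f'weight_ih_l{l}')
        w_hh = getattr(ref, f'weight_hh_l{l}')
        b_ih = getattr(ref, f'bias_ih_l{l}')
        b_hh = getattr(ref, f'bias_hh_l{l}')
        h[l] = torch.tanh(inp @ w_ih.T + b_ih + h[l] @ w_hh.T + b_hh)
        inp = h[l]                                   # feed this layer's output to the next layer
    outs.append(h[-1])

out_manual = torch.stack(outs)                       # [seq, batch, hidden_size]
h_manual = torch.stack(h)                            # [num_layers, batch, hidden_size]
print(torch.allclose(out_manual, out_ref, atol=1e-6),
      torch.allclose(h_manual, h_ref, atol=1e-6))    # expect: True True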

---------- Divider ----------

I adjusted the code again slightly so that gradients can be computed for gradient descent.

import numpy as np
import torch
from torch import nn


class Rnn(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, bidirectional=False):
        super(Rnn, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional

    def forward(self, x):
        '''
        :param x: [seq, batch_size, embedding]
        :return: out, hidden
        '''
        # x.shape      [seq, batch, feature]
        # hidden.shape [hidden_size, batch]
        # Whh0.shape [hidden_size, hidden_size]  Wih0.shape [hidden_size, feature]
        # Whh1.shape [hidden_size, hidden_size]  Wih1.shape [hidden_size, hidden_size]
        out = []
        x, hidden = np.array(x), [np.zeros((self.hidden_size, x.shape[1])) for i in range(self.num_layers)]
        # the weights are created inside forward(), so they are new random tensors on every call
        Wih = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(1, self.num_layers)]
        Wih0 = np.random.random((self.hidden_size, x.shape[2]))
        Whh = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(self.num_layers)]
        x = torch.from_numpy(x)
        hidden = torch.tensor(hidden)
        # requires_grad=True lets autograd compute gradients with respect to these weights
        Wih0 = torch.tensor(Wih0, requires_grad=True)
        Wih, Whh = torch.tensor(Wih, requires_grad=True), torch.tensor(Whh, requires_grad=True)
        time = x.shape[0]
        for i in range(time):
            hidden[0] = torch.tanh(torch.matmul(Wih0, torch.transpose(x[i, ...], 1, 0)) +
                                   torch.matmul(Whh[0], hidden[0]))
            for j in range(1, self.num_layers):
                hidden[j] = torch.tanh(torch.matmul(Wih[j - 1], hidden[j - 1]) +
                                       torch.matmul(Whh[j], hidden[j]))
            out.append(hidden[self.num_layers - 1])
        # if the elements of a list are tensors, torch.tensor() cannot convert it and
        # raises an error, so torch.stack() is used instead
        return torch.stack([i for i in out]), hidden


def sigmoid(x):
    # 1 / (1 + 1/exp(x)) is the same as 1 / (1 + exp(-x))
    return 1.0 / (1.0 + 1.0 / np.exp(x))


if __name__ == '__main__':
    a = torch.tensor([1, 2, 3])
    print(torch.cuda.is_available(), type(a))
    rnn = Rnn(1, 5, 4)
    input = np.random.random((6, 2, 1))
    out, h = rnn(input)
    print(f'seq is {input.shape[0]}, batch_size is {input.shape[1]} ',
          'out.shape ', out.shape, ' h.shape ', h.shape)
    # print(sigmoid(np.random.random((2, 3))))
    # element-wise multiplication
    # print(np.array([1, 2])*np.array([2, 1]))
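One caveat with this version: because the weights are re-created inside forward() on every call, they do receive gradients, but they are never registered on the module, so an optimizer has nothing to update between calls. A minimal sketch of the issue (the module name Tiny is made up for illustration):

import torch
from torch import nn

class Tiny(nn.Module):
    def forward(self, x):
        # re-created on every call; requires_grad=True gives it a .grad after backward(),
        # but it is not an nn.Parameter, so it never appears in parameters()
        w = torch.rand(3, 3, requires_grad=True)
        return (w @ x).sum()

m = Tiny()
print(list(m.parameters()))  # [] -- nothing for an optimizer to track
loss = m(torch.rand(3, 2))
loss.backward()              # gradients exist, but only on a tensor that is then discarded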

---------- Divider ----------

To be clear up front: this code is only meant to aid understanding. The gradient descent part is not written, and the parameters are effectively fixed, which does not affect the explanation. The code implements the core RNN computation using only the numpy library, so it cannot use GPU acceleration.

import numpy as np


class Rnn():
    def __init__(self, input_size, hidden_size, num_layers, bidirectional=False):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional

    def feed(self, x):
        '''
        :param x: [seq, batch_size, embedding]
        :return: out, hidden
        '''
        # x.shape      [seq, batch, feature]
        # hidden.shape [hidden_size, batch]
        # Whh0.shape [hidden_size, hidden_size]  Wih0.shape [hidden_size, feature]
        # Whh1.shape [hidden_size, hidden_size]  Wih1.shape [hidden_size, hidden_size]
        out = []
        x, hidden = np.array(x), [np.zeros((self.hidden_size, x.shape[1])) for i in range(self.num_layers)]
        # Wih[0] acts on the raw input; Wih[1:] act on the hidden state of the layer below
        Wih = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(1, self.num_layers)]
        Wih.insert(0, np.random.random((self.hidden_size, x.shape[2])))
        Whh = [np.random.random((self.hidden_size, self.hidden_size)) for i in range(self.num_layers)]
        time = x.shape[0]
        for i in range(time):
            # the first layer reads the input at time step i
            hidden[0] = np.tanh(np.dot(Wih[0], np.transpose(x[i, ...], (1, 0))) +
                                np.dot(Whh[0], hidden[0]))
            # deeper layers read the hidden state of the layer below at the same step
            for j in range(1, self.num_layers):
                hidden[j] = np.tanh(np.dot(Wih[j], hidden[j - 1]) +
                                    np.dot(Whh[j], hidden[j]))
            # the per-step output is the top layer's hidden state
            out.append(hidden[self.num_layers - 1])
        return np.array(out), np.array(hidden)


def sigmoid(x):
    # 1 / (1 + 1/exp(x)) is the same as 1 / (1 + exp(-x))
    return 1.0 / (1.0 + 1.0 / np.exp(x))


if __name__ == '__main__':
    rnn = Rnn(1, 5, 4)
    input = np.random.random((6, 2, 1))
    out, h = rnn.feed(input)
    print(f'seq is {input.shape[0]}, batch_size is {input.shape[1]} ',
          'out.shape ', out.shape, ' h.shape ', h.shape)
    # print(sigmoid(np.random.random((2, 3))))
    # element-wise multiplication
    # print(np.array([1, 2])*np.array([2, 1]))
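Note that this numpy version keeps each hidden state as [hidden_size, batch], so feed() returns out with shape [seq, hidden_size, batch] and hidden with shape [num_layers, hidden_size, batch]. To compare against PyTorch's [seq, batch, hidden_size] convention, the axes can simply be swapped back; a small illustrative sketch (the sizes are arbitrary):

import numpy as np

out = np.zeros((6, 5, 2))                  # e.g. seq=6, hidden_size=5, batch=2, as returned by feed()
out_torch_layout = out.transpose(0, 2, 1)  # -> [seq, batch, hidden_size]
print(out_torch_layout.shape)              # (6, 2, 5)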
