Recurrent Neural Networks (RNN)
Disclaimer: the theory and code in these notes come from the open-source book Deep Learning with TensorFlow.
地址:https://github.com/jiajunhua/dragen1860-Deep-Learning-with-TensorFlow-book
1. A Simple RNN Implementation in PyTorch
1.1 Single-Layer RNNCell
The input has shape [s, b, n], where s is the sentence (sequence) length, b is the number of sentences (batch size), and n is the word-vector dimension; h below denotes the hidden-state size.
import torch
from torch import nn
from torch import optim
from torch.nn import functional as F

# x: [s, b, n] = [80, 4, 100] -- 80 time steps, batch of 4, 100-dim word vectors
x = torch.randn(80, 4, 100)
cell1 = nn.RNNCell(100, 64)  # input_size=100, hidden_size=64
h1 = torch.zeros(4, 64)      # initial hidden state: [b, h]
for xt in x:                 # step through the time dimension
    h1 = cell1(xt, h1)
print(h1.shape)              # torch.Size([4, 64])
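As a sanity check (my own addition, not from the book): with the default tanh nonlinearity, nn.RNNCell computes h' = tanh(x @ W_ih^T + b_ih + h @ W_hh^T + b_hh), so one manual step should reproduce the module's output.

h0 = torch.zeros(4, 64)
h_manual = torch.tanh(x[0] @ cell1.weight_ih.t() + cell1.bias_ih
                      + h0 @ cell1.weight_hh.t() + cell1.bias_hh)
print(torch.allclose(h_manual, cell1(x[0], h0), atol=1e-6))  # expect True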
1.2 Multi-Layer RNNCell
x = torch.randn(80, 4, 100)
cell1 = nn.RNNCell(100, 64)  # first layer: 100 -> 64
cell2 = nn.RNNCell(64, 20)   # second layer: 64 -> 20
h1 = torch.zeros(4, 64)
h2 = torch.zeros(4, 20)
for xt in x:
    h1 = cell1(xt, h1)  # layer 1 consumes the input
    h2 = cell2(h1, h2)  # layer 2 consumes layer 1's hidden state
print(h2.shape)         # torch.Size([4, 20])
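If you also want the per-step outputs of the top layer (what nn.RNN returns as out), collect h2 at every step and stack; a minimal sketch:

outputs = []
h1 = torch.zeros(4, 64)
h2 = torch.zeros(4, 20)
for xt in x:
    h1 = cell1(xt, h1)
    h2 = cell2(h1, h2)
    outputs.append(h2)
out = torch.stack(outputs)  # [80, 4, 20]: top-layer state at every time step
print(out.shape)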
1.3 RNN
# single-layer nn.RNN; the input layout is [s, b, n] by default
rnn = nn.RNN(input_size=100, hidden_size=64, num_layers=1)
x = torch.randn(80, 4, 100)
out, h = rnn(x, torch.zeros(1, 4, 64))  # h0: [num_layers, b, h]
print(out.shape, h.shape)  # [80, 4, 64], [1, 4, 64]

# three stacked layers; only h0's first dimension changes
rnn = nn.RNN(input_size=100, hidden_size=64, num_layers=3)
x = torch.randn(80, 4, 100)
out, h = rnn(x, torch.zeros(3, 4, 64))
print(out.shape, h.shape)  # [80, 4, 64], [3, 4, 64]
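A quick check (my own addition): out holds the top layer's hidden state at every time step, while h holds every layer's final state, so the last time step of out equals the top layer's slice of h.

print(torch.allclose(out[-1], h[-1]))  # expect True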
2. Time-Series Prediction with an RNN
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib import pyplot as plt
num_time_steps = 50  # points sampled per training curve
input_size = 1       # each step is a single scalar sin(t)
hidden_size = 16
output_size = 1
lr = 0.01
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.rnn = nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,  # input/output are [b, s, n]
        )
        for p in self.rnn.parameters():
            nn.init.normal_(p, mean=0.0, std=0.001)
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden_prev):
        out, hidden_prev = self.rnn(x, hidden_prev)
        out = out.view(-1, hidden_size)  # [b*s, h]: flatten for the linear layer
        out = self.linear(out)           # [b*s, 1]
        out = out.unsqueeze(dim=0)       # back to [1, s, 1] since b == 1
        return out, hidden_prev
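A quick shape check (my own sketch, not from the book): with batch_first=True and a batch of one, the model maps [1, T, 1] to [1, T, 1], one prediction per input step.

net = Net()
dummy = torch.randn(1, num_time_steps - 1, 1)
out, h = net(dummy, torch.zeros(1, 1, hidden_size))
print(out.shape, h.shape)  # torch.Size([1, 49, 1]) torch.Size([1, 1, 16])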
model = Net()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr)
hidden_prev = torch.zeros(1, 1, hidden_size)

for iter in range(6000):
    # sample a random 10-unit window of sin(t) as one training curve
    start = np.random.randint(3, size=1)[0]
    time_steps = np.linspace(start, start + 10, num_time_steps)
    data = np.sin(time_steps)
    data = data.reshape(num_time_steps, 1)
    # predict the next value from the current one: y is x shifted by one step
    x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)
    y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)

    output, hidden_prev = model(x, hidden_prev)
    hidden_prev = hidden_prev.detach()  # truncate the graph between iterations

    loss = criterion(output, y)
    model.zero_grad()
    loss.backward()
    optimizer.step()

    if iter % 100 == 0:
        print("Iteration: {} loss {}".format(iter, loss.item()))
# evaluation: seed with the first ground-truth point, then feed predictions back in
start = np.random.randint(3, size=1)[0]
time_steps = np.linspace(start, start + 10, num_time_steps)
data = np.sin(time_steps)
data = data.reshape(num_time_steps, 1)
x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)
y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)

predictions = []
input = x[:, 0, :]  # first ground-truth point
for _ in range(x.shape[1]):
    input = input.view(1, 1, 1)
    pred, hidden_prev = model(input, hidden_prev)
    input = pred  # closed loop: the prediction becomes the next input
    predictions.append(pred.detach().numpy().ravel()[0])

x = x.data.numpy().ravel()
y = y.data.numpy()
plt.scatter(time_steps[:-1], x.ravel(), s=90)  # ground truth
plt.plot(time_steps[:-1], x.ravel())
plt.scatter(time_steps[1:], predictions)       # model's rollout
plt.show()
I found this code a bit confusing at first and only just managed to follow it: the network is trained to predict sin(t) one step ahead, and at test time it runs in a closed loop, feeding each prediction back in as the next input.
3. LSTM
3.1 LSTMCell
print('one layer lstm')
x = torch.randn(80, 4, 100)
cell = nn.LSTMCell(input_size=100, hidden_size=64)
h = torch.zeros(4, 64)  # hidden state
c = torch.zeros(4, 64)  # cell state
for xt in x:
    h, c = cell(xt, (h, c))  # LSTMCell takes and returns an (h, c) pair
print(h.shape, c.shape)  # torch.Size([4, 64]) torch.Size([4, 64])
print('two layer lstm')
x = torch.randn(80, 4, 100)
cell1 = nn.LSTMCell(input_size=100, hidden_size=64)
cell2 = nn.LSTMCell(input_size=64, hidden_size=20)
h1 = torch.zeros(4, 64)
c1 = torch.zeros(4, 64)
h2 = torch.zeros(4, 20)
c2 = torch.zeros(4, 20)
for xt in x:
    h1, c1 = cell1(xt, (h1, c1))
    h2, c2 = cell2(h1, (h2, c2))  # layer 2 reads layer 1's hidden state
print(h2.shape, c2.shape)  # torch.Size([4, 20]) torch.Size([4, 20])
3.2 LSTM
print('Lstm')
x = torch.randn(80, 4, 100)
lstm = nn.LSTM(input_size=100, hidden_size=64, num_layers=3)
print(lstm)
out, (h, c) = lstm(x)  # h0 and c0 default to zeros when omitted
print(out.shape, h.shape, c.shape)  # [80, 4, 64], [3, 4, 64], [3, 4, 64]
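Passing the initial states explicitly gives the same shapes (a minimal sketch; zeros are also what nn.LSTM uses by default when no state is passed):

h0 = torch.zeros(3, 4, 64)
c0 = torch.zeros(3, 4, 64)
out, (h, c) = lstm(x, (h0, c0))
print(out.shape, h.shape, c.shape)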
4. GRU
4.1 GRUCell
Single layer
rnn = nn.GRUCell(100, 64)
x = torch.randn(80, 4, 100)
hx = torch.randn(4, 64)  # a GRU has no cell state, only a hidden state
out = []
for xt in x:
    hx = rnn(xt, hx)
    out.append(hx)  # keep each step's hidden state
print(hx.shape)  # torch.Size([4, 64])
Multiple layers
x = torch.randn(80, 4, 100)
cell1 = nn.GRUCell(100, 64)
cell2 = nn.GRUCell(64, 20)
h1 = torch.zeros(4, 64)
h2 = torch.zeros(4, 20)
for xt in x:
    h1 = cell1(xt, h1)
    h2 = cell2(h1, h2)
print(h1.shape, h2.shape)  # torch.Size([4, 64]) torch.Size([4, 20])
4.2 GRU
Multiple layers
rnn = nn.GRU(100, 64, 3)  # input_size=100, hidden_size=64, num_layers=3
input = torch.randn(80, 4, 100)
h0 = torch.randn(3, 4, 64)
out, hn = rnn(input, h0)
print(out.shape, hn.shape)  # [80, 4, 64], [3, 4, 64]
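For reference (my own addition, not from the book): with batch_first=True the input and output switch to [b, s, n], while the hidden state keeps its [num_layers, b, h] layout.

rnn_bf = nn.GRU(100, 64, 3, batch_first=True)
out_bf, hn_bf = rnn_bf(input.transpose(0, 1), h0)  # input reshaped to [4, 80, 100]
print(out_bf.shape, hn_bf.shape)  # [4, 80, 64], [3, 4, 64]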
There seem to be fewer PyTorch examples online than TensorFlow 2 examples; I will add more when I get the chance.