版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/github_39611196/article/details/82591431
本篇博客主要介绍采用RNN做回归。
示例代码:
import torch
from torch import nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable
# Hyper-parameters
TIME_STEP = 10   # length (in time steps) of each training sequence window
INPUT_SIZE = 1   # number of input features per time step
LR = 0.02        # learning rate for the Adam optimizer
# Generate the regression data and preview it (uncomment to visualize):
# sin(t) is the input, cos(t) is the regression target.
# steps = np.linspace(0, np.pi * 2, 100, dtype=np.float32)
# x_np = np.sin(steps)
# y_np = np.cos(steps)
# plt.plot(steps, y_np, 'r-', label='target (cos)')
# plt.plot(steps, x_np, 'b-', label='target (sin)')
# plt.show()
class RNN(nn.Module):
    """Single-layer vanilla RNN mapping each input time step to one scalar.

    Used in this script for sequence regression (predicting cos from sin).
    """

    def __init__(self):
        super(RNN, self).__init__()
        # batch_first=True: inputs/outputs are (batch, time_step, feature).
        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,
            hidden_size=32,  # number of units in the hidden state
            num_layers=1,
            batch_first=True,
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        """Run the RNN over ``x`` and project every hidden state to a scalar.

        x: tensor of shape (batch, time_step, INPUT_SIZE).
        h_state: previous hidden state, or None to start fresh.
        Returns (predictions of shape (batch, time_step, 1), final h_state).
        """
        rnn_out, h_state = self.rnn(x, h_state)
        # Project each time step separately (demonstrates the dynamic graph).
        per_step = [self.out(rnn_out[:, t, :]) for t in range(rnn_out.size(1))]
        return torch.stack(per_step, dim=1), h_state
# Build the model and its training components.
rnn = RNN()
# print(rnn)

# Adam optimizer over all RNN parameters.
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
# Mean-squared error loss: appropriate for real-valued regression.
loss_func = nn.MSELoss()

h_state = None  # hidden state carried across successive training windows

plt.ion()  # enable interactive mode so the plot refreshes inside the loop
for step in range(60):
    # Each step trains on one half-period window [step*pi, (step+1)*pi).
    start, end = step * np.pi, (step + 1) * np.pi  # time steps
    # Use sin(t) as the input sequence to predict cos(t).
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
    x_np = np.sin(steps)
    y_np = np.cos(steps)
    # Reshape to (batch=1, time_step, input_size=1). Variable wrappers are
    # unnecessary since PyTorch 0.4 — plain tensors carry autograd directly.
    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    prediction, h_state = rnn(x, h_state)
    # Detach the hidden state so backprop is truncated at the window
    # boundary instead of reaching into previous windows' graphs.
    h_state = h_state.detach()

    loss = loss_func(prediction, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Live plot: target in red, current prediction in blue.
    plt.plot(steps, y_np.flatten(), 'r-')
    plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
    plt.draw()
    plt.pause(0.05)
plt.ioff()
plt.show()
网络结构:
RNN (
(rnn): RNN(1, 32, batch_first=True)
(out): Linear (32 -> 1)
)
数据形式:
训练结果: