# Simple RNN时间序列预测

# --- Data preparation (fragment; the full script is repeated below) ---
from numpy.random import randint  # FIX: original line was invalid syntax ("import ... import ...")
import numpy as np
import torch
from torch import nn, optim
from matplotlib import pyplot as plt

# NOTE(review): num_time_steps is only defined further down the file
# (num_time_steps = 50); this fragment assumes it is already in scope.
start = randint(3)  # random integer in [0, 3) — varies the window start
time_steps = np.linspace(start, start + 10, num_time_steps)  # num_time_steps evenly spaced points
data = np.sin(time_steps)                      # shape [num_time_steps]
data = data.reshape(num_time_steps, -1)        # shape [num_time_steps, 1]
x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)  # inputs: points 0..48
y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)   # targets: points 1..49

start 的几何含义是图中红色左边框对应的横坐标值。我们需要先确定一个起点，再从该起点向后取 50 个点；如果每次的起点都相同，这个固定模式就会被网络记住，所以起点要随机变化。

x是50个数据点中的前49个，我们利用这49个点，每个点都向后预测一个单位的数据，得到$\hat y$，然后将$\hat y$与$y$进行对比

class Net(nn.Module):
    """One-layer Elman RNN followed by a linear read-out.

    Relies on the module-level constants ``input_size``, ``hidden_size``
    and ``output_size`` being defined before instantiation.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.rnn = nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, h0):
        # rnn_out: [batch, seq, hidden]; hidden: [num_layers, batch, hidden]
        rnn_out, hidden = self.rnn(x, h0)
        flat = rnn_out.view(-1, hidden_size)   # [batch*seq, hidden]
        pred = self.linear(flat)               # [batch*seq, 1]
        return pred.unsqueeze(dim=0), hidden   # [1, batch*seq, 1], new hidden state

model = Net()
criterion = nn.MSELoss()
# BUG FIX: 'optimizer' is used in the training loop (optimizer.step())
# but was never created anywhere in the file.
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Initial hidden state: [num_layers(=1) * num_directions(=1), batch(=1), hidden_size]
h0 = torch.zeros(1, 1, hidden_size)

for iter in range(6000):
    # Fresh training sequence each iteration: random window start in [0, 3).
    start = np.random.randint(3, size=1)[0]
    time_steps = np.linspace(start, start + 10, num_time_steps)
    data = np.sin(time_steps)
    data = data.reshape(num_time_steps, 1)
    x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)  # inputs: points 0..48
    y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)   # targets: points 1..49

    output, h0 = model(x, h0)
    h0 = h0.detach()  # truncate BPTT: do not backprop through earlier iterations

    loss = criterion(output, y)
    # BUG FIX: gradients must be cleared every step, otherwise they
    # accumulate across iterations and the updates are wrong.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if iter % 100 == 0:
        print("Iteration: {} loss {}".format(iter, loss.item()))

# Autoregressive roll-out: feed each prediction back in as the next input.
predictions = []
step_in = x[:, 0, :]  # the first observed point seeds the roll-out
for _ in range(x.shape[1]):
    step_in = step_in.view(1, 1, 1)
    pred, h0 = model(step_in, h0)
    step_in = pred  # next input is the model's own prediction
    predictions.append(pred.detach().numpy().ravel()[0])

from numpy.random import randint
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib import pyplot as plt

# Hyperparameters for the sine-wave prediction task.
num_time_steps = 50  # points sampled per sine-wave snippet
input_size = 1       # one scalar feature per time step
hidden_size = 16     # width of the RNN hidden state
output_size = 1      # predict one scalar per time step
lr=0.01              # learning rate

class Net(nn.Module):
    """Single-layer RNN with a linear projection from hidden state to output.

    Uses the module-level constants ``input_size``, ``hidden_size`` and
    ``output_size``.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.rnn = nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, h0):
        seq_out, new_hidden = self.rnn(x, h0)          # seq_out: [b, seq, h]
        projected = self.linear(seq_out.view(-1, hidden_size))  # [b*seq, 1]
        return projected.unsqueeze(dim=0), new_hidden  # [1, b*seq, 1]

model = Net()
criterion = nn.MSELoss()
# BUG FIX: 'optimizer' is referenced in the training loop below
# (optimizer.step()) but was never defined; create it with the lr constant.
optimizer = optim.Adam(model.parameters(), lr=lr)

# Initial hidden state: [num_layers*num_directions, batch, hidden_size]
h0 = torch.zeros(1, 1, hidden_size)

for iter in range(6000):
    # New training window each iteration; random start in [0, 3) so the
    # network cannot memorize a fixed sequence.
    start = randint(3)
    time_steps = np.linspace(start, start + 10, num_time_steps)
    data = np.sin(time_steps)
    data = data.reshape(num_time_steps, 1)
    x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)  # points 0..48
    y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)   # points 1..49

    output, h0 = model(x, h0)
    h0 = h0.detach()  # truncated BPTT: cut the graph at the iteration boundary

    loss = criterion(output, y)
    # BUG FIX: without zero_grad() the gradients accumulate across
    # iterations and every step uses a corrupted gradient.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if iter % 100 == 0:
        print("Iteration: {} loss {}".format(iter, loss.item()))

# Build one held-out sequence for evaluation, using the same recipe as training.
start = randint(3)
time_steps = np.linspace(start, start + 10, num_time_steps)
data = np.sin(time_steps).reshape(num_time_steps, 1)
x = torch.tensor(data[:-1]).float().view(1, num_time_steps - 1, 1)
y = torch.tensor(data[1:]).float().view(1, num_time_steps - 1, 1)

# Roll the model forward autoregressively: each output becomes the next input.
predictions = []
current = x[:, 0, :]  # seed with the first ground-truth point
for _ in range(x.shape[1]):
    current = current.view(1, 1, 1)
    pred, h0 = model(current, h0)
    current = pred
    predictions.append(pred.detach().numpy().ravel()[0])

# Visualise the ground-truth curve against the autoregressive predictions.
x = x.data.numpy().ravel() # flatten to a 1-D array
y = y.data.numpy() # NOTE(review): converted but never used by the plots below
plt.scatter(time_steps[:-1], x.ravel(), s=90)
plt.plot(time_steps[:-1], x.ravel())
# Predictions line up with the targets, i.e. one step ahead of the inputs.
plt.scatter(time_steps[1:], predictions)
plt.show()

0 条评论

• ### LeetCode258. 各位相加

两位数 $\overline{ab} = 10a + b$，于是 $\overline{ab} \bmod 9 = (9a + a + b) \bmod 9 = (a + b) \bmod 9$；三位数 $\overline{abc} = 100a + 10b + c$，同理 $\overline{abc} \bmod 9 = (a + b + c) \bmod 9$ ...

• ### Word2Vec的PyTorch实现（乞丐版）

根据论文所述，我这里设定window size=2，即每个中心词左右各取2个词作为背景词，那么对于上面的list，窗口每次滑动，选定的中心词和背景词如下图所示

• ### 利用PyTorch使用LSTM

和RNNCell类似，输入input_size的shape是[batch, input_size]，输出$h_t$和$c_t$的shape是[batch, hi...

• ### 封装一个千分位函数，并且保留两位小数

封装一个保留千分位的函数，并且保留两位小数（输入：123456，输出：123,456.00）

• ### 重学数据结构（三、队列）

和上一篇的栈相反，队列(queue)是一种先进先出(First In First Out, FIFO)的线性表。

• ### 进击吧！Pythonista（9/100）

在不考虑字符排列的条件下，对于相差只有一个字符的两个字符串，实现一个算法来识别相差的那个字符。要求如下：

• ### 一文带你读懂机器学习和数据科学的决策树

决策树是一类非常强大的机器学习模型，在高度可解释的同时又在许多任务中有非常良好的表现。 决策树在ML模型的特殊之处在于它清晰的信息表示结构。 决策树通过训练学到...

• ### 同样是追星 ，他们是这样做的 。

最近我朋友疯狂迷恋韩国的偶像团体防弹少年团，于是拜托我帮忙写一段程序实时检测韩国新闻网站instiz旗下两个板块pt和clip，当出现自家idol的新闻时，程序...

• ### 四种常见NLP框架使用总结

本文来自公众号：哈工大SCIR，AI 科技评论 获授权转载，如需转载，请联系哈工大SCIR