ValueError: Expected input batch_size (64) to match target batch_size (32)
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import pandas as pd
class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        data = pd.read_csv(filepath)
        self.len = data.shape[0]  # shape[0] is the number of rows, shape[1] the number of columns
        # Features: every column except the first and the last
        self.x_data = torch.tensor(np.array(data)[:, 1:-1].astype(np.float32))
        # Label: the first column
        self.y_data = torch.tensor(np.array(data)[:, [0]].astype(np.float32))

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len
trainsets = DiabetesDataset('data/us-101.csv')
testsets = DiabetesDataset('data/us-101.csv')
batch_size = 32
epochs = 40
train_loader = DataLoader(dataset=trainsets, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=testsets, batch_size=batch_size, shuffle=False)
# Build the model
class LSTM_Model(nn.Module):
    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(LSTM_Model, self).__init__()
        self.hidden_dim = hidden_dim
        self.layer_dim = layer_dim
        self.lstm = nn.LSTM(input_dim, hidden_dim, layer_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, 64)
        self.fc1 = nn.Linear(64, output_dim)
        self.so = nn.Softmax(dim=1)
        self.drop = nn.Dropout(0.3)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Initial hidden and cell states: (layer_dim, batch, hidden_dim)
        ho = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_().to(device)
        co = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim).requires_grad_().to(device)
        out, (hn, cn) = self.lstm(x, (ho.detach(), co.detach()))
        # Feed the last time step through the fully connected head
        out = self.so(self.drop(self.fc1(self.relu(self.drop(self.fc(out[:, -1, :]))))))
        return out
# Hyper-parameters
input_dim = 4
hidden_dim = 128
layer_dim = 1
output_dim = 3
sequence_dim = 1
iter = 0

# Instantiate the model
model = LSTM_Model(input_dim, hidden_dim, layer_dim, output_dim)

# Use the GPU if available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# length = len(list(model.parameters()))
# for i in range(length):
#     print('parameter: %d' % (i + 1))
#     print(list(model.parameters())[i].size())

loss_list = []
accuracy_list = []
iteration_list = []
# Train the model
if __name__ == '__main__':
    for epoch in range(epochs):
        for i, (images, labels) in enumerate(train_loader, 0):
            # Reshape each batch to (batch, seq_len, input_dim) for the LSTM
            images = images.view(-1, sequence_dim, input_dim).requires_grad_().to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels.squeeze(dim=1).long())
            # loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            iter += 1
            if iter % 500 == 0:
                # Evaluate on the test set every 500 iterations
                model.eval()
                correct = 0.0
                total = 0.0
                with torch.no_grad():
                    for images, labels in test_loader:
                        images = images.view(-1, sequence_dim, input_dim).to(device)
                        outputs = model(images)
                        _, predict = torch.max(outputs.data, dim=1)
                        total += labels.size(0)
                        if torch.cuda.is_available():
                            correct += (predict.cpu() == labels.cpu()).sum()
                        else:
                            correct += (predict == labels).sum()
                accuracy = correct / total
                loss_list.append(loss.data)
                accuracy_list.append(accuracy)
                iteration_list.append(iter)
                print("loop: {}, Loss: {}".format(iter, loss.item()))
                print('Accuracy on test set: %d %%' % (100 * correct / total))
# Visualization
plt.plot(iteration_list, loss_list)
plt.xlabel('Number of Iterations')
plt.ylabel('Loss')
plt.title('LSTM')
plt.show()

plt.plot(iteration_list, accuracy_list)
plt.xlabel('Number of Iterations')
plt.ylabel('Accuracy')
plt.title('LSTM')
plt.show()
The error reported is:
Traceback (most recent call last):
File "C:\Users\yin\PycharmProjects\ngsim\predict.py", line 88, in <module>
loss = criterion(outputs, labels.squeeze(dim=1).long())
File "C:\Users\yin\miniconda3\envs\pytorch\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\yin\miniconda3\envs\pytorch\lib\site-packages\torch\nn\modules\loss.py", line 1174, in forward
return F.cross_entropy(input, target, weight=self.weight,
File "C:\Users\yin\miniconda3\envs\pytorch\lib\site-packages\torch\nn\functional.py", line 3026, in cross_entropy
return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
ValueError: Expected input batch_size (64) to match target batch_size (32).
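From the traceback, the batch sizes only diverge after the images.view(-1, sequence_dim, input_dim) reshape, so the mismatch looks like a shape problem rather than a DataLoader problem. Below is a minimal sketch of how that can happen; it assumes, hypothetically, that x_data has 8 feature columns per row (the actual width of us-101.csv is not shown here), so sequence_dim * input_dim = 4 covers only half a row:

import torch

batch_size = 32
num_features = 8              # hypothetical width of x_data (columns 1:-1 of the CSV)
x = torch.randn(batch_size, num_features)

sequence_dim, input_dim = 1, 4
reshaped = x.view(-1, sequence_dim, input_dim)
print(reshaped.shape)         # torch.Size([64, 1, 4]): 32 * 8 / (1 * 4) = 64 rows

# CrossEntropyLoss then receives 64 predictions but only 32 targets, which raises
# "Expected input batch_size (64) to match target batch_size (32)".

If that is what is happening, making input_dim equal to the real number of feature columns (or reshaping so that batch_size stays the first dimension) should keep the input and target batch sizes equal.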