How do I write the training log to a specified txt file in my project, and how do I add one more convolution layer?

Asked on 2022-12-17 08:52:38
0 answers · 0 followers · 47 views

The code is as follows:

(First file):

from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# convert the MNIST images to tensors
pipeline = transforms.Compose([
    transforms.ToTensor()
])
train_set = datasets.MNIST("data", train=True, download=True, transform=pipeline)
train_loader = DataLoader(train_set, batch_size=1, shuffle=True)
# inspect a single sample
img, target = train_set[0]
print(img.shape)
print(target)
# write each batch of images to TensorBoard
writer = SummaryWriter("dataloader_show")
step = 0
for data in train_loader:
    imgs, target = data
    writer.add_images("train_data", imgs, step)  # add_images expects a batched NCHW tensor
    step = step + 1
writer.close()
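
Assuming TensorBoard is installed, the images logged by this script can then be viewed by running tensorboard --logdir=dataloader_show and opening the train_data tag in the browser.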

Second file:

# 1 Load libraries
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# 2 Define hyperparameters
batch_size = 64  # number of samples processed per batch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # train on the GPU if one is available
epochs = 20  # number of passes over the training set
train_loss_record = np.zeros(epochs)  # per-epoch training loss
test_loss_record = np.zeros(epochs)  # per-epoch test loss
Accuracy_record = np.zeros(epochs)  # per-epoch accuracy
# 3 Build the pipeline that preprocesses the images
pipeline = transforms.Compose([
    transforms.ToTensor(),  # convert the image to a tensor
    transforms.Normalize((0.1307,), (0.3081,))  # standardize with the MNIST mean and standard deviation
])
# 4 Download and load the data
# Download the datasets
train_set = datasets.MNIST("data", train=True, download=True, transform=pipeline)
test_set = datasets.MNIST("data", train=False, download=True, transform=pipeline)
# Load the data
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)  # shuffle=True shuffles the dataset
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)  # shuffle=True shuffles the dataset


# 5 Build the network model
class Digit(nn.Module):
    def __init__(self):
        super(Digit, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)  # 1: grayscale input channel, 10: output channels, 5: kernel size
        self.conv2 = nn.Conv2d(10, 20, 3)  # 10: input channels, 20: output channels, 3: kernel size
        self.linear1 = nn.Linear(20 * 10 * 10, 500)  # fully connected layer, 20*10*10 input features, 500 output features
        self.linear2 = nn.Linear(500, 10)  # fully connected layer, 500 input features, 10 output features

    # forward pass
    def forward(self, x):
        input_size = x.size(0)  # the input has shape batch_size*1*28*28
        x = self.conv1(x)  # input: batch*1*28*28, output: batch*10*24*24 (28-5+1=24)
        x = F.relu(x)  # activation
        x = F.max_pool2d(x, 2, 2)  # max pooling with a 2*2 window downsamples the feature maps
        # input: batch*10*24*24, output: batch*10*12*12
        x = self.conv2(x)  # input: batch*10*12*12, output: batch*20*10*10 (12-3+1=10)
        x = F.relu(x)  # activation
        x = x.view(input_size, -1)  # flatten, -1 infers the dimension
        # input: batch*20*10*10, output: batch*2000
        x = self.linear1(x)  # input: batch*2000, output: batch*500
        x = F.relu(x)  # activation, shape unchanged
        x = self.linear2(x)  # input: batch*500, output: batch*10
        output = F.log_softmax(x, dim=1)
        return output


# 6 Define the optimizer
model = Digit().to(device)
optimizer = optim.Adam(model.parameters())  # use the Adam optimizer


# 7 Define the training method
def train_model(model, device, train_loader, optimizer, epoch):
    # put the model in training mode
    model.train()
    for batch_index, (data, target) in enumerate(train_loader):
        # move the batch to the device
        data, target = data.to(device), target.to(device)
        # reset the gradients to zero
        optimizer.zero_grad()
        # forward pass
        output = model(data)
        # compute the loss; the model already returns log-probabilities, so use nll_loss
        loss = F.nll_loss(output, target)
        # index of the largest log-probability (not used during training)
        pred = output.max(1, keepdim=True)[1]
        # backpropagation
        loss.backward()
        # update the parameters
        optimizer.step()
        if batch_index % 3000 == 0:
            print("Train Epoch:{}\t Loss:{:.6f}".format(epoch, loss.item()))
            train_loss_record[epoch] = loss.item()  # record the training loss


# 8 Define the test method
def test_model(model, device, test_loader, epoch):
    # put the model in evaluation mode
    model.eval()
    # number of correct predictions
    correct = 0.0
    # accumulated test loss
    test_loss = 0.0
    with torch.no_grad():  # no gradient computation and no backpropagation
        for data, target in test_loader:
            # move the batch to the device
            data, target = data.to(device), target.to(device)
            # forward pass on the test data
            output = model(data)
            # accumulate the test loss; sum over the batch so the division below gives a per-sample average
            test_loss += F.nll_loss(output, target, reduction="sum").item()
            # index of the largest log-probability
            pred = output.max(1, keepdim=True)[1]
            # accumulate the number of correct predictions
            correct += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(test_loader.dataset)
        print("Test---Average loss :{:.4f}, Accuracy : {:.3f}\n".format(
            test_loss, 100.0 * correct / len(test_loader.dataset)))
        test_loss_record[epoch] = test_loss
        Accuracy_record[epoch] = 100.0 * correct / len(test_loader.dataset)


# 9 Train and save the model
for epoch in range(0, epochs):
    # train on the training set, then evaluate on the test set
    train_model(model, device, train_loader, optimizer, epoch)
    test_model(model, device, test_loader, epoch)
    # save the model weights for this epoch (the ./model directory must already exist)
    torch.save(model.state_dict(), "./model/model_{}.pth".format(epoch))

    print("Model saved")

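On the second half (adding one more convolution layer), the class below is a minimal sketch of one way to extend Digit with a third conv layer; the name DigitThreeConv and the padding=1 choice are assumptions, not the only option. With padding=1 the 10*10 spatial size after conv2 is preserved, so only linear1's input size has to change (from 20*10*10 to 40*10*10):

class DigitThreeConv(nn.Module):  # hypothetical name for the extended model
    def __init__(self):
        super(DigitThreeConv, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, 5)  # batch*1*28*28 -> batch*10*24*24
        self.conv2 = nn.Conv2d(10, 20, 3)  # batch*10*12*12 -> batch*20*10*10
        self.conv3 = nn.Conv2d(20, 40, 3, padding=1)  # added layer: batch*20*10*10 -> batch*40*10*10
        self.linear1 = nn.Linear(40 * 10 * 10, 500)  # input features grow to match conv3's output
        self.linear2 = nn.Linear(500, 10)

    def forward(self, x):
        input_size = x.size(0)
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)  # batch*10*24*24 -> batch*10*12*12
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))  # the extra convolution
        x = x.view(input_size, -1)  # flatten to batch*4000
        x = F.relu(self.linear1(x))
        x = self.linear2(x)
        return F.log_softmax(x, dim=1)

Swapping model = Digit().to(device) for model = DigitThreeConv().to(device) should be enough for the rest of the script to run unchanged.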