PyTorch: Handwritten Digit Recognition with a Custom Dataset

The complete code for this post is in the GitHub repository: link


1. Wrapping the Data into a Dataset

from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import os
import torch


class MyDataset(Dataset):
    def __init__(self, main_dir, is_train=True):
        self.dataset = []
        data_filename = "TRAIN" if is_train else "TEST"
        # iterate over the class subfolders (folder name = class name) under the TRAIN or TEST folder
        for i, cls_filename in enumerate(os.listdir(os.path.join(main_dir, data_filename))):
            # print(i)
            # print(os.listdir(os.path.join(main_dir)))
            # print(os.listdir(os.path.join(main_dir, data_filename)))
            # iterate over the digit images inside each class folder
            for img_data in os.listdir(os.path.join(main_dir, data_filename, cls_filename)):
                self.dataset.append([os.path.join(main_dir, data_filename, cls_filename, img_data), i])  # use i as the label
                # print(self.dataset)  # ['D:\\PycharmProjects\\2020-08-25-全连接神经网络\\MNIST_IMG\\TRAIN\\0\\0.jpg', 0]
                # storing paths instead of loaded images keeps memory usage low; loading every image into the list would exhaust RAM

    def __len__(self):
        return len(self.dataset)  # dataset size (number of samples), required for iteration

    def __getitem__(self, index):  # supports indexing, which makes the dataset iterable
        data = self.dataset[index]  # fetch one [image path, label] pair by index
        image_data = self.image_preprocess(Image.open(data[0]))  # open the image from its path and preprocess it
        label_data = data[1]  # the image's label
        return image_data, label_data

    def image_preprocess(self, x):
        return transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, ], std=[0.5, ])
        ])(x)  # preprocess the image data
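    # Note on the preprocessing above: ToTensor scales pixel values to [0, 1], and
    # Normalize then computes (x - 0.5) / 0.5, mapping them to [-1, 1]. For example,
    # a black pixel 0 becomes 0.0 -> -1.0, and a white pixel 255 becomes 1.0 -> 1.0.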


# if __name__ == '__main__':
#     data_path = r"D:\PycharmProjects\2020-08-25-全连接神经网络\MNIST_IMG"
#
#     dataset = MyDataset(data_path, True)
#
#     dataloader = DataLoader(dataset, 128, shuffle=False, num_workers=1, drop_last=True)
#     for x, y in dataloader:  # batches of (images, labels)
#         print(x.shape)
#         print(y.shape)
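
MyDataset assumes an on-disk layout like the sketch below (MNIST_IMG and the file names are just illustrative; only the TRAIN/TEST split and the class-named subfolders matter):

MNIST_IMG/
├── TRAIN/
│   ├── 0/
│   │   ├── 0.jpg
│   │   └── ...
│   ├── 1/
│   └── ... (folders 2-9)
└── TEST/
    ├── 0/
    └── ... (folders 1-9)

Note that os.listdir makes no ordering guarantee, so the class index i follows whatever order the OS returns; with folders named 0-9 this usually matches the digit, but sorting the listing explicitly would be safer.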

2. Training the Network

Two training scripts follow; pick either one. The first trains with MSELoss on one-hot targets:

import torch
import torch.nn as nn
import torch.utils.data as data
import matplotlib.pyplot as plt
from dataset_sampling import MyDataset


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Linear(in_features=784, out_features=512),
            nn.Dropout(0.5),
            nn.BatchNorm1d(512),  # normalizes over N, H, W
            # nn.LayerNorm(512),  # normalizes over C, H, W
            # nn.InstanceNorm1d(512),  # normalizes over H, W (requires 3-D input)
            # nn.GroupNorm(2, 512)  # normalizes over C, H, W; splits the 512 features into 2 groups
            nn.ReLU()
        )  # N, 512
        self.layer2 = nn.Sequential(
            nn.Linear(in_features=512, out_features=256),
            nn.Dropout(0.5),
            nn.BatchNorm1d(256),
            nn.ReLU()
        )  # N, 256
        self.layer3 = nn.Sequential(
            nn.Linear(in_features=256, out_features=128),
            nn.Dropout(0.5),
            nn.BatchNorm1d(128),
            nn.ReLU()
        )  # N, 128
        self.layer4 = nn.Sequential(
            nn.Linear(in_features=128, out_features=10),
        )  # N, 10

    def forward(self, x):
        # x = torch.reshape(x, [1, x.size(0), -1])  # shape [1, N, C*H*W]
        # print(x.shape)
        # y1 = self.layer1(x)[0]   # these two lines handle the InstanceNorm1d case: add a leading dim, then index it away to return to 2-D

        x = torch.reshape(x, [x.size(0), -1])  # shape [N, C*H*W]
        y1 = self.layer1(x)
        # y1 = torch.dropout(y1, 0.5, True)

        y2 = self.layer2(y1)
        # y2 = torch.dropout(y2, 0.5, True)

        y3 = self.layer3(y2)
        # y3 = torch.dropout(y3, 0.5, True)

        y4 = self.layer4(y3)
        return y4


if __name__ == '__main__':
    batch_size = 200
    # load the local dataset
    data_path = r"D:\PycharmProjects\2020-08-25-全连接神经网络\MNIST_IMG"
    save_params = "./mnist_params.pth"
    save_net = "./mnist_net.pth"

    train_data = MyDataset(data_path, True)
    test_data = MyDataset(data_path, False)

    train_loader = data.DataLoader(train_data, batch_size, shuffle=True)
    test_loader = data.DataLoader(test_data, batch_size, shuffle=True)

    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    net = Net().to(device)
    # net.load_state_dict(torch.load(save_params))
    # net = torch.load(save_net).to(device)

    loss_function = nn.MSELoss()

    # optimizer = torch.optim.SGD(net.parameters(), lr=1e-3, momentum=0.5, dampening=0,
    #                             weight_decay=0,  nesterov=False)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3, betas=(0.9, 0.99), eps=1e-8,
                                 weight_decay=0, amsgrad=False)  # betas: larger values give smoother updates; the defaults usually work best
    # weight_decay is the L2 regularization coefficient

    # optimizer = torch.optim.Adagrad(net.parameters())
    # optimizer = torch.optim.Adadelta(net.parameters())
    # optimizer = torch.optim.RMSprop(net.parameters())
    # optimizer = torch.optim.SGD(net.parameters(), 1e-3)
    # optimizer = torch.optim.Adam(net.parameters())

    a = []
    b = []
    plt.ion()
    net.train()
    for epoch in range(2):
        for i, (x, y) in enumerate(train_loader):
            x = x.to(device)
            y = y.to(device)
            output = net(x)

            # print(output)
            # print(output[0])  # the ten raw outputs for one image
            # print(output.shape)  # torch.Size([100, 10])
            # print(y)
            # scatter 1s along dim 1 after reshaping the labels to (N, 1)
            y = torch.zeros(y.cpu().size(0), 10).scatter_(1, y.cpu().reshape(-1, 1), 1).to(device)  # the digit value picks the column index (one-hot encoding)
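            # Worked example (hypothetical batch of two labels): with y = tensor([3, 0]),
            # torch.zeros(2, 10).scatter_(1, y.reshape(-1, 1), 1) writes a 1 at column 3
            # of row 0 and column 0 of row 1:
            # [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
            #  [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]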
            # print(y)
            # print(y.size(0))

            # add L1/L2 regularization (optional):
            # L1 = 0
            # L2 = 0
            # for params in net.parameters():
            #     L1 += torch.sum(torch.abs(params))
            #     L2 += torch.sum(torch.pow(params, 2))
            # loss = loss_function(output, y)
            # loss1 = loss + 0.001*L1
            # loss2 = loss + 0.001*L2
            # loss = 0.2*loss1 + 0.8*loss2

            loss = loss_function(output, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 10 == 0:
                a.append(i + (epoch*(len(train_data) / batch_size)))
                b.append(loss.item())
                # plt.clf()
                # plt.plot(a, b)
                # plt.pause(1)
                print("Epoch:{}, batch:{}/600, loss:{:.3f}".format(epoch, int(i), loss.item()))

        # print(a)
        torch.save(net.state_dict(), "./mnist_params.pth")
        # torch.save(net, "./mnist_net.pth")

    net.eval()
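    # tip: wrapping the evaluation loop below in `with torch.no_grad():` skips gradient tracking and saves memory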
    eval_loss = 0
    eval_acc = 0
    for i, (x, y) in enumerate(test_loader):
        x = x.to(device)
        y = y.to(device)
        out = net(x)

        y = torch.zeros(y.cpu().size(0), 10).scatter_(1, y.cpu().reshape(-1, 1), 1).to(device)  # build the one-hot target on CPU, then move it to the device
        loss = loss_function(out, y)
        # print("Test_Loss:{:.3f}".format(loss.item()))

        # print(y.size(0))
        # print(loss.item())
        # print("====")

        eval_loss += loss.item()*y.size(0)  # mean batch loss times batch size = total loss for this batch; summed over the loop it gives the total test loss
        arg_max = torch.argmax(out, 1)

        y = y.argmax(1)  # recover the digit label from the one-hot target

        eval_acc += (arg_max==y).sum().item()

    mean_loss = eval_loss / len(test_data)  # total accumulated loss divided by the number of test samples
    mean_acc = eval_acc / len(test_data)

    # print(y)
    # print(torch.argmax(out, 1))
    print("loss:{:.3f}, Acc:{:.3f}".format(mean_loss, mean_acc))

import torch
import torch.nn as nn
import torch.utils.data as data
import matplotlib.pyplot as plt
from dataset_sampling import MyDataset
from tensorboardX import SummaryWriter
# from torch.utils.tensorboard import SummaryWriter

# writer = SummaryWriter("./logs")  # creates the log directory
# to open the dashboard, run: tensorboard --logdir=<log path>

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Sequential(
            nn.Linear(784, 512),
            nn.BatchNorm1d(512),
            nn.ReLU()
        )
        self.fc2 = nn.Sequential(
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU()
        )
        self.fc3 = nn.Sequential(
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.ReLU()
        )

        self.fc4 = nn.Linear(128, 10)

    def forward(self, x, epoch):
        x = x.reshape(x.size(0), -1)
        y = self.fc1(x)
        y = self.fc2(y)
        y = self.fc3(y)
        y = self.fc4(y)

        # writer.add_histogram("out", y, epoch)  # 收集输出、轮次
        # writer.add_histogram("weight", self.fc4.weight, epoch)  # 收集权重,轮次
        return y


if __name__ == '__main__':
    batch_size = 100
    # load the local dataset
    data_path = r"D:\PycharmProjects\2020-08-25-全连接神经网络\MNIST_IMG"
    save_params = "./mnist_params.pth"
    save_net = "./mnist_net.pth"

    train_data = MyDataset(data_path, True)
    test_data = MyDataset(data_path, False)

    train_loader = data.DataLoader(train_data, batch_size, shuffle=True)
    test_loader = data.DataLoader(test_data, batch_size, shuffle=True)

    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    net = Net().to(device)
    # net.load_state_dict(torch.load(save_params))
    # net = torch.load(save_net).to(device)
    # loss_fn = nn.MSELoss()
    loss_fn = nn.CrossEntropyLoss()
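    # CrossEntropyLoss combines log-softmax and negative log-likelihood: it expects raw
    # logits and integer class labels, so the one-hot scatter_ lines below stay commented out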
    optim = torch.optim.Adam(net.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                             weight_decay=0)

    for epoch in range(2):
        net.train()  # training mode: BatchNorm uses batch statistics
        train_loss = 0
        train_acc = 0
        for i, (x, y) in enumerate(train_loader):
            # y=torch.zeros(y.size(0),10).scatter_(1,y.reshape(-1,1),1)
            x = x.to(device)
            y = y.to(device)

            out = net(x, epoch)

            loss = loss_fn(out, y)
            out = torch.argmax(out, 1)

            train_loss += loss.item()  # mean loss over this batch

            train_acc += torch.sum(torch.eq(y.cpu(), out.detach().cpu())).item()  # count of correct predictions

            optim.zero_grad()
            loss.backward()
            optim.step()

        train_avgloss = train_loss / len(train_loader)  # accumulated batch losses divided by the number of batches
        train_avgacc = train_acc / len(train_data) * 100  # correct predictions over all training samples, as a percentage

        net.eval()  # evaluation mode: BatchNorm uses running statistics
        test_loss = 0
        test_acc = 0
        for i, (x, y) in enumerate(test_loader):
            # y = torch.zeros(y.size(0),10).scatter_(1,y.reshape(-1,1),1)
            x = x.to(device)
            y = y.to(device)

            out = net(x, epoch)
            loss = loss_fn(out, y)

            out = torch.argmax(out, 1)
            test_loss += loss.item()
            test_acc += torch.sum(torch.eq(y.cpu(), out.detach().cpu())).item()

        test_avgloss = test_loss / len(test_loader)
        test_avgacc = test_acc / len(test_data) * 100

        print("epoch:{},train_loss:{:.3f},test_loss:{:.3f}".format(epoch, train_avgloss, test_avgloss))
        print("epoch:{},train_acc:{:.3f}%,test_acc:{:.3f}%".format(epoch, train_avgacc, test_avgacc))

        # writer.add_scalars("loss", {"train_loss": train_avgloss, "test_loss": test_avgloss}, epoch)
        # writer.add_scalars("acc", {"train_acc": train_avgacc, "test_acc": test_avgacc}, epoch)
        torch.save(net.state_dict(), save_params)
        # torch.save(net, save_net)
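
If the SummaryWriter lines are uncommented, close the writer once the epoch loop finishes so pending events are flushed to disk:

    writer.close()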
