PyTorch CV Intro, Part 1: Datasets and Dense Networks

First edited: 2024/2/14; last edited: 2024/2/16

Tutorial source: https://learn.microsoft.com/en-us/training/modules/intro-computer-vision-pytorch/

Link to Part 2: https://blog.csdn.net/qq_33345365/article/details/136123205


Dataset


We use the MNIST dataset provided by torchvision, a dataset of handwritten digits. Each sample consists of two elements:

  1. The actual image of a digit, represented as a tensor of shape 1x28x28.
  2. A label indicating which digit the tensor represents.

Required libraries

import torchvision
import matplotlib.pyplot as plt
from torchvision.transforms import ToTensor

Load the dataset, which contains 60,000 training images and 10,000 test images:

data_train = torchvision.datasets.MNIST("./data", download=True, train=True, transform=ToTensor())
data_test = torchvision.datasets.MNIST("./data", download=True, train=False, transform=ToTensor())

Visualize the first 7 images

Use matplotlib.pyplot to display the images.

Note: run this in an IDE that can display plots, such as PyCharm.

fig, ax = plt.subplots(1, 7)
for i in range(7):
    ax[i].imshow(data_train[i][0].view(28, 28))
    ax[i].set_title(data_train[i][1])
    ax[i].axis("off")

plt.show()

(Figure: the first 7 MNIST digits with their labels)

Inspect some properties of the dataset:

  1. Number of samples
  2. Tensor size and labels
  3. Pixel intensities
print('Training samples:', len(data_train))  # 60000
print('Test samples:', len(data_test))  # 10000

print('Tensor size:', data_train[0][0].size())  # torch.Size([1, 28, 28])
print('First 10 digits are:', [data_train[i][1] for i in range(10)])  # [5, 0, 4, 1, 9, 2, 1, 3, 1, 4]

# All pixel intensities of the images are represented by floating-point values in between 0 and 1:
print('Min intensity value: ', data_train[0][0].min().item())  # 0.0
print('Max intensity value: ', data_train[0][0].max().item())  # 1.0

The complete code:

import torchvision
import matplotlib.pyplot as plt
from torchvision.transforms import ToTensor

data_train = torchvision.datasets.MNIST("./data", download=True, train=True, transform=ToTensor())
data_test = torchvision.datasets.MNIST("./data", download=True, train=False, transform=ToTensor())

fig, ax = plt.subplots(1, 7)
for i in range(7):
    ax[i].imshow(data_train[i][0].view(28, 28))
    ax[i].set_title(data_train[i][1])
    ax[i].axis("off")

plt.show()

print('Training samples:', len(data_train))  # 60000
print('Test samples:', len(data_test))  # 10000

print('Tensor size:', data_train[0][0].size())  # torch.Size([1, 28, 28])
print('First 10 digits are:', [data_train[i][1] for i in range(10)])  # [5, 0, 4, 1, 9, 2, 1, 3, 1, 4]

print('Min intensity value: ', data_train[0][0].min().item())  # 0.0
print('Max intensity value: ', data_train[0][0].max().item())  # 1.0

A simple dense neural network


This section introduces the simplest approach to image classification: a fully-connected neural network, also known as a perceptron.

Create a new file, dense.py.

Load the helper file

First, fetch a helper Python file, pytorchcv.py, and put it in the same directory as dense.py; it wraps the functionality covered earlier.

It can be downloaded with:

!wget https://raw.githubusercontent.com/MicrosoftDocs/pytorchfundamentals/main/computer-vision-pytorch/pytorchcv.py
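The !wget syntax only works in a Jupyter notebook (or a shell). If you are working from a plain Python script or an IDE such as PyCharm, a minimal alternative using the standard library might look like this:

import urllib.request

url = "https://raw.githubusercontent.com/MicrosoftDocs/pytorchfundamentals/main/computer-vision-pytorch/pytorchcv.py"
urllib.request.urlretrieve(url, "pytorchcv.py")  # saves the helper next to dense.py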

Its contents are as follows:

# Script file to hide implementation details for PyTorch computer vision module

import builtins
import torch
import torch.nn as nn
from torch.utils import data
import torchvision
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import glob
import os
import zipfile

default_device = 'cuda' if torch.cuda.is_available() else 'cpu'


def load_mnist(batch_size=64):
    builtins.data_train = torchvision.datasets.MNIST('./data',
                                                     download=True, train=True, transform=ToTensor())
    builtins.data_test = torchvision.datasets.MNIST('./data',
                                                    download=True, train=False, transform=ToTensor())
    builtins.train_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size)
    builtins.test_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size)


def train_epoch(net, dataloader, lr=0.01, optimizer=None, loss_fn=nn.NLLLoss()):
    optimizer = optimizer or torch.optim.Adam(net.parameters(), lr=lr)
    net.train()
    total_loss, acc, count = 0, 0, 0
    for features, labels in dataloader:
        optimizer.zero_grad()
        lbls = labels.to(default_device)
        out = net(features.to(default_device))
        loss = loss_fn(out, lbls)  # cross_entropy(out,labels)
        loss.backward()
        optimizer.step()
        total_loss += loss
        _, predicted = torch.max(out, 1)
        acc += (predicted == lbls).sum()
        count += len(labels)
    return total_loss.item() / count, acc.item() / count


def validate(net, dataloader, loss_fn=nn.NLLLoss()):
    net.eval()
    count, acc, loss = 0, 0, 0
    with torch.no_grad():
        for features, labels in dataloader:
            lbls = labels.to(default_device)
            out = net(features.to(default_device))
            loss += loss_fn(out, lbls)
            pred = torch.max(out, 1)[1]
            acc += (pred == lbls).sum()
            count += len(labels)
    return loss.item() / count, acc.item() / count


def train(net, train_loader, test_loader, optimizer=None, lr=0.01, epochs=10, loss_fn=nn.NLLLoss()):
    optimizer = optimizer or torch.optim.Adam(net.parameters(), lr=lr)
    res = {'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': []}
    for ep in range(epochs):
        tl, ta = train_epoch(net, train_loader, optimizer=optimizer, lr=lr, loss_fn=loss_fn)
        vl, va = validate(net, test_loader, loss_fn=loss_fn)
        print(f"Epoch {ep:2}, Train acc={ta:.3f}, Val acc={va:.3f}, Train loss={tl:.3f}, Val loss={vl:.3f}")
        res['train_loss'].append(tl)
        res['train_acc'].append(ta)
        res['val_loss'].append(vl)
        res['val_acc'].append(va)
    return res


def train_long(net, train_loader, test_loader, epochs=5, lr=0.01, optimizer=None, loss_fn=nn.NLLLoss(), print_freq=10):
    optimizer = optimizer or torch.optim.Adam(net.parameters(), lr=lr)
    for epoch in range(epochs):
        net.train()
        total_loss, acc, count = 0, 0, 0
        for i, (features, labels) in enumerate(train_loader):
            lbls = labels.to(default_device)
            optimizer.zero_grad()
            out = net(features.to(default_device))
            loss = loss_fn(out, lbls)
            loss.backward()
            optimizer.step()
            total_loss += loss
            _, predicted = torch.max(out, 1)
            acc += (predicted == lbls).sum()
            count += len(labels)
            if i % print_freq == 0:
                print("Epoch {}, minibatch {}: train acc = {}, train loss = {}".format(epoch, i, acc.item() / count,
                                                                                       total_loss.item() / count))
        vl, va = validate(net, test_loader, loss_fn)
        print("Epoch {} done, validation acc = {}, validation loss = {}".format(epoch, va, vl))


def plot_results(hist):
    plt.figure(figsize=(15, 5))
    plt.subplot(121)
    plt.plot(hist['train_acc'], label='Training acc')
    plt.plot(hist['val_acc'], label='Validation acc')
    plt.legend()
    plt.subplot(122)
    plt.plot(hist['train_loss'], label='Training loss')
    plt.plot(hist['val_loss'], label='Validation loss')
    plt.legend()


def plot_convolution(t, title=''):
    with torch.no_grad():
        c = nn.Conv2d(kernel_size=(3, 3), out_channels=1, in_channels=1)
        c.weight.copy_(t)
        fig, ax = plt.subplots(2, 6, figsize=(8, 3))
        fig.suptitle(title, fontsize=16)
        for i in range(5):
            im = data_train[i][0]
            ax[0][i].imshow(im[0])
            ax[1][i].imshow(c(im.unsqueeze(0))[0][0])
            ax[0][i].axis('off')
            ax[1][i].axis('off')
        ax[0, 5].imshow(t)
        ax[0, 5].axis('off')
        ax[1, 5].axis('off')
        # plt.tight_layout()
        plt.show()


def display_dataset(dataset, n=10, classes=None):
    fig, ax = plt.subplots(1, n, figsize=(15, 3))
    mn = min([dataset[i][0].min() for i in range(n)])
    mx = max([dataset[i][0].max() for i in range(n)])
    for i in range(n):
        ax[i].imshow(np.transpose((dataset[i][0] - mn) / (mx - mn), (1, 2, 0)))
        ax[i].axis('off')
        if classes:
            ax[i].set_title(classes[dataset[i][1]])


def check_image(fn):
    try:
        im = Image.open(fn)
        im.verify()
        return True
    except:
        return False


def check_image_dir(path):
    for fn in glob.glob(path):
        if not check_image(fn):
            print("Corrupt image: {}".format(fn))
            os.remove(fn)


def common_transform():
    std_normalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                     std=[0.229, 0.224, 0.225])
    trans = torchvision.transforms.Compose([
        torchvision.transforms.Resize(256),
        torchvision.transforms.CenterCrop(224),
        torchvision.transforms.ToTensor(),
        std_normalize])
    return trans


def load_cats_dogs_dataset():
    if not os.path.exists('data/PetImages'):
        with zipfile.ZipFile('data/kagglecatsanddogs_5340.zip', 'r') as zip_ref:
            zip_ref.extractall('data')

    check_image_dir('data/PetImages/Cat/*.jpg')
    check_image_dir('data/PetImages/Dog/*.jpg')

    dataset = torchvision.datasets.ImageFolder('data/PetImages', transform=common_transform())
    trainset, testset = torch.utils.data.random_split(dataset, [20000, len(dataset) - 20000])
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=32)
    testloader = torch.utils.data.DataLoader(testset, batch_size=32)  # the upstream file passes trainset here, which looks like an oversight
    return dataset, trainloader, testloader

Load the dataset and libraries:

Mini-batches are created to speed up training.

import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
from torchinfo import summary
from torchvision.transforms import ToTensor
from pytorchcv import load_mnist, plot_results
from torch.utils import data
from torch.nn.functional import relu, log_softmax

# load_mnist()  # the four lines below do the same as this function, but modifying builtins directly is not recommended
data_train = torchvision.datasets.MNIST("./data", download=True, train=True, transform=ToTensor())
data_test = torchvision.datasets.MNIST("./data", download=True, train=False, transform=ToTensor())
train_loader = torch.utils.data.DataLoader(data_train, batch_size=64)
test_loader = torch.utils.data.DataLoader(data_test, batch_size=64)  # a larger batch size can be used for testing
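As a quick sanity check (not part of the original tutorial), one batch can be pulled from the loader to confirm the shapes it produces:

images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([64, 1, 28, 28])
print(labels.shape)  # torch.Size([64])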

The simplest model

  • The simplest possible model is a single fully-connected layer, also called a linear layer. With MNIST, the input has 784 (28 × 28) values and the output has 10 (digits 0–9). It is called linear because it applies a linear transformation to its input, which can be written as y = Wx + b, where W is a weight matrix of size 10×784 and b is a bias vector.

  • A fully-connected layer expects a one-dimensional vector as input, so each image has to be reshaped from 1×28×28 to a 784-element vector using Flatten.

  • Because the outputs of a fully-connected layer are not normalized to lie between 0 and 1, they cannot be interpreted as probabilities. Moreover, if we want the outputs to represent probabilities of the different digits, they have to sum to 1. To turn the output vector into a probability vector, the Softmax function is often used as the last activation of a classification network; for example, softmax([−1, 1, 2]) ≈ [0.035, 0.26, 0.705]. In PyTorch we typically use LogSoftmax, which computes the logarithm of the output probabilities; to convert the output back into actual probabilities, apply torch.exp to it.

This gives the following model:

net = nn.Sequential(
    nn.Flatten(),
    nn.Linear(784, 10),
    nn.LogSoftmax(dim=1))
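As a quick check (a small sketch, not part of the original tutorial), we can feed the first training image through the untrained network and exponentiate the LogSoftmax output; the result is a vector of 10 class probabilities that sums to 1:

probs = torch.exp(net(data_train[0][0]))
print(probs)        # 10 values, roughly uniform for an untrained network
print(probs.sum())  # approximately 1.0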

Model training

This section introduces a function that trains the model for one epoch.

Epoch: one complete pass over the training dataset is called an epoch.

The training process roughly looks like this:

  1. Take a batch of data (64 samples) from the input dataset and compute the network's predictions for it.
  2. The difference between the predictions and the expected results is measured with a loss function. The loss function shows how far the network's output is from the expected output; the goal of training is to minimize this loss.
  3. We compute the gradients of the loss with respect to the model weights (parameters) and use them to adjust the weights so as to improve the network's performance. The size of the adjustment is controlled by a parameter called the learning rate; the details of the optimization algorithm are defined in the optimizer object.
  4. Repeat these steps until the whole dataset has been processed. Below is a function that performs one epoch of training:
def train_epoch(net, dataloader, lr=0.01, optimizer=None, loss_fn=nn.NLLLoss()):
    optimizer = optimizer or torch.optim.Adam(net.parameters(), lr=lr)  # Adam optimizer by default
    net.train()  # switch the network to training mode
    total_loss, acc, count = 0, 0, 0
    for features, labels in dataloader:  # iterate over all batches in the dataset
        optimizer.zero_grad()
        out = net(features)  # compute the network's predictions for this batch
        loss = loss_fn(out, labels)  # compute the loss, i.e. how far predictions are from the expected labels
        loss.backward()  # backpropagate to compute gradients of the loss w.r.t. the weights
        optimizer.step()  # adjust the weights to reduce the loss
        total_loss += loss
        _, predicted = torch.max(out, 1)
        acc += (predicted == labels).sum()  # count correctly predicted samples (for accuracy)
        count += len(labels)
    return total_loss.item() / count, acc.item() / count  # average loss, accuracy

a = train_epoch(net, train_loader)
print(a)
# (0.005937494913736979, 0.8927666666666667)

Parameter meanings:

  • net: the neural network to train.
  • dataloader: the DataLoader that provides the data to train on.
  • lr (learning rate): defines how fast the network learns. During training we go over the same data multiple times, adjusting the weights each time. If the learning rate is too high, new values overwrite what was learned before and the network performs poorly; if it is too small, learning becomes very slow.
  • loss_fn (loss function): a function that measures the difference between the expected result and the one produced by the network. NLLLoss is used in most classification tasks, so it is the default here.
  • optimizer: defines the optimization algorithm. The most traditional algorithm is stochastic gradient descent, but here the more advanced Adam is used by default (a short usage sketch with SGD follows below).
  • Return value: the average loss and the accuracy on the training dataset.

The function computes and returns the average loss per sample together with the training accuracy (the percentage of samples classified correctly). By watching this loss during training, we can see whether the network is improving and learning from the data it is given.
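As an illustration only (this call is not in the original tutorial), an explicit optimizer and learning rate can be passed in, for example plain SGD instead of the default Adam:

sgd = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
print(train_epoch(net, train_loader, optimizer=sgd, loss_fn=nn.NLLLoss()))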

Validating accuracy

Check accuracy on the test set:

def validate(net, dataloader, loss_fn=nn.NLLLoss()):
    net.eval()
    count, acc, loss = 0, 0, 0
    with torch.no_grad():  # no gradients needed, since we are not training
        for features, labels in dataloader:
            out = net(features)
            loss += loss_fn(out, labels)  # just accumulate the loss
            pred = torch.max(out, 1)[1]  # index of the largest output, i.e. the most probable class
            acc += (pred == labels).sum()  # add 1 for each correct prediction
            count += len(labels)
    return loss.item() / count, acc.item() / count


b = validate(net, test_loader)
print(b)
#(0.005853596115112305, 0.8942)

As with the training function, we return the average loss and accuracy on the test dataset.

Overfitting

Usually at the beginning of training, both training and validation accuracy increase. At some point, however, training accuracy may keep improving while validation accuracy starts to drop. This is a sign of overfitting: the model performs well on the training dataset but poorly on new data.

Below is a training function that performs both training and validation. It prints the training and validation accuracy for each epoch, and returns a history that can be used to plot the loss and accuracy curves.

def train(net,train_loader,test_loader,optimizer=None,lr=0.01,epochs=10,loss_fn=nn.NLLLoss()):
    optimizer = optimizer or torch.optim.Adam(net.parameters(),lr=lr)
    res = { 'train_loss' : [], 'train_acc': [], 'val_loss': [], 'val_acc': []}
    for ep in range(epochs):
        tl,ta = train_epoch(net,train_loader,optimizer=optimizer,lr=lr,loss_fn=loss_fn)
        vl,va = validate(net,test_loader,loss_fn=loss_fn)
        print(f"Epoch {ep:2}, Train acc={ta:.3f}, Val acc={va:.3f}, Train loss={tl:.3f}, Val loss={vl:.3f}")
        res['train_loss'].append(tl)
        res['train_acc'].append(ta)
        res['val_loss'].append(vl)
        res['val_acc'].append(va)
    return res
  
# Epoch  0, Train acc=0.893, Val acc=0.894, Train loss=0.006, Val loss=0.006
# Epoch  1, Train acc=0.910, Val acc=0.899, Train loss=0.005, Val loss=0.006
# Epoch  2, Train acc=0.913, Val acc=0.899, Train loss=0.005, Val loss=0.006
# Epoch  3, Train acc=0.915, Val acc=0.897, Train loss=0.005, Val loss=0.006
# Epoch  4, Train acc=0.916, Val acc=0.897, Train loss=0.005, Val loss=0.006


# re-initialize the network to start from scratch
net = nn.Sequential(
        nn.Flatten(),
        nn.Linear(784,10), # 784 inputs, 10 outputs
        nn.LogSoftmax(dim=1))

hist = train(net,train_loader,test_loader,epochs=5)

plt.figure(figsize=(15, 5))
plt.subplot(121)
plt.plot(hist['train_acc'], label='Training acc')
plt.plot(hist['val_acc'], label='Validation acc')
plt.legend()
plt.subplot(122)
plt.plot(hist['train_loss'], label='Training loss')
plt.plot(hist['val_loss'], label='Validation loss')
plt.legend()
plt.show()

Results:

(Figure: training and validation accuracy and loss curves over 5 epochs)
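As a small illustrative addition (not part of the original tutorial), the returned history can also be inspected programmatically, for example to find the epoch with the best validation accuracy, after which further training starts to overfit:

best_epoch = max(range(len(hist['val_acc'])), key=lambda e: hist['val_acc'][e])
print('Best validation accuracy', hist['val_acc'][best_epoch], 'at epoch', best_epoch)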

Multi-layer perceptron

To further improve accuracy we may want to add one or more hidden layers. In fact, it can be shown mathematically that a network consisting only of a sequence of linear layers is essentially equivalent to a single linear layer, so it is important to insert non-linear functions between the layers!
ReLU is one of the simplest activation functions. Other activation functions used in deep learning include sigmoid and tanh, but ReLU is the most common in computer vision because it is fast to compute and the other functions bring no significant benefit. ReLU is defined as:

ReLU(x) = 0 for x < 0; ReLU(x) = x for x >= 0, i.e. ReLU(x) = max(0, x).
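A quick numeric check (a tiny sketch, not in the original tutorial) of what ReLU does element-wise:

print(torch.relu(torch.tensor([-2.0, -0.5, 0.0, 1.5])))  # tensor([0.0000, 0.0000, 0.0000, 1.5000])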

net = nn.Sequential(
    nn.Flatten(),
    nn.Linear(784, 100),  # 784 inputs, 100 outputs
    nn.ReLU(),  # activation function
    nn.Linear(100, 10),  # 100 inputs, 10 outputs
    nn.LogSoftmax(dim=1))  # dim=1 normalizes over the 10 class scores of each sample

summary(net, input_size=(1, 28, 28))  # parameters: 784*100+100 = 78,500 (first Linear) + 100*10+10 = 1,010 (second Linear) = 79,510 total

Output:

==========================================================================================
Layer (type:depth-idx)                   Output Shape              Param #
==========================================================================================
Sequential                               [1, 10]                   --
├─Flatten: 1-1                           [1, 784]                  --
├─Linear: 1-2                            [1, 100]                  78,500
├─ReLU: 1-3                              [1, 100]                  --
├─Linear: 1-4                            [1, 10]                   1,010
├─LogSoftmax: 1-5                        [1, 10]                   --
==========================================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
Total mult-adds (M): 0.08
==========================================================================================
Input size (MB): 0.00
Forward/backward pass size (MB): 0.00
Params size (MB): 0.32
Estimated Total Size (MB): 0.32
==========================================================================================
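To double-check the total parameter count directly (a small addition, not in the original tutorial):

print(sum(p.numel() for p in net.parameters()))  # 79510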

An alternative way to define the model

class MyNet(nn.Module):
    def __init__(self):
        super(MyNet, self).__init__()
        self.flatten = nn.Flatten()
        self.hidden = nn.Linear(784, 100)
        self.out = nn.Linear(100, 10)

    def forward(self, x):
        x = self.flatten(x)
        x = self.hidden(x)
        x = relu(x)  # functional ReLU (not a registered module)
        x = self.out(x)
        x = log_softmax(x, dim=1)  # dim=1: normalize over the 10 class scores; functional, not a module
        return x


net = MyNet()

summary(net, input_size=(1, 28, 28), device='cpu')

Output. Because relu and log_softmax are called as functions inside forward rather than registered as modules, they do not appear as layers in the summary:

==========================================================================================
Layer (type:depth-idx)                   Output Shape              Param #
==========================================================================================
MyNet                                    [1, 10]                   --
├─Flatten: 1-1                           [1, 784]                  --
├─Linear: 1-2                            [1, 100]                  78,500
├─Linear: 1-3                            [1, 10]                   1,010
==========================================================================================
Total params: 79,510
Trainable params: 79,510
Non-trainable params: 0
Total mult-adds (M): 0.08
==========================================================================================
Input size (MB): 0.00
Forward/backward pass size (MB): 0.00
Params size (MB): 0.32
Estimated Total Size (MB): 0.32
==========================================================================================
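If you want the activations to show up in the summary as well, one option (a sketch with a hypothetical class name, assuming the same architecture) is to register them as modules instead of using the functional calls:

class MyNetModules(nn.Module):
    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.hidden = nn.Linear(784, 100)
        self.relu = nn.ReLU()
        self.out = nn.Linear(100, 10)
        self.log_softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        # same computation as MyNet, but every step is a registered module
        return self.log_softmax(self.out(self.relu(self.hidden(self.flatten(x)))))

summary(MyNetModules(), input_size=(1, 28, 28), device='cpu')  # now lists ReLU and LogSoftmax as layers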

Change in accuracy after adding a hidden layer

net = MyNet()

summary(net, input_size=(1, 28, 28), device='cpu')

hist = train(net, train_loader, test_loader, epochs=5)
plot_results(hist)
plt.show()

(Figure: accuracy and loss curves for the network with a hidden layer)

Complete code:

import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
from torchinfo import summary
from torchvision.transforms import ToTensor
from pytorchcv import load_mnist, plot_results
from torch.utils import data
from torch.nn.functional import relu, log_softmax

# load_mnist()  # the four lines below do the same as this function, but modifying builtins directly is not recommended

data_train = torchvision.datasets.MNIST("./data", download=True, train=True, transform=ToTensor())
data_test = torchvision.datasets.MNIST("./data", download=True, train=False, transform=ToTensor())
train_loader = torch.utils.data.DataLoader(data_train, batch_size=64)
test_loader = torch.utils.data.DataLoader(data_test, batch_size=64)  # we can use larger batch size for testing

net = nn.Sequential(
    nn.Flatten(),
    nn.Linear(784, 10),  # 784 inputs, 10 outputs
    nn.LogSoftmax(dim=1))


# print('Digit to be predicted: ', data_train[0][1])
# print(torch.exp(net(data_train[0][0])))


def train_epoch(net, dataloader, lr=0.01, optimizer=None, loss_fn=nn.NLLLoss()):
    optimizer = optimizer or torch.optim.Adam(net.parameters(), lr=lr)
    net.train()  # switch the network to training mode
    total_loss, acc, count = 0, 0, 0
    for features, labels in dataloader:  # iterate over all batches in the dataset
        optimizer.zero_grad()
        out = net(features)  # compute the network's predictions for this batch
        loss = loss_fn(out, labels)  # compute the loss, i.e. how far predictions are from the expected labels
        loss.backward()  # backpropagate to compute gradients of the loss w.r.t. the weights
        optimizer.step()  # adjust the weights to reduce the loss
        total_loss += loss
        _, predicted = torch.max(out, 1)
        acc += (predicted == labels).sum()  # count correctly predicted samples (for accuracy)
        count += len(labels)
    return total_loss.item() / count, acc.item() / count  # average loss, accuracy


a = train_epoch(net, train_loader)
print(a)


def validate(net, dataloader, loss_fn=nn.NLLLoss()):
    net.eval()
    count, acc, loss = 0, 0, 0
    with torch.no_grad():
        for features, labels in dataloader:
            out = net(features)
            loss += loss_fn(out, labels)
            pred = torch.max(out, 1)[1]
            acc += (pred == labels).sum()
            count += len(labels)
    return loss.item() / count, acc.item() / count


b = validate(net, test_loader)
print(b)


def train(net, train_loader, test_loader, optimizer=None, lr=0.01, epochs=10, loss_fn=nn.NLLLoss()):
    optimizer = optimizer or torch.optim.Adam(net.parameters(), lr=lr)
    res = {'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': []}
    for ep in range(epochs):
        tl, ta = train_epoch(net, train_loader, optimizer=optimizer, lr=lr, loss_fn=loss_fn)
        vl, va = validate(net, test_loader, loss_fn=loss_fn)
        print(f"Epoch {ep:2}, Train acc={ta:.3f}, Val acc={va:.3f}, Train loss={tl:.3f}, Val loss={vl:.3f}")
        res['train_loss'].append(tl)
        res['train_acc'].append(ta)
        res['val_loss'].append(vl)
        res['val_acc'].append(va)
    return res


# re-initialize the network to start from scratch
net = nn.Sequential(
    nn.Flatten(),
    nn.Linear(784, 10),  # 784 inputs, 10 outputs
    nn.LogSoftmax(dim=1))

hist = train(net, train_loader, test_loader, epochs=5)

plt.figure(figsize=(15, 5))
plt.subplot(121)
plt.plot(hist['train_acc'], label='Training acc')
plt.plot(hist['val_acc'], label='Validation acc')
plt.legend()
plt.subplot(122)
plt.plot(hist['train_loss'], label='Training loss')
plt.plot(hist['val_loss'], label='Validation loss')
plt.legend()
plt.show()

net = nn.Sequential(
    nn.Flatten(),
    nn.Linear(784, 100),  # 784 inputs, 100 outputs
    nn.ReLU(),  # Activation Function
    nn.Linear(100, 10),  # 100 inputs, 10 outputs
    nn.LogSoftmax(dim=1))  # dim=1 normalizes over the 10 class scores of each sample

summary(net, input_size=(1, 28, 28))  # parameters: 784*100+100 = 78,500 + 100*10+10 = 1,010 = 79,510 total


# an alternative syntax for defining the model

class MyNet(nn.Module):
    def __init__(self):
        super(MyNet, self).__init__()
        self.flatten = nn.Flatten()
        self.hidden = nn.Linear(784, 100)
        self.out = nn.Linear(100, 10)

    def forward(self, x):
        x = self.flatten(x)
        x = self.hidden(x)
        x = relu(x)  # functional ReLU (not a registered module)
        x = self.out(x)
        x = log_softmax(x, dim=1)  # dim=1: normalize over the 10 class scores; functional, not a module
        return x


net = MyNet()

summary(net, input_size=(1, 28, 28), device='cpu')

hist = train(net, train_loader, test_loader, epochs=5)
plot_results(hist)
plt.show()
