Building a ResNet-50 Model and Training It on CIFAR-10

This post shows how to build a ResNet-50 model in PyTorch and train it on the CIFAR-10 dataset. It first checks whether a GPU is available, then sets the training hyperparameters: the number of epochs, the batch size, and the learning rate. The images are then preprocessed, the CIFAR-10 dataset is loaded, and the ResNet-50 architecture is defined. The model is trained on the training set, with the learning rate decayed periodically, and its performance is evaluated on the test set.
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms


# Use the GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

num_epochs = 100       # number of training epochs
batch_size = 100       # mini-batch size
learning_rate = 0.01   # initial learning rate

# Image preprocessing and augmentation
transform = transforms.Compose([
                transforms.Pad(4),
                transforms.RandomHorizontalFlip(),
                transforms.RandomCrop(32),
                transforms.ToTensor()])

# Download the CIFAR-10 dataset
train_dataset = torchvision.datasets.CIFAR10(root='../data/',
                        train=True, 
                        transform=transform,
                        download=True)

test_dataset = torchvision.datasets.CIFAR10(root='../data/',
                       train=False, 
                       transform=transforms.ToTensor())
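
# Hedged sanity check (assumed, not part of the original post): the augmented
# training samples stay 3x32x32, since Pad(4) grows 32x32 to 40x40 and
# RandomCrop(32) then samples a 32x32 window back out of it.
assert train_dataset[0][0].shape == (3, 32, 32)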

# Data loaders
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                       batch_size=batch_size,
                       shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                      batch_size=batch_size,
                      shuffle=False)

class BasicBlock(nn.Module):
    """搭建BasicBlock模块"""
    expansion = 1

    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        super(BasicBlock, self).__init__()

        # No bias is needed before a BN layer: BN's mean subtraction would cancel it anyway
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, padding=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)    # BN sits between the conv and the ReLU
        self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)

        self.downsample = downsample
        self.relu = nn.ReLU(inplace=True)
        
    # Forward pass: the input X goes through two conv + BN stages to produce Y.
    # If a downsample layer exists, the shortcut is downsampled so its size
    # matches the main branch's output; the identity mapping is then added to
    # Y and the sum is passed through ReLU.
    def forward(self, X):
        identity = X
        Y = self.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))

        if self.downsample is not None:    # make the shortcut's size match the main branch before the addition
            identity = self.downsample(X)

        return self.relu(Y + identity)
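
# A minimal shape check (assumed, with hypothetical names; not part of the
# original post): with stride=1 and no downsample, a BasicBlock preserves its
# input shape, so the residual addition is well defined.
_basic = BasicBlock(64, 64)
assert _basic(torch.randn(2, 64, 56, 56)).shape == (2, 64, 56, 56)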



class BottleNeck(nn.Module):
    """搭建BottleNeck模块"""
    # BottleNeck模块最终输出out_channel是Residual模块输入in_channel的size的4倍(Residual模块输入为64),shortcut分支in_channel
    # 为Residual的输入64,因此需要在shortcut分支上将Residual模块的in_channel扩张4倍,使之与原始输入图片X的size一致
    expansion = 4

    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        super(BottleNeck, self).__init__()
        # With the default 224x224 input, the stem (7x7 conv + 3x3 maxpool) has already reduced the first BottleNeck's input to 64 channels
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)    # BN sits between the conv and the ReLU
        self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)
        self.conv3 = nn.Conv2d(out_channel, out_channel * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channel * self.expansion)  # the third conv expands channels to 4x the block's nominal width

        self.downsample = downsample
        self.relu = nn.ReLU(inplace=True)

    # Forward pass
    def forward(self, X):
        identity = X

        Y = self.relu(self.bn1(self.conv1(X)))
        Y = self.relu(self.bn2(self.conv2(Y)))
        Y = self.bn3(self.conv3(Y))

        if self.downsample is not None:    # make the shortcut's size match the main branch before the addition
            identity = self.downsample(X)

        return self.relu(Y + identity)
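
# A minimal shape check (assumed, with hypothetical names; not part of the
# original post): a BottleNeck expands channels 4x, so the shortcut needs a
# 1x1-conv downsample whenever in_channel != out_channel * expansion.
_ds = nn.Sequential(nn.Conv2d(64, 256, kernel_size=1, bias=False),
                    nn.BatchNorm2d(256))
_bottleneck = BottleNeck(64, 64, downsample=_ds)
assert _bottleneck(torch.randn(2, 64, 56, 56)).shape == (2, 256, 56, 56)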


class ResNet(nn.Module):
    """搭建ResNet-layer通用框架"""
    # num_classes是训练集的分类个数,include_top是在ResNet的基础上搭建更加复杂的网络时用到,此处用不到
    def __init__(self, residual, num_residuals, num_classes=1000, include_top=True):
        super(ResNet, self).__init__()

        self.out_channel = 64    # stem output channels, i.e. the number of conv kernels in the first layer
        self.include_top = include_top

        self.conv1 = nn.Conv2d(3, self.out_channel, kernel_size=7, stride=2, padding=3,
                               bias=False)    # 3 input channels, matching RGB image data
        self.bn1 = nn.BatchNorm2d(self.out_channel)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.conv2 = self.residual_block(residual, 64, num_residuals[0])
        self.conv3 = self.residual_block(residual, 128, num_residuals[1], stride=2)
        self.conv4 = self.residual_block(residual, 256, num_residuals[2], stride=2)
        self.conv5 = self.residual_block(residual, 512, num_residuals[3], stride=2)
        if self.include_top:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))    # output_size = (1, 1)
            self.fc = nn.Linear(512 * residual.expansion, num_classes)

        # Initialize the conv and norm layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')  # fan_out preserves the gradient magnitudes in the backward pass
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def residual_block(self, residual, channel, num_residuals, stride=1):
        downsample = None

        # Used on the shortcut branch of the first layer of each conv_x stage, where the
        # previous stage's out_channel differs from the in_channel this stage expects:
        # downsample raises the channel count to what the rest of the stage requires,
        # and stride=2 halves the spatial size (conv2 does not downsample; conv3-5 do,
        # giving sizes 56, 28, 14, 7 for a 224 input).
        if stride != 1 or self.out_channel != channel * residual.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.out_channel, channel * residual.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(channel * residual.expansion))

        block = []    # collects every layer of this conv_x stage generated by the loop below
        # The first layer of each stage decides whether the stage downsamples (later layers never do)
        block.append(residual(self.out_channel, channel, downsample=downsample, stride=stride))
        self.out_channel = channel * residual.expansion    # expand out_channel for the remaining layers

        for _ in range(1, num_residuals):
            block.append(residual(self.out_channel, channel))

        # The * unpacks the block list so nn.Sequential receives the layers as positional arguments
        return nn.Sequential(*block)

    # Forward pass
    def forward(self, X):
        Y = self.relu(self.bn1(self.conv1(X)))
        Y = self.maxpool(Y)
        Y = self.conv5(self.conv4(self.conv3(self.conv2(Y))))

        if self.include_top:
            Y = self.avgpool(Y)
            Y = torch.flatten(Y, 1)
            Y = self.fc(Y)

        return Y


# # Build a ResNet-34 model
# def resnet34(num_classes=1000, include_top=True):
#     return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)

# Build a ResNet-50 model
def resnet50(num_classes=1000, include_top=True):
    return ResNet(BottleNeck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
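
# Hedged notes (assumed, not from the original post): the "50" counts weighted
# layers: 1 stem conv + (3 + 4 + 6 + 3) BottleNecks x 3 convs each + 1 fc = 50.
# A quick forward check on a CIFAR-sized batch, with a hypothetical name:
with torch.no_grad():
    _probe = resnet50(num_classes=10)
    assert _probe(torch.randn(2, 3, 32, 32)).shape == (2, 10)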
# Model (device was already selected above)
model = resnet50(num_classes=10).to(device)    # CIFAR-10 has 10 classes


# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Helper that updates the optimizer's learning rate in place
def update_lr(optimizer, lr):    
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
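
# An equivalent alternative (assumed, not in the original post): PyTorch's
# built-in StepLR scheduler decays the LR by a fixed factor every fixed number
# of epochs, replacing the manual helper above, e.g.
#   scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=1/3)
# with scheduler.step() called once per epoch.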

# Train the model
total_step = len(train_loader)
curr_lr = learning_rate
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        if (i+1) % 100 == 0:
            print ("Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))

    # Decay the learning rate every 20 epochs
    if (epoch+1) % 20 == 0:
        curr_lr /= 3
        update_lr(optimizer, curr_lr)

# Evaluate on the test set
model.eval()    # eval mode: BatchNorm uses its running statistics
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        
        correct += (predicted == labels).sum().item()

    print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'resnet.ckpt')
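
# Hedged sketch for later reuse (assumed, with a hypothetical name): rebuild
# the architecture and load the saved weights before running inference.
restored = resnet50(num_classes=10).to(device)
restored.load_state_dict(torch.load('resnet.ckpt', map_location=device))
restored.eval()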

Files already downloaded and verified
Epoch [1/100], Step [100/500] Loss: 3.4539
Epoch [1/100], Step [200/500] Loss: 2.2621
Epoch [1/100], Step [300/500] Loss: 2.1389
Epoch [1/100], Step [400/500] Loss: 1.9445
Epoch [1/100], Step [500/500] Loss: 1.9467
...
Epoch [50/100], Step [500/500] Loss: 0.1937
...
Epoch [100/100], Step [100/500] Loss: 0.0816
Epoch [100/100], Step [200/500] Loss: 0.0797
Epoch [100/100], Step [300/500] Loss: 0.0366
Epoch [100/100], Step [400/500] Loss: 0.0397
Epoch [100/100], Step [500/500] Loss: 0.0542
Accuracy of the model on the test images: 86.67 %
