pytorch代码总结

一.实现对MNIST数据集的分类问题的代码:

import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

#prepare data
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.1307,),(0.3081,))])#ToTensor() converts PIL images to tensors; Normalize() standardizes values with MNIST's mean (0.1307) and std (0.3081)

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size = batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size = batch_size)

#design modle using class
class Net(torch.nn.Module):
    """Fully-connected classifier for 28x28 MNIST digits (784 -> ... -> 10)."""

    def __init__(self):
        super(Net, self).__init__()
        # Five linear layers that progressively shrink the feature width.
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        # Flatten (N, 1, 28, 28) images into (N, 784) row vectors;
        # -1 lets the mini-batch dimension be inferred automatically.
        x = x.view(-1, 784)
        # ReLU after every hidden layer; the final layer stays linear so the
        # raw logits can be fed to CrossEntropyLoss (which applies softmax).
        for layer in (self.l1, self.l2, self.l3, self.l4):
            x = F.relu(layer(x))
        return self.l5(x)

model = Net()

#construct criterion and optimizer
# CrossEntropyLoss expects raw logits (it applies log-softmax internally).
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(),lr=0.01,momentum=0.5)

#training cycle

# Wrap the training loop in a function.
def train(epoch):
    """Run one training epoch over train_dataloader, printing the mean loss every 300 batches."""
    running_loss = 0.0
    for batch_idx, data in enumerate(train_dataloader, 0):
        # One mini-batch of inputs and labels.
        inputs, target = data
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        # Forward pass: logits of shape (batch_size, 10).
        outputs = model(inputs)
        # CrossEntropyLoss: outputs (batch_size, 10) vs. target (batch_size,).
        loss = criterion(outputs, target)
        # Back-propagate, then update the parameters.
        loss.backward()
        optimizer.step()

        # Accumulate the Python float, not the tensor: `running_loss += loss`
        # would keep every batch's autograd graph alive and leak memory.
        running_loss += loss.item()
        # Report the average loss once every 300 mini-batches.
        if batch_idx%300 == 299:
            print('[%d,%5d] loss: %.3f' % (epoch+1,batch_idx+1,running_loss/300))
            running_loss=0.0  # reset the reporting window

# Evaluate the model on the test set and print the overall accuracy.
def test():
    """Compute and print classification accuracy on test_dataloader."""
    correct = 0  # number of correct predictions
    total = 0    # number of samples evaluated
    with torch.no_grad():  # no autograd graph needed during evaluation
        for images, labels in test_dataloader:
            outputs = model(images)
            # The predicted class is the index of the largest logit per row.
            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %%' % (100 * correct / total))

if __name__ == '__main__':
    # Train for 10 epochs, evaluating on the test set after each one.
    for epoch in range(10):
        print(f"第{epoch+1}轮训练开始了!")  # "epoch N starts" banner
        train(epoch)
        test()

只采取线性层最终的准确率能达到97%,可以通过修改训练轮数,以及优化器算法和学习率提升准确率,但是参照网上一些网络结构,分别采取了以下几种方法:①运用inception层,

#1.运用inception层
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# prepare dataset

batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # standardize with MNIST's mean and std

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# design model using class

# An Inception-style block: four parallel convolution branches whose
# outputs are concatenated along the channel dimension.
class InceptionA(torch.nn.Module):
    """Four-branch Inception block; output has 16 + 24 + 24 + 24 = 88 channels."""

    def __init__(self, in_channels):
        super(InceptionA, self).__init__()
        # Branch A: 3x3 average pooling followed by a 1x1 conv (24 channels).
        self.branch_pool = torch.nn.Conv2d(in_channels, 24, kernel_size=1)
        # Branch B: plain 1x1 convolution (16 channels).
        self.branch1x1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        # Branch C: 1x1 reduction (16) followed by a padded 5x5 conv (24).
        self.branch5x5_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch5x5_2 = torch.nn.Conv2d(16, 24, kernel_size=5, padding=2)
        # Branch D: 1x1 reduction (16) followed by two padded 3x3 convs (24, 24).
        self.branch3x3_1 = torch.nn.Conv2d(in_channels, 16, kernel_size=1)
        self.branch3x3_2 = torch.nn.Conv2d(16, 24, kernel_size=3, padding=1)
        self.branch3x3_3 = torch.nn.Conv2d(24, 24, kernel_size=3, padding=1)

    def forward(self, x):
        a = self.branch1x1(x)
        b = self.branch5x5_2(self.branch5x5_1(x))
        c = self.branch3x3_3(self.branch3x3_2(self.branch3x3_1(x)))
        d = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        # Every branch keeps the spatial size, so the feature maps can be
        # concatenated along dim=1, the channel axis of an (N, C, H, W) tensor.
        return torch.cat([a, b, c, d], dim=1)

class Net(torch.nn.Module):
    """CNN for MNIST: conv -> pool -> InceptionA, twice, then a linear head."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)   # first conv stage
        self.conv2 = torch.nn.Conv2d(88, 20, kernel_size=5)  # 88 = 3*24 + 16 channels from InceptionA
        self.incep1 = InceptionA(in_channels=10)  # follows conv1 (10 channels)
        self.incep2 = InceptionA(in_channels=20)  # follows conv2 (20 channels)
        self.mp = torch.nn.MaxPool2d(2)           # 2x2 max pooling
        self.fc = torch.nn.Linear(1408, 10)       # 88 channels * 4 * 4 spatial -> 10 classes

    def forward(self, x):
        batch = x.size(0)
        x = self.incep1(F.relu(self.mp(self.conv1(x))))
        x = self.incep2(F.relu(self.mp(self.conv2(x))))
        x = self.fc(x.view(batch, -1))
        # Raw logits: no activation here, CrossEntropyLoss applies softmax itself.
        return x


model = Net()

# construct loss and optimizer
# CrossEntropyLoss expects raw logits (it applies log-softmax internally).
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)


# training cycle forward, backward, update


def train(epoch):
    """Train for one epoch, printing the average loss every 300 mini-batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(inputs)            # logits, shape (batch_size, 10)
        loss = criterion(outputs, target)  # outputs (batch, 10) vs. target (batch,)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()  # .item() avoids retaining the graph
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    """Report classification accuracy over the whole test set."""
    correct = 0
    total = 0
    with torch.no_grad():  # evaluation only, no gradient tracking
        for images, labels in test_loader:
            outputs = model(images)
            predicted = outputs.argmax(dim=1)  # class with the largest logit
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))


if __name__ == '__main__':
    # Train for 10 epochs, testing after each epoch.
    for epoch in range(10):
        train(epoch)
        test()

②运用residual层

#2.运用residual层
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# prepare dataset

batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # standardize with MNIST's mean and std

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# design model using class

# A residual block: two 3x3 convolutions with an identity skip connection.
class ResidulBlock(torch.nn.Module):
    """y = relu(x + conv2(relu(conv1(x)))); padding keeps H and W unchanged."""

    def __init__(self, channels):
        super(ResidulBlock, self).__init__()
        self.channels = channels
        # Both convs map `channels` -> `channels`; padding=1 preserves the
        # spatial size so the skip connection can be added elementwise.
        self.conv1 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        residual = self.conv2(F.relu(self.conv1(x)))
        return F.relu(x + residual)

class Net(torch.nn.Module):
    """CNN for MNIST: conv -> pool -> residual block, twice, then a linear head."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)   # first conv stage
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)  # second conv stage
        self.rblock1 = ResidulBlock(16)  # matches conv1's 16 output channels
        self.rblock2 = ResidulBlock(32)  # matches conv2's 32 output channels
        self.mp = torch.nn.MaxPool2d(2)  # 2x2 max pooling
        self.fc = torch.nn.Linear(512, 10)  # 32 channels * 4 * 4 spatial -> 10 classes

    def forward(self, x):
        batch = x.size(0)
        x = self.rblock1(F.relu(self.mp(self.conv1(x))))
        x = self.rblock2(F.relu(self.mp(self.conv2(x))))
        x = x.view(batch, -1)  # flatten to (batch, 512)
        # Raw logits: CrossEntropyLoss applies log-softmax itself.
        return self.fc(x)


model = Net()

# construct loss and optimizer
# CrossEntropyLoss expects raw logits; SGD with momentum, lr=0.1.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)


# training cycle forward, backward, update


def train(epoch):
    """Train for one epoch, printing the average loss every 300 mini-batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(inputs)            # logits, shape (batch_size, 10)
        loss = criterion(outputs, target)  # outputs (batch, 10) vs. target (batch,)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()  # .item() avoids retaining the graph
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    """Report classification accuracy over the whole test set."""
    correct = 0
    total = 0
    with torch.no_grad():  # evaluation only, no gradient tracking
        for images, labels in test_loader:
            outputs = model(images)
            predicted = outputs.argmax(dim=1)  # class with the largest logit
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))


if __name__ == '__main__':
    # Train for 10 epochs, testing after each epoch.
    for epoch in range(10):
        train(epoch)
        test()

,③对residual层采取0.5(x+y)的优化,

#3.采用优化0.5x+0.5y
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# prepare dataset

batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # standardize with MNIST's mean and std

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# design model using class

# A residual block whose output is scaled by 0.5 (averaging the two paths).
class ResidulBlock(torch.nn.Module):
    """y = 0.5 * relu(x + conv2(relu(conv1(x)))); padding keeps H and W unchanged."""

    def __init__(self, channels):
        super(ResidulBlock, self).__init__()
        self.channels = channels
        # padding=1 preserves spatial size so the skip connection adds elementwise.
        self.conv1 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        residual = self.conv2(F.relu(self.conv1(x)))
        # ReLU is positively homogeneous, so 0.5*relu(x+y) == relu(0.5x + 0.5y).
        return 0.5 * F.relu(x + residual)

class Net(torch.nn.Module):
    """CNN for MNIST: conv -> pool -> residual block, twice, then a linear head."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)   # first conv stage
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)  # second conv stage
        self.rblock1 = ResidulBlock(16)  # matches conv1's 16 output channels
        self.rblock2 = ResidulBlock(32)  # matches conv2's 32 output channels
        self.mp = torch.nn.MaxPool2d(2)  # 2x2 max pooling
        self.fc = torch.nn.Linear(512, 10)  # 32 channels * 4 * 4 spatial -> 10 classes

    def forward(self, x):
        batch = x.size(0)
        x = self.rblock1(F.relu(self.mp(self.conv1(x))))
        x = self.rblock2(F.relu(self.mp(self.conv2(x))))
        x = x.view(batch, -1)  # flatten to (batch, 512)
        # Raw logits: CrossEntropyLoss applies log-softmax itself.
        return self.fc(x)


model = Net()

# construct loss and optimizer
# CrossEntropyLoss expects raw logits; SGD with momentum, lr=0.1.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)


# training cycle forward, backward, update


def train(epoch):
    """Train for one epoch, printing the average loss every 300 mini-batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(inputs)            # logits, shape (batch_size, 10)
        loss = criterion(outputs, target)  # outputs (batch, 10) vs. target (batch,)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()  # .item() avoids retaining the graph
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    """Report classification accuracy over the whole test set."""
    correct = 0
    total = 0
    with torch.no_grad():  # evaluation only, no gradient tracking
        for images, labels in test_loader:
            outputs = model(images)
            predicted = outputs.argmax(dim=1)  # class with the largest logit
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))


if __name__ == '__main__':
    # Train for 10 epochs, testing after each epoch.
    for epoch in range(10):
        train(epoch)
        test()

④采用对residual层采取对x进行1×1卷积层再与y相加。其最高准确率能够达到99%,代码如下:

#4.采用优化对x进行一次1x1卷积
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# prepare dataset

batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # standardize with MNIST's mean and std

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# design model using class

# A residual block where the skip path goes through a 1x1 convolution.
class ResidulBlock(torch.nn.Module):
    """y = relu(conv3(x) + conv2(relu(conv1(x)))); padding keeps H and W unchanged."""

    def __init__(self, channels):
        super(ResidulBlock, self).__init__()
        self.channels = channels
        # Two 3x3 convs on the residual path; padding=1 preserves spatial size.
        self.conv1 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        # 1x1 conv applied to the skip path before the addition.
        self.conv3 = torch.nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, x):
        residual = self.conv2(F.relu(self.conv1(x)))
        skip = self.conv3(x)
        return F.relu(skip + residual)

class Net(torch.nn.Module):
    """CNN for MNIST: conv -> pool -> residual block, twice, then a linear head."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)   # first conv stage
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)  # second conv stage
        self.rblock1 = ResidulBlock(16)  # matches conv1's 16 output channels
        self.rblock2 = ResidulBlock(32)  # matches conv2's 32 output channels
        self.mp = torch.nn.MaxPool2d(2)  # 2x2 max pooling
        self.fc = torch.nn.Linear(512, 10)  # 32 channels * 4 * 4 spatial -> 10 classes

    def forward(self, x):
        batch = x.size(0)
        x = self.rblock1(F.relu(self.mp(self.conv1(x))))
        x = self.rblock2(F.relu(self.mp(self.conv2(x))))
        x = x.view(batch, -1)  # flatten to (batch, 512)
        # Raw logits: CrossEntropyLoss applies log-softmax itself.
        return self.fc(x)


model = Net()

# construct loss and optimizer
# CrossEntropyLoss expects raw logits; SGD with momentum, lr=0.1.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)


# training cycle forward, backward, update


def train(epoch):
    """Train for one epoch, printing the average loss every 300 mini-batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(inputs)            # logits, shape (batch_size, 10)
        loss = criterion(outputs, target)  # outputs (batch, 10) vs. target (batch,)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()  # .item() avoids retaining the graph
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    """Report classification accuracy over the whole test set."""
    correct = 0
    total = 0
    with torch.no_grad():  # evaluation only, no gradient tracking
        for images, labels in test_loader:
            outputs = model(images)
            predicted = outputs.argmax(dim=1)  # class with the largest logit
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))


if __name__ == '__main__':
    # Train for 10 epochs, testing after each epoch.
    for epoch in range(10):
        train(epoch)
        test()

二.实现了对CIFAR10数据集的分类问题:

#郁涛
#开发时间:23-4-19 下午 03:11
import torch
import time
import torchvision

#准备数据集
from torch import nn
from torch.nn import Sequential, Flatten
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

train_data = torchvision.datasets.CIFAR10(root='./dataset',train=True,transform=torchvision.transforms.ToTensor(),
                                          download=True)
test_data = torchvision.datasets.CIFAR10(root='./dataset',train=False,transform=torchvision.transforms.ToTensor(),
                                          download=True)

# dataset sizes (len -> length)
train_data_size = len(train_data)
test_data_size = len(test_data)
# str.format substitutes the size into the Chinese message below
print("训练数据集的长度为:{}".format(train_data_size))
print("测试数据集的长度为:{}".format(test_data_size))

# wrap the datasets in DataLoaders
train_dataloader = DataLoader(train_data,batch_size=64)
test_dataloader = DataLoader(test_data,batch_size=64)

# Network architecture for CIFAR-10 (3x32x32 inputs, 10 classes).
class MyNet(nn.Module):
    """Three conv+pool stages followed by a two-layer linear classifier."""

    def __init__(self):
        super(MyNet, self).__init__()
        # padding=2 with 5x5 kernels keeps the spatial size; each MaxPool halves it:
        # 32 -> 16 -> 8 -> 4, giving 64 * 4 * 4 = 1024 features after flattening.
        self.model = Sequential(
            nn.Conv2d(3, 32, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=5, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        return self.model(x)
model = MyNet()
if torch.cuda.is_available():
    model = model.cuda()  # move the network to the GPU when one is present

# loss function
loss_fn = nn.CrossEntropyLoss()
if torch.cuda.is_available():
    loss_fn = loss_fn.cuda()

# optimizer
# NOTE(review): "learning_rata" is a typo for "learning_rate" (kept as-is; code unchanged).
learning_rata = 0.01
# 0.01 can also be written as 1e-2 = 1 x 10^(-2) = 1/100
optimizer = torch.optim.SGD(model.parameters(),lr=learning_rata)

# bookkeeping for the training loop: total optimizer steps so far
total_train_step = 0
# number of completed test passes
total_test_step = 0
# number of epochs
epoch = 30

# TensorBoard writer for loss/accuracy curves
writer = SummaryWriter('../logs_train')

start_time = time.time()
for i in range(epoch):
    print('------------第{}轮训练开始------------'.format(i+1))

    #训练步骤开始
    model.train()  # 让网络进入训练状态
    for data in train_dataloader:
        imgs,targets = data
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()
        outputs = model(imgs)
        loss = loss_fn(outputs,targets)
        #优化器优化模型
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step += 1
        if total_train_step % 100 ==0:
            end_time = time.time()
            print(end_time-start_time)
            print('训练次数:{},loss:{}'.format(total_train_step,loss.item()))
            writer.add_scalar('train_loss',loss.item(),total_train_step)

    #测试步骤开始
    model.eval()#让网络进行测试状态
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs,targets = data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = model(imgs)
            loss = loss_fn(outputs,targets)
            total_test_loss += loss.item()
            accuacy = (outputs.argmax(1) == targets).sum()
            total_accuracy +=accuacy.item()

    print("整体测试集上的loss:{}".format(total_test_loss))
    print("整体测试集上的正确率:{}".format(total_accuracy/test_data_size))
    writer.add_scalar('test_loss',total_test_loss,total_test_step)
    writer.add_scalar('test_accuracy',total_accuracy,total_test_step)
    total_test_step += 1

    torch.save(model,"mynet_{}.pth".format(i))
    print('模型已保存')

writer.close()

上述代码准确率只能达到65%,但是当学习率调整为0.1时会出现损失为nan(非数,not a number)的现象,查询网上才了解到这是学习率较高导致梯度爆炸的现象,把学习率改回0.01后就恢复正常了。

三.实现对FashionMNIST数据集进行分类的神经网络模型(根据上方MNIST数据集分类结果采用ResidualBlock层):

# 运用residual层
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# prepare dataset
device = torch.device('cuda'if torch.cuda.is_available() else 'cpu')  # pick GPU when available
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # NOTE(review): these are MNIST's mean/std, not FashionMNIST's — confirm intended

train_dataset = datasets.FashionMNIST(root='../dataset/Fashionmnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.FashionMNIST(root='../dataset/Fashionmnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)


# design model using class

# A residual block: two 3x3 convolutions with an identity skip connection.
class ResidulBlock(torch.nn.Module):
    """y = relu(x + conv2(relu(conv1(x)))); padding keeps H and W unchanged."""

    def __init__(self, channels):
        super(ResidulBlock, self).__init__()
        self.channels = channels
        # Both convs map `channels` -> `channels`; padding=1 preserves the
        # spatial size so the skip connection can be added elementwise.
        self.conv1 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = torch.nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        residual = self.conv2(F.relu(self.conv1(x)))
        return F.relu(x + residual)

class Net(torch.nn.Module):
    """CNN for FashionMNIST: conv -> pool -> residual block, twice, then a linear head."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5)   # first conv stage
        self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=5)  # second conv stage
        self.rblock1 = ResidulBlock(16)  # matches conv1's 16 output channels
        self.rblock2 = ResidulBlock(32)  # matches conv2's 32 output channels
        self.mp = torch.nn.MaxPool2d(2)  # 2x2 max pooling
        self.fc = torch.nn.Linear(512, 10)  # 32 channels * 4 * 4 spatial -> 10 classes

    def forward(self, x):
        batch = x.size(0)
        x = self.rblock1(F.relu(self.mp(self.conv1(x))))
        x = self.rblock2(F.relu(self.mp(self.conv2(x))))
        x = x.view(batch, -1)  # flatten to (batch, 512)
        # Raw logits: CrossEntropyLoss applies log-softmax itself.
        return self.fc(x)


model = Net()
model = model.to(device)  # move the network to the selected device

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
criterion = criterion.to(device)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.5)


# training cycle forward, backward, update


def train(epoch):
    """Train for one epoch, printing the average loss every 300 mini-batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        # Move the mini-batch to the same device as the model.
        inputs = inputs.to(device)
        target = target.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)            # logits, shape (batch_size, 10)
        loss = criterion(outputs, target)  # outputs (batch, 10) vs. target (batch,)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()  # .item() avoids retaining the graph
        if batch_idx % 300 == 299:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0


def test():
    """Report classification accuracy over the whole test set."""
    correct = 0
    total = 0
    with torch.no_grad():  # evaluation only, no gradient tracking
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            predicted = outputs.argmax(dim=1)  # class with the largest logit
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('accuracy on test set: %d %% [%d/%d]' % (100 * correct / total, correct, total))


if __name__ == '__main__':
    # Train for 30 epochs, testing after each epoch.
    for epoch in range(30):
        train(epoch)
        test()

上述代码的准确率最高可以跑到90%。

发现在FashionMNIST数据集采用在MNIST上表现较好的Residual层进行分类时只能达到90%,猜测可能是归一化参数的问题:代码沿用了MNIST的均值和方差,这组参数可能并不适用于FashionMNIST数据集。且在FashionMNIST上,Residual相对于普通线性层只由89%提升了1%到90%,提升感觉并没有在MNIST上从98%到99%那么明显。

四.总结:

        在编写上述代码时,发现搭建神经网络中最重要的是要确定它的尺寸大小(B,C,W,H),然后通过每层网络的输出数据的尺寸大小都需要注意,且常见错误就是格式问题,区分PIL,tensor,narrays类型,此外就是注意卷积层和线性层以及池化层参数,损失函数,优化器的选取,对于网络的性能有很大的影响,不一定网络层数越多越好,主要是网络架构,可以多看看pytorch官方文档中已有的模型架构,对其进行学习改造。

总结就是需要多看官方文档,多多实践,提高自己的英语水平也非常重要,特别是专业领域的英语水平,方便以后阅读前沿的文章和官方的文档说明。还有就是继续加强理论学习,阅读深度学习相关的书籍。

  • 1
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值