《PyTorch深度学习实践》课上代码笔记 三

自学笔记

课程老师:刘二大人 河北工业大学教师 https://liuii.github.io
课程来源:https://www.bilibili.com/video/BV1Y7411d7Ys

推荐一个服务器租赁的平台

相比于阿里腾讯等平台,亲测性价比要高,显卡有1080Ti、2080Ti、3080Ti等,运行速度自然比自己的笔记本快,也能保护自己心爱的笔记本,实例中有Jupyter Notebook、Visual Studio Code编辑器,编写调试程序方便,适合新手上手。
链接:https://featurize.cn?s=85167577b36f44299a332f2c8dff344f

网页主界面
进入实例后的界面

八、Softmax_Classifier_Linear

#导入相应的包
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# Mini-batch size shared by the training and test loaders.
batch_size = 64

# ----- Dataset preparation -----

# Image transform: PIL image -> tensor in [0, 1], then channel normalization.
# (0.1307,), (0.3081,) are presumably MNIST's per-channel mean/std — TODO confirm.
transform = transforms.Compose([ transforms.ToTensor(),
                                transforms.Normalize((0.1307, ),(0.3081, ))])
# Training split (downloaded on first use into ../dataset/mnist/).
train_dataset = datasets.MNIST(root='../dataset/mnist/',
                               train=True, download=True,
                               transform=transform)
# Training loader: reshuffled every epoch.
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
# Test split.
test_dataset = datasets.MNIST(root='../dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)
# Test loader: fixed order for reproducible evaluation.
test_loader = DataLoader(test_dataset,
                          shuffle=False,
                          batch_size=batch_size)

# Model definition: a fully-connected (linear) classifier.
class Net(torch.nn.Module):
    """Five-layer fully-connected network for 28x28 MNIST digits."""

    def __init__(self):
        super(Net, self).__init__()
        # 28*28 = 784 input pixels, funnelled down to 10 class logits.
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)

    def forward(self, x):
        """Flatten the batch and apply the ReLU-activated layer stack."""
        # Reshape to (N, 784); -1 lets the batch dimension be inferred.
        x = x.view(-1, 784)
        # ReLU after every hidden layer; the final layer stays linear because
        # the loss used with this model (CrossEntropyLoss) expects raw logits.
        for hidden in (self.l1, self.l2, self.l3, self.l4):
            x = F.relu(hidden(x))
        return self.l5(x)
# Instantiate the model.
model = Net()

# Loss: cross entropy over raw logits.
# Optimizer: plain SGD with momentum over all model parameters.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# Training helper: one complete pass over the training set.
def train(epoch):
    """Train for one epoch; print the mean loss every 300 mini-batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()
        # Forward pass, loss, backward pass, parameter update.
        loss = criterion(model(inputs), target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report the running average once every 300 batches, then reset it.
        if (batch_idx + 1) % 300 == 0:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

# Evaluation helper over the whole test set.
def test():
    """Evaluate the model on the test set and print overall accuracy."""
    correct = 0
    total = 0
    # Disable gradient tracking during inference: saves memory and compute.
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            # Predicted class = index of the max logit along the class dim.
            # Use `outputs` directly: the legacy `.data` attribute bypasses
            # autograd bookkeeping and is unnecessary inside no_grad().
            _, predicted = torch.max(outputs, dim=1)
            # Count samples and correct predictions in this batch.
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # Integer (truncated) percentage over the whole test set.
    print('Accuracy on test set: %d %%' % (100 * correct / total))

if __name__ == '__main__':
    # Run 10 epochs.
    for epoch in range(10):
        # Train for one epoch...
        train(epoch)
        # ...then report test-set accuracy.
        test()

九、Softmax_Classifier_CNN

#导入相应的包
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# Mini-batch size shared by the training and test loaders.
batch_size = 64

# ----- Dataset preparation -----
# Image transform: PIL image -> tensor in [0, 1], then channel normalization.
# (0.1307,), (0.3081,) are presumably MNIST's per-channel mean/std — TODO confirm.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,),(0.3081,))])
# Training split (downloaded on first use into ../dataset/mnist/).
train_dataset = datasets.MNIST(root='../dataset/mnist/',
                               train=True, download=True,
                               transform=transform)
# Training loader: reshuffled every epoch.
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
# Test split.
test_dataset = datasets.MNIST(root='../dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)
# Test loader: fixed order for reproducible evaluation.
test_loader = DataLoader(test_dataset,
                          shuffle=False,
                          batch_size=batch_size)

# Model definition: a small convolutional network.
class Net(torch.nn.Module):
    """Two conv+pool stages followed by a single linear classifier."""

    def __init__(self):
        super(Net, self).__init__()
        # Conv stage 1: 1 input channel -> 10 feature maps, 5x5 kernel.
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        # Conv stage 2: 10 -> 20 feature maps, 5x5 kernel.
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=5)
        # Shared 2x2 max pooling (stateless, so a single instance suffices).
        self.pooling = torch.nn.MaxPool2d(2)
        # Classifier: 320 flattened features -> 10 class logits.
        self.fc = torch.nn.Linear(320, 10)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        n = x.size(0)  # batch size, kept for the flatten below
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        # Flatten each sample's feature maps before the linear layer.
        return self.fc(x.view(n, -1))
# Instantiate the model.
model = Net()

# Loss: cross entropy over raw logits.
# Optimizer: plain SGD with momentum over all model parameters.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# Training helper: one complete pass over the training set.
def train(epoch):
    """Train for one epoch; print the mean loss every 300 mini-batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()
        # Forward pass, loss, backward pass, parameter update.
        loss = criterion(model(inputs), target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report the running average once every 300 batches, then reset it.
        if (batch_idx + 1) % 300 == 0:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

# Evaluation helper over the whole test set.
def test():
    """Evaluate the model on the test set and print overall accuracy."""
    correct = 0
    total = 0
    # Disable gradient tracking during inference: saves memory and compute.
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            # Predicted class = index of the max logit along the class dim.
            # Use `outputs` directly: the legacy `.data` attribute bypasses
            # autograd bookkeeping and is unnecessary inside no_grad().
            _, predicted = torch.max(outputs, dim=1)
            # Count samples and correct predictions in this batch.
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # Integer (truncated) percentage over the whole test set.
    print('Accuracy on test set: %d %%' % (100 * correct / total))

if __name__ == '__main__':
    # Run 10 epochs.
    for epoch in range(10):
        # Train for one epoch...
        train(epoch)
        # ...then report test-set accuracy.
        test()

十、Softmax_Classifier_CNN_plus

课上老师留的作业,用了三个卷积层、三个pooling、三个relu,仅改动模型那块即可。

#导入相应的包
import torch
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim

# Mini-batch size shared by the training and test loaders.
batch_size = 64

# ----- Dataset preparation -----
# Image transform: PIL image -> tensor in [0, 1], then channel normalization.
# (0.1307,), (0.3081,) are presumably MNIST's per-channel mean/std — TODO confirm.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,),(0.3081,))])
# Training split (downloaded on first use into ../dataset/mnist/).
train_dataset = datasets.MNIST(root='../dataset/mnist/',
                               train=True, download=True,
                               transform=transform)
# Training loader: reshuffled every epoch.
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
# Test split.
test_dataset = datasets.MNIST(root='../dataset/mnist/',
                              train=False,
                              download=True,
                              transform=transform)
# Test loader: fixed order for reproducible evaluation.
test_loader = DataLoader(test_dataset,
                          shuffle=False,
                          batch_size=batch_size)

# Model definition: three conv+pool stages (the homework variant).
class Net(torch.nn.Module):
    """Three conv/pool/ReLU stages followed by one linear classifier."""

    def __init__(self):
        super(Net, self).__init__()
        # Stage 1: 1 -> 10 channels, 5x5 kernel  (28 -> 24, pooled to 12).
        self.conv1 = torch.nn.Conv2d(1, 10, kernel_size=5)
        # Stage 2: 10 -> 20 channels, 3x3 kernel (12 -> 10, pooled to 5).
        self.conv2 = torch.nn.Conv2d(10, 20, kernel_size=3)
        # Stage 3: 20 -> 30 channels, 3x3 kernel, padding=1 (5 -> 5, pooled to 2).
        self.conv3 = torch.nn.Conv2d(20, 30, kernel_size=3, padding=1)
        # Shared 2x2 max pooling (stateless, so a single instance suffices).
        self.pooling = torch.nn.MaxPool2d(2)
        # Classifier: 30 * 2 * 2 = 120 flattened features -> 10 class logits.
        self.fc = torch.nn.Linear(120, 10)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        n = x.size(0)  # batch size, kept for the flatten below
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = F.relu(self.pooling(self.conv3(x)))
        # Flatten each sample's feature maps before the linear layer.
        return self.fc(x.view(n, -1))
# Instantiate the model.
model = Net()

# Loss: cross entropy over raw logits.
# Optimizer: plain SGD with momentum over all model parameters.
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# Training helper: one complete pass over the training set.
def train(epoch):
    """Train for one epoch; print the mean loss every 300 mini-batches."""
    running_loss = 0.0
    for batch_idx, (inputs, target) in enumerate(train_loader):
        # Clear gradients accumulated from the previous step.
        optimizer.zero_grad()
        # forward + backward + update
        loss = criterion(model(inputs), target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Report the running average once every 300 batches, then reset it.
        if (batch_idx + 1) % 300 == 0:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    """Evaluate the model on the test set and print overall accuracy."""
    correct = 0
    total = 0
    # Disable gradient tracking during inference: saves memory and compute.
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            # Predicted class = index of the max logit along the class dim.
            # Use `outputs` directly: the legacy `.data` attribute bypasses
            # autograd bookkeeping and is unnecessary inside no_grad().
            _, predicted = torch.max(outputs, dim=1)
            # Count samples and correct predictions in this batch.
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    # Integer (truncated) percentage over the whole test set.
    print('Accuracy on test set: %d %%' % (100 * correct / total))

if __name__ == '__main__':
    # Run 10 epochs.
    for epoch in range(10):
        # Train for one epoch...
        train(epoch)
        # ...then report test-set accuracy.
        test()
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值