Teacher Liu Er, Lecture 9:

Link to Lecture 9


import matplotlib.pyplot as plt
import torch
from torchvision import transforms  # for preprocessing the data
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F  # for the relu() activation function
import torch.optim as optim
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"  # workaround for the duplicate OpenMP runtime error that can occur on Windows


# 1.Prepare Dataset
batch_size = 64
transform = transforms.Compose([
    transforms.ToTensor(),  # Convert the PIL Image to Tensor
    transforms.Normalize((0.1307,), (0.3081,))  # mean and std of MNIST; standardizes the data to zero mean and unit variance
])
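
# Optional sanity check (a minimal sketch, not part of the lecture code):
# Normalize maps a pixel value v in [0, 1] to (v - mean) / std.
_t = transforms.Normalize((0.1307,), (0.3081,))(torch.full((1, 2, 2), 0.5))
assert torch.isclose(_t[0, 0, 0], torch.tensor((0.5 - 0.1307) / 0.3081))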

train_dataset = datasets.MNIST(root=r"D:\Python-learn\深度学习",  # path to the previously downloaded data; set download=True if you have not downloaded it yet
                               train=True,
                               download=False,
                               transform=transform)
train_loader = DataLoader(train_dataset,
                          shuffle=True,
                          batch_size=batch_size)
test_dataset = datasets.MNIST(root=r"D:\Python-learn\深度学习",
                              train=False,
                              download=True,
                              transform=transform)
test_loader = DataLoader(test_dataset,
                         shuffle=False,
                         batch_size=batch_size)
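
# Optional sanity check (a sketch): one batch of images has shape
# (batch_size, 1, 28, 28), so 1 * 28 * 28 = 784 matches the input size of
# the first linear layer in the model below.
_images, _labels = next(iter(train_loader))
assert _images.shape == (batch_size, 1, 28, 28)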

# 2.Design Model
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = torch.nn.Linear(784, 512)
        self.l2 = torch.nn.Linear(512, 256)
        self.l3 = torch.nn.Linear(256, 128)
        self.l4 = torch.nn.Linear(128, 64)
        self.l5 = torch.nn.Linear(64, 10)  # outputs an N x 10 matrix of raw scores for the digits 0-9; softmax turns these into probabilities

    def forward(self, x):
        x = x.view(-1, 784)  # -1 lets PyTorch infer the batch dimension so that the total number of elements is unchanged
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        x = F.relu(self.l4(x))
        return self.l5(x)  # no activation on the last layer; CrossEntropyLoss applies the softmax internally

model = Net()
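
# Quick shape check on the freshly built model (a sketch with dummy data,
# not part of the lecture code): a batch of N images should come out as an
# N x 10 matrix of raw scores.
_dummy = torch.randn(4, 1, 28, 28)
assert model(_dummy).shape == (4, 10)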


# 3.Construct Loss and Optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
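
# Sanity check (a sketch with made-up logits): CrossEntropyLoss is
# LogSoftmax followed by NLLLoss, which is exactly why forward() returns
# raw logits without a final activation.
_logits = torch.randn(3, 10)
_target = torch.tensor([1, 0, 4])
assert torch.allclose(criterion(_logits, _target),
                      F.nll_loss(F.log_softmax(_logits, dim=1), _target))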

# 4. Train and Test
def train(epoch):  # one training epoch, wrapped in a function
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        optimizer.zero_grad()  # zero the gradients before this step's backward pass

        # forward + backward + update
        # get the model's predictions, shape (batch_size, 10)
        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()  # .item() extracts the Python number from the loss tensor
        if batch_idx % 300 == 299:  # print the average loss once every 300 batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients are needed during evaluation
        for data in test_loader:
            images, labels = data
            outputs = model(images)
            _, predict = torch.max(outputs.data, dim=1)  # max along dim=1 (over the 10 class scores); keep only the index of the maximum as the prediction
            total += labels.size(0)  # labels has shape (N,), so size(0) is the batch size
            correct += (predict == labels).sum().item()
    print('Accuracy on test set: %d %%' % (100 * correct / total))
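
# Illustration of torch.max along dim=1 (a sketch with made-up scores):
# it returns a (values, indices) pair, and the index of the largest score
# is the predicted digit.
_vals, _idx = torch.max(torch.tensor([[0.1, 2.0, 0.3]]), dim=1)
assert _idx.item() == 1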

if __name__ == '__main__':
    for epoch in range(10):  # range(10) counts from 0, so this trains for 10 epochs
        train(epoch)
        test()
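
# Note: plt (matplotlib) is imported above but unused in this listing. A common
# extension (a sketch, not the original lecture code) is to have test() return
# the accuracy and plot it per epoch:
#   acc_list = []
#   for epoch in range(10):
#       train(epoch)
#       acc_list.append(test())  # requires test() to return 100 * correct / total
#   plt.plot(range(1, 11), acc_list)
#   plt.xlabel('epoch')
#   plt.ylabel('accuracy (%)')
#   plt.show()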
