S02: A Hand-Written Deep Learning Framework - an MNIST Example

I wrote a simple deep learning framework as a small project that grew out of exploring autograd while learning PyTorch. The project is called kitorch. It is implemented on top of numpy, so it runs slower than PyTorch on CPU. Even so, beginners who are curious about how such a framework works may find it worth a read. The source code is available on GitHub.
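
To get a feel for what such a framework does under the hood, here is a minimal sketch of reverse-mode autograd on top of numpy. It is illustrative only, not kitorch's actual implementation; the class name Var and everything in it are made up for this example:

import numpy as np

class Var:
    """A toy variable that records how it was produced, so that
    gradients can be propagated backwards through the graph."""
    def __init__(self, data, parents=()):
        self.data = np.asarray(data, dtype=np.float64)
        self.grad = np.zeros_like(self.data)
        self.parents = parents  # pairs of (parent, local_grad_fn)

    def __add__(self, other):
        return Var(self.data + other.data,
                   parents=((self, lambda g: g),
                            (other, lambda g: g)))

    def __mul__(self, other):
        return Var(self.data * other.data,
                   parents=((self, lambda g: g * other.data),
                            (other, lambda g: g * self.data)))

    def backward(self, grad=None):
        # Seed with d(out)/d(out) = 1, accumulate into .grad,
        # then push each local contribution down to the parents.
        if grad is None:
            grad = np.ones_like(self.data)
        self.grad = self.grad + grad
        for parent, local_grad_fn in self.parents:
            parent.backward(local_grad_fn(grad))

x = Var(3.0)
y = x * x + x   # y = x^2 + x, so dy/dx = 2x + 1 = 7 at x = 3
y.backward()
print(x.grad)   # 7.0

A real framework would also topologically sort the graph so each node is visited only once; the naive recursion above revisits shared nodes, which is still correct (gradient contributions simply accumulate) but inefficient.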


Since kitorch grew out of studying PyTorch, I tried to keep its API style consistent with PyTorch's. This article walks through an MNIST example:

import numpy as np
import kitorch as kt
from kitorch import nn,optim,functional as F
# torch/torchvision are used only to load the data
import torch
from torchvision import datasets, transforms

# #### Load the data #####
BATCH_SIZE=256
train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=True, download=True, 
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=False, transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=BATCH_SIZE, shuffle=True)
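
# The constants (0.1307, 0.3081) above are the mean and std of the raw MNIST
# training pixels, used here to normalize the inputs. An optional sanity
# check (not needed for training) to re-derive them:
#
#   stats = datasets.MNIST('data', train=True, download=True,
#                          transform=transforms.ToTensor())
#   imgs = torch.stack([img for img, _ in stats])
#   print(imgs.mean().item(), imgs.std().item())  # ~0.1307, ~0.3081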

# ########### Define the model #########################
class ConvNet(nn.Module):
    def __init__(self): 
        super(ConvNet,self).__init__()
        self.conv1 = nn.Conv2d(1,10,5) # 10, 24x24
        self.bn1 = nn.BatchNorm2d(10)
        self.conv2 = nn.Conv2d(10,20,3) # 20, 10x10
        self.bn2 = nn.BatchNorm2d(20)
        self.fc1 = nn.Linear(20*10*10,200)
        self.bn3 = nn.BatchNorm1d(200)
        self.fc2 = nn.Linear(200,10)
        
    def forward(self,x:kt.Tensor):
        # x: (batch_size, 1, 28, 28)
        batch_size = x.shape[0]
        out = self.conv1(x)          # (batch_size, 10, 24, 24)
        out = self.bn1(out)
        out = F.relu(out)
        out = F.maxpool2d(out,(2,2)) # (batch_size, 10, 12, 12)
        out = self.conv2(out)        # (batch_size, 20, 10, 10)
        out = self.bn2(out)
        out = F.relu(out)
        out = out.reshape((batch_size,-1))
        out = self.fc1(out)
        out = self.bn3(out)
        out = F.relu(out)
        out = self.fc2(out)
        return F.log_softmax(out,dim=1)
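    # Shape bookkeeping behind the sizes above: with stride 1 and no padding,
    # H_out = H_in - K + 1, so conv1 (K=5): 28 -> 24, maxpool 2x2: 24 -> 12,
    # conv2 (K=3): 12 -> 10. That is why fc1 takes 20*10*10 = 2000 inputs.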

# ########### Training and evaluation ############################
def train(model, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data,target = kt.from_numpy(data.numpy().astype(np.float64)),target.numpy()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if (batch_idx+1) % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))


def test(model,test_loader):
    test_loss = 0
    correct = 0
    model.eval()
    for data, target in test_loader:
        data,target = kt.from_numpy(data.numpy().astype(np.float64)),target.numpy()
        output = model(data)
        test_loss += F.nll_loss(output, target).item() # accumulate the loss over the batches
        result = output.data.argmax(axis=1)
        correct += (result == target).sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.6f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

EPOCHS = 20
model = ConvNet()
# Test 1:
# lr = [(3,0.05),(5,0.02),(7,0.001),(10,0.005),(15,0.001),(20,0.0005)]
# optimizer = optim.SGD(model.parameters(),lr,momentum=0.9)

# Test 2:
# lr = [(3,0.05),(5,0.02),(7,0.001),(10,0.005),(15,0.001),(20,0.0005)]
# optimizer = optim.Adagrad(model.parameters(),lr=lr) 

# batch size 256, 20 epochs: reaches 99.25%
optimizer = optim.Adadelta(model.parameters())

# this configuration failed in testing
# optimizer = optim.RMSprop(model.parameters(),beta=0.5)

# batch size 256, 20 epochs: reaches 99.11%
# optimizer = optim.Adam(model.parameters())

for epoch in range(1, EPOCHS+1):
    train(model, train_loader, optimizer, epoch)
    test(model,test_loader)
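
After training, the model can make one-off predictions. A minimal sketch, reusing only calls that already appear in the script above (note that single-sample BatchNorm behaviour depends on kitorch's eval-mode running statistics):

# predict the label of a single test image
model.eval()
images, labels = next(iter(test_loader))
x = kt.from_numpy(images[:1].numpy().astype(np.float64))
pred = model(x).data.argmax(axis=1)[0]
print('predicted:', pred, 'actual:', labels[0].item())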

Execution results

## Best result
Test set: Average loss: 0.000085, Accuracy: 9938/10000 (99.38%)
Train Epoch: 19 [25344/60000 (42%)]	Loss: 0.000414
Train Epoch: 19 [50944/60000 (85%)]	Loss: 0.000137
## Elapsed: Wall time: 26min 19s