MNIST4_pytorch: NN Training and Prediction

Training and prediction of an NN model on the MNIST dataset with PyTorch.
Part of the script is shown below; see the author's GitHub for the full script.

bacth_data is a simple batch generator.
get_TrainTest_df is the data-loading function.
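
The full script keeps these helpers in utils.utils_tools. A minimal sketch of what they might look like (the exact signatures and internals are assumptions; only the names appear in this post):

import numpy as np
import torch
import torchvision

def bacth_data(x, y, batch_size=128):
    # Assumed behavior: yield shuffled (x, y) minibatches as torch tensors.
    idx = np.random.permutation(x.shape[0])
    for i in range(0, len(idx), batch_size):
        sl = idx[i:i + batch_size]
        yield torch.tensor(x[sl], dtype=float), torch.tensor(y[sl], dtype=torch.long)

def get_TrainTest_df():
    # Assumed behavior: return (x_tr, y_tr, x_te, y_te) as numpy arrays with
    # images flattened to 784 features and pixel values scaled to [0, 1].
    tr = torchvision.datasets.MNIST(root='./data', train=True, download=True)
    te = torchvision.datasets.MNIST(root='./data', train=False, download=True)
    x_tr = tr.data.numpy().reshape(-1, 784) / 255.0
    x_te = te.data.numpy().reshape(-1, 784) / 255.0
    return x_tr, tr.targets.numpy(), x_te, te.targets.numpy()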

__doc__ = """
Train : 0.98
Test : 0.954
"""

import numpy as np
from utils.utils_tools import clock, get_TrainTest_df, bacth_data
import warnings
warnings.filterwarnings(action='ignore')
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
BATCH_SIZE=128

class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        # input: [batch_size, 784] flattened MNIST images
        self.l1 = nn.Linear(784, 256)
        self.active_1 = F.relu
        self.d1 = nn.Linear(256, 32)
        self.active_2 = F.relu
        self.d2 = nn.Linear(32, 10)
        # NOTE: nn.CrossEntropyLoss already applies log-softmax internally,
        # so feeding it softmax output double-applies softmax; kept here to
        # reproduce the original results (see the idiomatic variant below).
        self.softmax = F.softmax
        self._compile_opt()
        
    def forward(self, x):
        x = x.flatten(start_dim=1)  # flatten to [batch_size, 784] before the first linear layer
        x = self.active_1(self.l1(x))
        x = self.active_2(self.d1(x))
        logits = self.d2(x)
        out = self.softmax(logits, dim=1)
        return out

    def model_test(self, lossfunc, x_te: np.ndarray, y_te: np.ndarray):
        x_te, y_te = torch.tensor(x_te, dtype=float), torch.tensor(y_te, dtype=torch.long)
        out = self(x_te)
        loss_ = lossfunc(out, y_te)
        loss_out = loss_.detach().item()
        acc_out = (torch.argmax(out, 1).flatten() == y_te).type(torch.float).mean().item()
        return loss_out, acc_out

    def _compile_opt(self):
        self.optm = torch.optim.Adam(self.parameters(), lr=0.001)
        self.loss = nn.CrossEntropyLoss()
 
    def batch_backward(self, x_t, y_t):
        # forward + backward + loss
        out = self(x_t)
        loss_ = self.loss(out, y_t)
        self.optm.zero_grad()
        loss_.backward() 
        # update model parameters
        self.optm.step()
        return out, loss_
    
    def predict(self, x, batch_size, loss_flag=False, y_te=None):
        loss_total = 0.0
        out_list = []
        with torch.no_grad():  # no gradients needed at inference time
            for i in tqdm(range(0, x.shape[0], batch_size)):
                x_t = torch.tensor(x[i:i + batch_size], dtype=float)
                out = self(x_t)
                if loss_flag:
                    y_t = torch.tensor(y_te[i:i + batch_size], dtype=torch.long)
                    loss_total += self.loss(out, y_t).item()
                out_list.append(torch.argmax(out, 1).flatten())
        return torch.cat(out_list).numpy(), loss_total

@clock
def main():
    torch.set_default_tensor_type(torch.DoubleTensor)
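    # DoubleTensor default makes the model weights float64, matching the
    # float64 numpy arrays from get_TrainTest_df and torch.tensor(..., dtype=float).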
    nn_model = MyModel()
    nn_model.to('cpu')
    print('Loading Data ...')
    x_tr, y_tr, x_te, y_te = get_TrainTest_df()
    print(x_tr.shape)
    print(np.unique(y_tr))

    for epoch in range(80):
        train_running_loss = 0.0
        total_count = 0.0
        true_count = 0.0
        # iterate the batch generator directly; it stops on its own when exhausted
        for x_t, y_t in bacth_data(x_tr, y_tr):
            out, loss_ = nn_model.batch_backward(x_t, y_t)
            train_running_loss += loss_.detach().item()
            pred_out = torch.argmax(out, 1).flatten()
            true_count += (pred_out == y_t).type(torch.float).sum().item()
            total_count += x_t.shape[0]
        train_acc = true_count / total_count
        print(f'Epoch: [ {epoch} ] | Train Loss {train_running_loss:.4f} | Train Acc: {train_acc:.2f}')

        if epoch % 10 == 0:
            test_pred, loss_total = nn_model.predict(x_te, BATCH_SIZE, loss_flag=True, y_te=y_te)
            acc = (y_te == test_pred).mean()
            print(f'>>> | Test Loss {loss_total:.4f} | Test Acc: {acc:.3f}')

    test_pred, loss_total = nn_model.predict(x_te, BATCH_SIZE, loss_flag=True, y_te=y_te)
    acc = (y_te == test_pred).mean()
    print(f'>>> | Test Loss {loss_total:.4f} | Test Acc: {acc:.3f}')
    return nn_model


if __name__ == '__main__':
    main()
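
As the comments above note, nn.CrossEntropyLoss combines log-softmax and negative log-likelihood internally, so the usual PyTorch idiom is to return raw logits from forward and apply softmax (or argmax) only at prediction time. A minimal sketch of that variant (not the original code, just the standard pattern):

import torch
import torch.nn as nn
import torch.nn.functional as F

class MyModelLogits(nn.Module):
    """Same architecture, but forward returns raw logits."""
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(784, 256)
        self.d1 = nn.Linear(256, 32)
        self.d2 = nn.Linear(32, 10)

    def forward(self, x):
        x = F.relu(self.l1(x.flatten(start_dim=1)))
        x = F.relu(self.d1(x))
        return self.d2(x)  # raw logits; CrossEntropyLoss applies log-softmax itself

# training:   loss = nn.CrossEntropyLoss()(model(x_t), y_t)
# prediction: pred = model(x_t).argmax(dim=1)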

Results


Epoch: [ 77 ] | Train Loss 162.8703 | Train Acc: 0.98
Epoch: [ 78 ] | Train Loss 162.8459 | Train Acc: 0.98
Epoch: [ 79 ] | Train Loss 162.8101 | Train Acc: 0.98
100%|██████████| 438/438 [00:01<00:00, 415.49it/s]
>>> | Test Loss 660.0944 | Test Acc: 0.954


Train: 0.98
Test: 0.954