paddlepaddle-mnist example walkthrough

Reference (GitHub):

https://github.com/yangninghua/deeplearning_backbone

paddlepaddle-1.4.0

python3.6

Create a plot folder to store the loss-curve images.

Create an image folder and place a test image in it, named mnist_train_20.png.

All of the above can be changed as you like.
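
A minimal setup sketch for creating those two folders from Python (the folder names match the ones used in the script below; adjust them if you change the paths):

import os

# create the folders used below for the loss plots and the test image
for folder in ("plot", "image"):
    if not os.path.exists(folder):
        os.makedirs(folder)

You still need to copy your own mnist_train_20.png into image/.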

# -*-coding:utf-8-*-
from __future__ import print_function

import os
import argparse
from PIL import Image
import numpy
import paddle
import paddle.fluid as fluid


import sys
class Logger(object):
    def __init__(self, filename="Default.log"):
        self.terminal = sys.stdout
        self.log = open(filename, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        pass


def parse_args():
    parser = argparse.ArgumentParser("mnist")
    parser.add_argument(
        '--enable_ce',
        action='store_true',
        help="If set, run the task with continuous evaluation logs.")
    parser.add_argument(
        '--use_gpu',
        type=bool,
        default=True,
        help="Whether to use GPU or not.")
    parser.add_argument(
        '--num_epochs', type=int, default=1, help="number of epochs.")
    args = parser.parse_args()
    return args


def loss_net(hidden, label):
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    loss = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_loss = fluid.layers.mean(loss)
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_loss, acc


def multilayer_perceptron(img, label):
    img = fluid.layers.fc(input=img, size=200, act='tanh')
    hidden = fluid.layers.fc(input=img, size=200, act='tanh')
    return loss_net(hidden, label)


def softmax_regression(img, label):
    return loss_net(img, label)


def convolutional_neural_network(img, label):
    conv_pool_1 = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    return loss_net(conv_pool_2, label)



#------- Monitor training progress via handler functions called during training -------
from paddle.utils.plot import Ploter
train_prompt = "Train cost"
test_prompt = "Test cost"
cost_ploter = Ploter(train_prompt, test_prompt)
# Plot the training progress
def event_handler_plot(ploter_title, step, cost):
    cost_ploter.append(ploter_title, step, cost)
    cost_ploter.plot("./plot/paddle_plot")
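    # note: the ./plot folder created during setup must already exist,
    # otherwise matplotlib will fail when saving the figure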
# Print intermediate training results: pass id, batch id, and cost
def event_handler(pass_id, batch_id, cost):
    print("Pass %d, Batch %d, Cost %f" % (pass_id,batch_id, cost))


def train(nn_type,
          use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):

    # If GPU use is requested but this Paddle build was not compiled with CUDA, exit early
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return

    # startup_program defines parameter creation, inputs/outputs, and the initialization of the learnable parameters
    startup_program = fluid.default_startup_program()
    # main_program defines the network, the forward/backward computation, and the optimizer's updates to the learnable parameters
    main_program = fluid.default_main_program()

    # With --enable_ce, fix the random seed so runs are reproducible (verified via the printed acc/avg_loss)
    if args.enable_ce:
        train_reader = paddle.batch(
            paddle.dataset.mnist.train(), batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
        startup_program.random_seed = 90
        main_program.random_seed = 90
    # Otherwise, the usual data pipeline: read - shuffle - batch
    else:
        train_reader = paddle.batch(
            paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
            batch_size=BATCH_SIZE)
        test_reader = paddle.batch(
            paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)

    # Define the network inputs: image and label
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')

    # Select the network backbone
    if nn_type == 'softmax_regression':
        net_conf = softmax_regression
    elif nn_type == 'multilayer_perceptron':
        net_conf = multilayer_perceptron
    else:
        net_conf = convolutional_neural_network

    # Assemble the network: inputs (image + label) + backbone + loss (average loss and accuracy)
    prediction, avg_loss, acc = net_conf(img, label)

    # Clone the main program for testing; the clone keeps only the forward pass (no backward or optimizer ops)
    test_program = main_program.clone(for_test=True)
    # Choose the optimizer
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    # Set the optimization target: avg_loss
    optimizer.minimize(avg_loss)

    # Define the evaluation function ahead of time; it is called during training
    def train_test(train_test_program, train_test_feed, train_test_reader):

        # test_program was cloned from main_program so that the gradient ops in the main program are not run here
        acc_set = []
        avg_loss_set = []
        for test_data in train_test_reader():
            acc_np, avg_loss_np = exe.run(
                program=train_test_program,
                feed=train_test_feed.feed(test_data),
                fetch_list=[acc, avg_loss])
            acc_set.append(float(acc_np))
            avg_loss_set.append(float(avg_loss_np))
        # get test acc and loss
        acc_val_mean = numpy.array(acc_set).mean()
        avg_loss_val_mean = numpy.array(avg_loss_set).mean()
        # Return the mean loss and mean accuracy over the whole test set
        return avg_loss_val_mean, acc_val_mean

    # place is chosen by the user and determines where the program runs
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    # Create an Executor
    exe = fluid.Executor(place)

    # Define the DataFeeder: it maps reader output to the network's input variables (the graph needs its feed targets specified)
    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)

    # Note: the loss, optimizer and executor must all be defined before initialization.
    # Before training, run parameter initialization: default_startup_program defines parameter creation, inputs/outputs, and the initialization of the learnable parameters.
    exe.run(startup_program)
    # Number of training epochs
    epochs = [epoch_id for epoch_id in range(PASS_NUM)]

    # List used to collect (epoch_id, avg_loss_val, acc_val)
    lists = []
    step = 0
    # Loop over the epochs
    for epoch_id in epochs:
        # Iterate over batches from train_reader
        for step_id, data in enumerate(train_reader()):
            # Since the input and output contain multiple columns, fluid maps the input data via feed
            # and retrieves the desired results via fetch_list:
            metrics = exe.run(
                main_program,
                feed=feeder.feed(data),
                fetch_list=[avg_loss, acc])
            # Every 100 batches, update the loss curve and print avg_loss
            if step % 100 == 0:
                event_handler_plot(train_prompt, step, metrics[0])
                print("Pass %d, Epoch %d, Cost %f" % (step, epoch_id, metrics[0]))
            step += 1

        # After each epoch, evaluate on the test set
        avg_loss_val, acc_val = train_test(
            train_test_program=test_program,
            train_test_reader=test_reader,
            train_test_feed=feeder)
        # Print the test results for this epoch
        print("Test with Epoch %d, avg_cost: %s, acc: %s" % (epoch_id, avg_loss_val, acc_val))
        # Add the test avg_loss to the loss plot
        event_handler_plot(test_prompt, step, avg_loss_val)

        # Save each epoch's test result so the best one can be found later
        lists.append((epoch_id, avg_loss_val, acc_val))
        # If a save path is given, save the model every epoch, overwriting the previous one.
        # When saving, specify the feed inputs -- ["img"]:
        # ["img"] -- the data that inference will need to feed
        # [prediction] -- the Variables holding the inference results
        # exe -- the executor that saves the inference model
        if save_dirname is not None:
            fluid.io.save_inference_model(
                save_dirname, ["img"], [prediction],
                exe,
                model_filename=model_filename,
                params_filename=params_filename)

    # All epochs finished.
    # With --enable_ce (reproducible runs), print the final training avg_loss and the test avg_loss_val / acc_val
    if args.enable_ce:
        print("kpis\ttrain_cost\t%f" % metrics[0])
        print("kpis\ttest_cost\t%s" % avg_loss_val)
        print("kpis\ttest_acc\t%s" % acc_val)

    # Find the best epoch by test loss and print its epoch_id and avg_loss_val
    best = sorted(lists, key=lambda list: float(list[1]))[0]
    print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1]))
    # Print the classification accuracy of the best epoch
    print('The classification accuracy is %.2f%%' % (float(best[2]) * 100))


def infer(use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):

    # Nothing to do if no model was saved
    if save_dirname is None:
        return

    # place is chosen by the user and determines where the program runs (CPU or GPU)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    # Define the executor
    exe = fluid.Executor(place)

    # Image loading helper: open with PIL, convert to grayscale, resize to the network input size, normalize.
    # Layout: N x C x H x W, dtype float32.
    # The paddle.dataset.mnist reader applies the same preprocessing, so inference must match it.
    def load_image(file):
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32)
        im = im / 255.0 * 2.0 - 1.0
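        # pixels are scaled from [0, 255] to [-1, 1], the same normalization used by paddle.dataset.mnist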
        return im

    # Directory of the current script
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    # Load the test image
    tensor_img = load_image(cur_dir + '/image/mnist_train_20.png')

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).

        # Load the model.
        # Returns (program, feed_target_names, fetch_targets):
        # program is the inference Program;
        # feed_target_names is a list of str: the names of the variables that must be fed in the inference Program;
        # fetch_targets is a list of Variables from which the inference results are obtained.
        [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(
             save_dirname, exe, model_filename, params_filename)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.

        # The following call mirrors the training-phase call:
        '''
        metrics = exe.run(
        main_program,
        feed=feeder.feed(data),
        fetch_list=[avg_loss, acc])
        '''
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: tensor_img},
            fetch_list=fetch_targets)
        # results = [array([[p0, p1, ..., p9]])], the softmax probabilities of the 10 classes;
        # argsort sorts in ascending order,
        # so the most likely class is lab[0][0][-1]
        lab = numpy.argsort(results)
        print("Inference result of image/mnist_train_20.png is: %d" % lab[0][0][-1])


def main(use_cuda, nn_type):
    # Model filename; if None, it defaults to: __model__
    model_filename = "paddle_model"
    # Parameters filename; if None, parameters are saved as separate files (batch_norm_0.b_0, conv2d_0.b_0, etc.)
    params_filename = "paddle_params"
    # Directory where the model and parameters are saved
    save_dirname = "recognize_digits_" + nn_type + ".inference.model"

    # Also write all print output to a log file
    sys.stdout = Logger('paddle_log.txt')

    # Training (with per-epoch evaluation on the test set)
    train(
        nn_type=nn_type,
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
    # Forward-pass test using the saved model
    infer(
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)


if __name__ == '__main__':
    # Parse the command-line arguments
    args = parse_args()
    BATCH_SIZE = 64
    PASS_NUM = args.num_epochs
    use_cuda = args.use_gpu
    # predict = 'softmax_regression' # uncomment for Softmax
    # predict = 'multilayer_perceptron' # uncomment for MLP
    # Choose the backbone network
    predict = 'convolutional_neural_network'  # uncomment for LeNet5
    main(use_cuda=use_cuda, nn_type=predict)
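
Assuming the script above is saved as mnist_paddle.py (the filename is arbitrary), a typical run looks like:

python mnist_paddle.py --num_epochs 5

Console output is also appended to paddle_log.txt, the loss curves are written under ./plot/, and the inference model is saved to recognize_digits_convolutional_neural_network.inference.model.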

 
