Deep Learning Model Trial Runs (12): STDC (PaddleSeg, Python & C++)


Preface

I have recently been looking into deploying PaddleX and PaddleSeg on Windows 10, mainly targeting C++, so most of this post records the project-setup work. It uses STDC, released this year, as the example.
My environment:

  • Visual Studio 2019
  • CUDA 11.5, cuDNN 8.2
  • CMake 3.17.1

1. Understanding the Model

You can refer to the post "STDC semantic segmentation network", which introduces all the key parts of the model. If you need to study it in detail, I recommend going through the GitHub project.

2. Model Training

You can follow the official tutorial. To train on your own data (in VOC semantic-segmentation format), you need to modify the configuration files.

  1. Config file parameter changes (see the sketch after this list):

    configs\_base_\pascal_voc12aug.yml (line 4): mode: train
    configs\_base_\pascal_voc12.yml (line 2: iters: 80000; line 27: target_size: [your image width, your image height])
    paddleseg\datasets\voc.py (line 39): NUM_CLASSES = total number of classes in your dataset
  2. Training:

    # Start training (if the dataset has not been prepared, it is downloaded automatically):
    python train.py --config configs/stdcseg/stdc2_seg_voc12aug_512x512_40k.yml --save_dir output_voc2012
    # Resume training:
    python train.py --config configs/stdcseg/stdc2_seg_voc12aug_512x512_40k.yml --save_dir output_voc2012 --resume_model output_voc2012/iter_54000 --do_eval --use_vdl
    
  3. Prediction:

    python predict.py --config configs/stdcseg/stdc2_seg_voc12aug_512x512_40k.yml --model_path output_voc2012/best_model/model.pdparams --image_path data/test_imgs/ADE_test_00003102.jpg --save_dir output_voc2012/result --custom_color 0 0 0 255 255 255
    
  4. Export the inference model:

    python export.py --config configs/stdcseg/stdc2_seg_voc12aug_512x512_40k.yml --model_path output_voc2012/best_model/model.pdparams --save_dir output_voc2012_shared
    
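To make step 1 concrete, here is a sketch of the edited keys (surrounding lines are omitted and the values are placeholders, use your own):

# configs/_base_/pascal_voc12aug.yml, line 4
train_dataset:
  mode: train              # train instead of trainaug when not using the augmented annotation set

# configs/_base_/pascal_voc12.yml, lines 2 and 27
iters: 80000               # total training iterations
target_size: [512, 512]    # [width, height] of your images (nested under its transform)

In paddleseg/datasets/voc.py (line 39), additionally set NUM_CLASSES to your dataset's total class count. After step 4, the export directory output_voc2012_shared should contain model.pdmodel, model.pdiparams, and deploy.yaml; the C++ demo in the next section reads deploy.yaml to reproduce the preprocessing.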


3. Running C++ Inference in VS2019

Main references:
Official tutorial:
Blog tutorial:

Step0: Set up the environment

Go into the paddleseg/deploy/cpp directory and create a build folder. Then replace the CMakeLists.txt with my modified version below, taking care to set the CUDA path (around line 77 of the file) and the yaml-cpp paths (around lines 193~197) to your own paths.
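The directory setup in command form:

cd PaddleSeg\deploy\cpp
mkdir build
cd build

The modified CMakeLists.txt: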

cmake_minimum_required(VERSION 3.0)
project(cpp_inference_demo CXX C)

option(WITH_MKL        "Compile demo with MKL/OpenBlas support, default use MKL."       ON)
option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU."                    OFF)
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   ON)
option(USE_TENSORRT "Compile demo with TensorRT."   OFF)
option(WITH_ROCM "Compile demo with rocm." OFF)

if(NOT WITH_STATIC_LIB)
  add_definitions("-DPADDLE_WITH_SHARED_LIB")
else()
  # PD_INFER_DECL is mainly used to set the dllimport/dllexport attribute in dynamic library mode.
  # Set it to empty in static library mode to avoid compilation issues.
  add_definitions("/DPD_INFER_DECL=")
endif()

macro(safe_set_static_flag)
    foreach(flag_var
        CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
        CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
      if(${flag_var} MATCHES "/MD")
        string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
      endif(${flag_var} MATCHES "/MD")
    endforeach(flag_var)
endmacro()

if(NOT DEFINED PADDLE_LIB)
  message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
endif()
if(NOT DEFINED DEMO_NAME)
  message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
endif()

include_directories("${PADDLE_LIB}/")
set(PADDLE_LIB_THIRD_PARTY_PATH "${PADDLE_LIB}/third_party/install/")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/include")
include_directories("${PADDLE_LIB_THIRD_PARTY_PATH}cryptopp/include")

link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}protobuf/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}glog/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}gflags/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}xxhash/lib")
link_directories("${PADDLE_LIB_THIRD_PARTY_PATH}cryptopp/lib")
link_directories("${PADDLE_LIB}/paddle/lib")

if (WIN32)
  add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
  option(MSVC_STATIC_CRT "use static C Runtime library by default" ON)
  if (MSVC_STATIC_CRT)
    if (WITH_MKL)
      set(FLAG_OPENMP "/openmp")
    endif()
    set(CMAKE_C_FLAGS_DEBUG   "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
    set(CMAKE_C_FLAGS_RELEASE  "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
    set(CMAKE_CXX_FLAGS_DEBUG  "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd ${FLAG_OPENMP}")
    set(CMAKE_CXX_FLAGS_RELEASE   "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT ${FLAG_OPENMP}")
    safe_set_static_flag()
    if (WITH_STATIC_LIB)
      add_definitions(-DSTATIC_LIB)
    endif()
  endif()
else()
  if(WITH_MKL)
    set(FLAG_OPENMP "-fopenmp")
  endif()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ${FLAG_OPENMP}")
endif()

if(WITH_GPU)
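  # Edit this to the lib\x64 directory of your own CUDA install (this is the "line 77" setting mentioned above).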
  set(CUDA_LIB "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.5\\lib\\x64")
  #if(NOT WIN32)
    #set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
  #else()
    #if(CUDA_LIB STREQUAL "")
      #set(CUDA_LIB "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.5\\lib\\x64")
    #endif()
  #endif(NOT WIN32)
endif()

if (USE_TENSORRT AND WITH_GPU)
  set(TENSORRT_ROOT "D:\\lbq\\TensorRT-7.2.3.4")
  if("${TENSORRT_ROOT}" STREQUAL "")
      message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ")
  endif()
  set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include)
  set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib)
  file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
  string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
    "${TENSORRT_VERSION_FILE_CONTENTS}")
  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
    file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
      "${TENSORRT_VERSION_FILE_CONTENTS}")
  endif()
  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
    message(SEND_ERROR "Failed to detect TensorRT version.")
  endif()
  string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
    TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
  message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
    "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
  include_directories("${TENSORRT_INCLUDE_DIR}")
  link_directories("${TENSORRT_LIB_DIR}")
endif()

if(WITH_MKL)
  set(MATH_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mklml")
  include_directories("${MATH_LIB_PATH}/include")
  if(WIN32)
    set(MATH_LIB ${MATH_LIB_PATH}/lib/mklml${CMAKE_STATIC_LIBRARY_SUFFIX}
                 ${MATH_LIB_PATH}/lib/libiomp5md${CMAKE_STATIC_LIBRARY_SUFFIX})
  else()
    set(MATH_LIB ${MATH_LIB_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
                 ${MATH_LIB_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
  endif()
  set(MKLDNN_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}mkldnn")
  if(EXISTS ${MKLDNN_PATH})
    include_directories("${MKLDNN_PATH}/include")
    if(WIN32)
      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
    else(WIN32)
      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
    endif(WIN32)
  endif()
else()
  set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
  include_directories("${OPENBLAS_LIB_PATH}/include/openblas")
  if(WIN32)
    set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
  else()
    set(MATH_LIB ${OPENBLAS_LIB_PATH}/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
  endif()
endif()

if(WITH_STATIC_LIB)
  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
else()
  if(WIN32)
    set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
  else()
    set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
  endif()
endif()

if (NOT WIN32)
  set(EXTERNAL_LIB "-lrt -ldl -lpthread")
  set(DEPS ${DEPS}
      ${MATH_LIB} ${MKLDNN_LIB}
      glog gflags protobuf xxhash cryptopp
      ${EXTERNAL_LIB})
else()
  set(DEPS ${DEPS}
      ${MATH_LIB} ${MKLDNN_LIB}
      glog gflags_static libprotobuf xxhash cryptopp-static ${EXTERNAL_LIB})
  set(DEPS ${DEPS} shlwapi.lib)
endif(NOT WIN32)

if(WITH_GPU)
  if(NOT WIN32)
    if (USE_TENSORRT)
      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
    endif()
    set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
  else()
    if(USE_TENSORRT)
      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
      if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
        set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_STATIC_LIBRARY_SUFFIX})
      endif()
    endif()
    set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
    set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
    set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX} )
  endif()
endif()

if(WITH_ROCM)
  if(NOT WIN32)
    set(DEPS ${DEPS} ${ROCM_LIB}/libamdhip64${CMAKE_SHARED_LIBRARY_SUFFIX})
  endif()
endif()

include_directories(/usr/local/include)
link_directories(/usr/local/lib)

#find_package(yaml-cpp REQUIRED)
set(YAML_CPP "D:\\lbq\\yaml-cpp-master")        # root of your local yaml-cpp checkout
set(YAML_CPP_INCLUDE_DIRS ${YAML_CPP}\\include) # yaml-cpp headers
set(YAML_CPP_LIBRARIES ${YAML_CPP}\\lib_mine)   # folder holding the yaml-cpp.lib you built
include_directories(${YAML_CPP_INCLUDE_DIRS})
link_directories(${YAML_CPP_LIBRARIES})
#set(DEPS ${DEPS} "-lyaml-cpp")

find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
set(DEPS ${DEPS} ${OpenCV_LIBS})

add_executable(${DEMO_NAME} src/${DEMO_NAME}.cc)
target_link_libraries(${DEMO_NAME} ${DEPS})

if(WIN32)
  if(USE_TENSORRT)
    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
            COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
              ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
            COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
              ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
    )
    if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
      add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
              COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
                ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
    endif()
  endif()
  if(WITH_MKL)
    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
          COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
          COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
          COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll  ${CMAKE_BINARY_DIR}/Release
    )
  else()
    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
          COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
    )
  endif()
  if(NOT WITH_STATIC_LIB)
      add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
      )
  endif()
endif()

message(STATUS "    libraries: ${CUDA_LIB}")
message(STATUS "    MATH_LIB_path: ${MATH_LIB}")

Here is how I statically built yaml-cpp on Windows:
# 1. Clone and enter the project
git clone https://github.com/jbeder/yaml-cpp.git
cd yaml-cpp
# 2. Launch cmake-gui
cmake-gui
# 3. Enter the source path D:\lbq\yaml-cpp-master and the build path D:\lbq\yaml-cpp-master\mt_build, then click Configure
# 4. In the pop-up, select the Visual Studio 16 2019 generator
# 5. Check only these two options and leave everything else unchecked:
YAML_CPP_BUILD_CONTRIB YAML_CPP_BUILD_TOOLS
# 6. Click Configure again, then Generate. Once the project files are generated, open YAML_CPP.sln and the familiar VS window appears; build in Release/x64, and yaml-cpp.lib appears under mt_build/Release. This is exactly the library file that line 195 of the CMakeLists.txt above points to (copy it into the lib_mine folder set there).
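If you prefer the command line to cmake-gui, the equivalent is roughly the following sketch (the YAML_CPP_* option names are yaml-cpp's own, but verify them against the version you cloned):

cd D:\lbq\yaml-cpp-master
cmake -S . -B mt_build -G "Visual Studio 16 2019" -A x64 -DYAML_CPP_BUILD_CONTRIB=ON -DYAML_CPP_BUILD_TOOLS=ON -DYAML_BUILD_SHARED_LIBS=OFF
cmake --build mt_build --config Release

One design note: the demo CMakeLists above builds with the static CRT (/MT) because MSVC_STATIC_CRT defaults to ON; yaml-cpp's YAML_MSVC_SHARED_RT option controls the same choice on its side, so pass -DYAML_MSVC_SHARED_RT=OFF if the linker later complains about mismatched runtime libraries.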


Step1: Download the PaddlePaddle C++ prediction library (fluid_inference)

See the official documentation for download links.
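The unzipped library is the directory that -DPADDLE_LIB points to in Step2. Its rough layout (a sketch; newer packages are named paddle_inference, and exact contents vary by version):

paddle_inference/
    paddle/          headers (paddle/include) and libraries (paddle/lib)
    third_party/     glog, gflags, protobuf, mklml, ... under third_party/install
    version.txt      records the CUDA/cuDNN/TensorRT versions the package was built with

Pick the Windows package whose CUDA/cuDNN versions match your machine and whose compiler matches VS2019.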

Step2: Build

Once configuration is done, build with one of the following three command lines; I used the GPU build. The flag you must set yourself is -DPADDLE_LIB, which points at the PaddlePaddle C++ prediction library from Step1 (also mentioned in my PaddleX posts); download it in advance.

	1) CPU
	cmake .. -DDEMO_NAME=test_seg -DWITH_MKL=ON -DWITH_GPU=OFF -DUSE_TENSORRT=OFF -DWITH_STATIC_LIB=OFF -DPADDLE_LIB=D:\lbq\code\4_ocr\paddle_c_plus
	2) GPU
	cmake .. -DDEMO_NAME=test_seg -DWITH_MKL=ON -DWITH_GPU=ON -DUSE_TENSORRT=OFF -DWITH_STATIC_LIB=OFF -DPADDLE_LIB=D:\lbq\code\4_ocr\paddle_c_plus
	3) TensorRT
	cmake .. -DDEMO_NAME=test_seg -DWITH_MKL=ON -DWITH_GPU=ON -DUSE_TENSORRT=ON -DWITH_STATIC_LIB=OFF -DPADDLE_LIB=D:\lbq\code\4_ocr\paddle_c_plus

After the build succeeds, open cpp_inference_demo.sln and, in the test_seg project, add a line 'yaml-cpp.lib' under Properties -> Linker -> Input -> Additional Dependencies.
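If you would rather script this step than click through the IDE, the same build can be driven from the command line (a sketch, run from the build directory after adding the dependency above):

msbuild cpp_inference_demo.sln /p:Configuration=Release /p:Platform=x64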

You can then build test_seg.exe. Finally, symlink (or copy) the model directory exported from Python into the Release directory and test it on an image.
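A sketch of that final test run; the --model_dir/--img_path flag names follow PaddleSeg's deploy/cpp demo, so check src/test_seg.cc for the exact gflags your version defines, and the paths below are placeholders:

cd Release
:: link (or simply copy) the exported model next to the executable; mklink needs an elevated prompt
mklink /D output_voc2012_shared D:\path\to\output_voc2012_shared
test_seg.exe --model_dir=output_voc2012_shared --img_path=test.jpg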

Both PaddleX and PaddleSeg offer TensorRT-based solutions, but I ran into problems debugging both of them; I have now located the cause and am working on a fix.
