Linux compilation (cross-compilation)

Host system: Ubuntu 16.04

Target hardware: ARM i.MX6Q SABRE-SD, Cortex-A9

Cross-compiled executable format: ELF 32-bit LSB executable, ARM, EABI5 version 1 (SYSV), dynamically linked, interpreter /lib/ld-linux-armhf.so.3, for GNU/Linux 3.2.0

1. Cross-compilation

1.1 Install the cross-compilation toolchain

sudo apt-get install gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf g++-multilib-arm-linux-gnueabihf pkg-config-arm-linux-gnueabihf 

There is no need to reproduce the ARM board's build environment in a virtual machine; the packages above provide a compatible cross-compilation toolchain.
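
To confirm the toolchain is installed and on the PATH, printing the compiler versions is a quick check (the exact version strings depend on the Ubuntu packages):

arm-linux-gnueabihf-gcc --version
arm-linux-gnueabihf-g++ --version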

1.2 Cross-compile helloworld

#include<iostream>
int main()
{
    std::cout<<"hello world!"<<std::endl;
}
arm-linux-gnueabihf-g++ helloworld.cpp -o helloworld

Check the binary format with the following command (to confirm it can run on the ARM board):

file helloworld

Finally, upload or copy the executable to the ARM board and run it to verify.
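
For example, the binary can be copied over the network and run on the board (the IP address, user name and target path below are placeholders):

scp helloworld root@192.168.1.100:/home/root/
ssh root@192.168.1.100 /home/root/helloworld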

1.3 Cross-compile OpenCV

Reference: https://docs.opencv.org/3.4.3/d0/d76/tutorial_arm_crosscompile_with_cmake.html

In opencv/platforms/linux, edit arm-gnueabi.toolchain.cmake: set CMAKE_CXX_COMPILER (arm-linux-gnueabihf-g++), CMAKE_CXX_FLAGS (you may need -march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9), and CMAKE_EXE_LINKER_FLAGS (the OpenCV libraries required, e.g. -lopencv_core -lopencv_imgproc -lopencv_imgcodecs). Example:

set(CMAKE_SYSTEM_NAME Linux)
#set( CMAKE_SYSTEM_PROCESSOR arm )
set( CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set( CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9 -c -Wall ")
set(CMAKE_EXE_LINKER_FLAGS "-lopencv_core -lopencv_imgproc -lopencv_imgcodecs")

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

Run:

cmake -DCMAKE_TOOLCHAIN_FILE=../arm-gnueabi.toolchain.cmake ../../..

Then run make.
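
For reference, the full sequence might look like the sketch below, assuming the build directory is created under opencv/platforms/linux as in the tutorial above (so the relative paths match):

cd opencv/platforms/linux
mkdir -p build && cd build
cmake -DCMAKE_TOOLCHAIN_FILE=../arm-gnueabi.toolchain.cmake ../../..
make -j$(nproc)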

  • Install the build output to a specified path (directory)

If the build output (bin/lib/share, etc.) should be installed into a specified directory (e.g. host), add the option -D CMAKE_INSTALL_PREFIX=./host and then run make install; afterwards the installed files can be found under build/host.

# create a build directory under the opencv source tree and cd into it
cmake -D CMAKE_INSTALL_PREFIX=./host ..
make install -j8

If find_package(OpenCV REQUIRED) cannot locate OpenCV, add the custom-installed OpenCV build to the CMakeLists; reference: importing a custom OpenCV version.
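
A minimal sketch of that (the install path below is a placeholder for wherever the cross-compiled OpenCV was installed via CMAKE_INSTALL_PREFIX):

# point CMake at the cross-compiled OpenCV install (hypothetical path)
set(OpenCV_DIR "/path/to/OpenCV_ARM/share/OpenCV")   # directory containing OpenCVConfig.cmake
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS})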

2. Building a dynamic library (.so) (cross-compile)

Reference: https://www.cnblogs.com/52php/p/5681711.html

//....test.c....
#include<stdio.h>
int fun()
{
    return 1;
}
arm-linux-gnueabihf-gcc test.c -I./ -fPIC -shared -o libtest.so

If an executable needs to call into libtest.so, passing "-ltest" (together with the library search path) at link time is enough to link against libtest.so.
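
For example, assuming a main.c (hypothetical) that declares int fun(); and calls it, the executable can be cross-compiled and linked like this:

arm-linux-gnueabihf-gcc main.c -I./ -L./ -ltest -o main
# on the board, make sure the dynamic loader can find libtest.so at run time, e.g.
export LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH
./main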

3. Cross-compile face detection (SeetaFace, Shiguang Shan)

In the SeetaFaceEngine-master/FaceDetection directory, modify CMakeLists.txt:

  • Switch to the ARM-compatible cross-compilation toolchain
  • Point every dependency at its ARM-compatible build (most importantly the ARM OpenCV headers and libraries)
  • Set OPENMP and SSE to OFF
  • Enable EXAMPLES as needed
cmake_minimum_required(VERSION 3.1.0)

project(seeta_facedet_lib)

set(CMAKE_SYSTEM_NAME Linux)
#set( CMAKE_SYSTEM_PROCESSOR arm )
set( CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set( CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9")
#set(CMAKE_EXE_LINKER_FLAGS "-lopencv_core -lopencv_imgproc -lopencv_imgcodecs")

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

set(CMAKE_BUILD_TYPE Debug)
# Build options
option(BUILD_EXAMPLES  "Set to ON to build examples"  ON)
option(USE_OPENMP      "Set to ON to build use openmp"  OFF)
option(USE_SSE         "Set to ON to build use SSE"  OFF)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")

# Use C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
message(STATUS "C++11 support has been enabled by default.")

# Use SSE
if (USE_SSE)
    add_definitions(-DUSE_SSE)
    message(STATUS "Use SSE")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1")
endif()

# Use OpenMP
if (USE_OPENMP)
    find_package(OpenMP QUIET)
    if (OPENMP_FOUND)
        message(STATUS "Use OpenMP")
        add_definitions(-DUSE_OPENMP)
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
        set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
    endif()
endif()

include_directories(include)

set(src_files 
    src/util/nms.cpp
    src/util/image_pyramid.cpp
    src/io/lab_boost_model_reader.cpp
    src/io/surf_mlp_model_reader.cpp
    src/feat/lab_feature_map.cpp
    src/feat/surf_feature_map.cpp
    src/classifier/lab_boosted_classifier.cpp
    src/classifier/mlp.cpp
    src/classifier/surf_mlp.cpp
    src/face_detection.cpp
    src/fust.cpp
    )

# Build shared library
add_library(seeta_facedet_lib SHARED ${src_files})
set(facedet_required_libs seeta_facedet_lib)

# Build examples
if (BUILD_EXAMPLES)
    message(STATUS "Build with examples.")
    #find_package(OpenCV)
    include_directories("../../../OpenCV_ARM/include")

#    if (NOT OpenCV_FOUND)
#        message(WARNING "OpenCV not found. Test will not be built.")
#    else()
#        include_directories(${OpenCV_INCLUDE_DIRS})
#        list(APPEND facedet_required_libs ${OpenCV_LIBS})

        add_executable(facedet_test src/test/facedetection_test.cpp)
        target_link_libraries(facedet_test ${facedet_required_libs} -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_videoio -lopencv_video -L../../../OpenCV_ARM/lib)
#    endif()
endif()

4. CMake basics

Reference: https://cmake.org/cmake/help/v3.14/manual/cmake.1.html

  • Overview:

CMake is a build-system generator: it reads the project's CMakeLists.txt and produces the build files (e.g. Makefiles) used to compile the project's source code into executables and libraries.

  • Usage:
mkdir build
cd build
cmake ../    # process CMakeLists.txt and generate CMakeCache.txt
make

CMakeLists.txt defines the build targets and the project's dependencies.

CMakeCache.txt is generated when cmake is run; it mirrors the project directory tree and stores cached configuration values.

  • Writing CMakeLists.txt
  • Steps:

Example reference: https://www.cnblogs.com/cv-pr/p/6206921.html

① Specify the minimum CMake version

cmake_minimum_required(VERSION 3.2)

② Specify the project name, usually the same as the project folder name

PROJECT(Test)

③ Add header directories (equivalent to the -I option of g++, or to adding a path to the CPLUS_INCLUDE_PATH environment variable)

INCLUDE_DIRECTORIES(
include
)

④ Add the source directory

AUX_SOURCE_DIRECTORY(src DIR_SRCS)

⑤ Collect all source files used in the build into a variable

SET(TEST_MATH
${DIR_SRCS}
)

⑥ Add the executable target

ADD_EXECUTABLE(${PROJECT_NAME} ${TEST_MATH})

⑦ Link the libraries the executable needs

TARGET_LINK_LIBRARIES(${PROJECT_NAME} -lopencv_core)

Several equivalent ways to write this:

# for example (all of the following forms, including the commented alternatives, work):
TARGET_LINK_LIBRARIES(myProject hello)   # links against libhello.so
TARGET_LINK_LIBRARIES(myProject libhello.a)
TARGET_LINK_LIBRARIES(myProject libhello.so)

# another example:
TARGET_LINK_LIBRARIES(myProject libeng.so)  # all of these library-name forms are accepted
TARGET_LINK_LIBRARIES(myProject eng)
TARGET_LINK_LIBRARIES(myProject -leng)
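
Putting steps ① through ⑦ together, a minimal CMakeLists.txt would look roughly like the sketch below (the project name Test, the include/ and src/ layout, and the OpenCV link flag are simply the values used in the steps above):

cmake_minimum_required(VERSION 3.2)
PROJECT(Test)

INCLUDE_DIRECTORIES(include)           # header search path (-I)
AUX_SOURCE_DIRECTORY(src DIR_SRCS)     # collect the sources under src/
SET(TEST_MATH ${DIR_SRCS})             # variable holding all sources

ADD_EXECUTABLE(${PROJECT_NAME} ${TEST_MATH})
TARGET_LINK_LIBRARIES(${PROJECT_NAME} -lopencv_core)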

That completes the basic CMake workflow; a few more commands are also useful:

LINK_DIRECTORIES (add directories to search for libraries at link time)

link_directories(directory1 directory2 ...)   # equivalent to the -L option of g++, or to adding a path to LD_LIBRARY_PATH

LINK_LIBRARIES (add library files to link by full path; note that a full path is required here. Using ⑦ is still recommended.)

LINK_LIBRARIES("/opt/MATLAB/R2012a/bin/glnxa64/libeng.so")

add_subdirectory(NAME)     (add a subdirectory to the build; the CMakeLists.txt inside that directory builds the sources under it. NAME is a path relative to the CMakeLists.txt that calls add_subdirectory.)
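
A small sketch of how the pieces fit together (the subdirectory name mylib and its sources are hypothetical):

# top-level CMakeLists.txt
add_subdirectory(mylib)              # expects mylib/CMakeLists.txt
add_executable(app main.cpp)
target_link_libraries(app mylib)

# mylib/CMakeLists.txt
add_library(mylib SHARED mylib.cpp)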

5. Cross-compile face detection + face alignment (SeetaFace, Shiguang Shan)

Step 1: In SeetaFaceEngine-master, create a build directory and a CMakeLists.txt with the contents below.

Step 2: Build from the build directory.

cmake_minimum_required(VERSION 2.8.4)

project(seeta_fa_lib)

set(CMAKE_SYSTEM_NAME Linux)
#set( CMAKE_SYSTEM_PROCESSOR arm )
set( CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set( CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9")

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

# Build options
option(BUILD_EXAMPLES  "Set to ON to build examples"  ON)

# Use C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
message(STATUS "C++11 support has been enabled by default.")

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")

set(fd FaceDetection)    # define an alias, referenced later as ${fd}
set(fa FaceAlignment)
include_directories(${fd}/include)
include_directories(${fa}/include)

set(fd_src_files 
    ${fd}/src/util/nms.cpp
    ${fd}/src/util/image_pyramid.cpp
    ${fd}/src/io/lab_boost_model_reader.cpp
    ${fd}/src/io/surf_mlp_model_reader.cpp
    ${fd}/src/feat/lab_feature_map.cpp
    ${fd}/src/feat/surf_feature_map.cpp
    ${fd}/src/classifier/lab_boosted_classifier.cpp
    ${fd}/src/classifier/mlp.cpp
    ${fd}/src/classifier/surf_mlp.cpp
    ${fd}/src/face_detection.cpp
    ${fd}/src/fust.cpp
    )
add_library(seeta_facedet_lib SHARED ${fd_src_files})
set(facedet_required_libs seeta_facedet_lib)

set(fa_src_files 
    ${fa}/src/cfan.cpp
    ${fa}/src/face_alignment.cpp
    ${fa}/src/sift.cpp
    )

add_library(seeta_fa_lib SHARED ${fa_src_files})
set(fa_required_libs seeta_fa_lib)

if (BUILD_EXAMPLES)
    message(STATUS "Build with examples.")
    include_directories("../../OpenCV_ARM/include")
    include_directories("../../OpenCV_ARM/include/opencv")
    include_directories("../../OpenCV_ARM/include/opencv2")
    link_directories("../../OpenCV_ARM/lib")
    add_executable(fa_test ${fa}/src/test/face_alignment_test.cpp)
    target_link_libraries(fa_test ${fa_required_libs} ${facedet_required_libs} -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_videoio -lopencv_video -L../../OpenCV_ARM/lib)
endif()

Runtime on the i.MX6Q:

real    0m3.526s
user    0m3.410s
sys     0m0.110s 

6. Cross-compile face detection + face alignment + face verification (SeetaFace, Shiguang Shan)

Main issues encountered:

  • With the add_subdirectory() approach, the headers (include) and libraries (lib) of my own cross-compiled OpenCV_ARM build could not be found, so I merged all of the separate CMakeLists files into a single one;
  • The OpenCV headers included by FaceIdentification/src/test/test_face_verification.cpp may have path problems; for example, I had to adjust #include "opencv2/highgui/highgui.hpp"
# basic configuration
cmake_minimum_required(VERSION 2.8.4)

project(seeta_fa_lib)

set(CMAKE_SYSTEM_NAME Linux)
#set( CMAKE_SYSTEM_PROCESSOR arm )
set( CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set( CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard -mcpu=cortex-a9")

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

# Build options
option(BUILD_EXAMPLES  "Set to ON to build examples"  ON)

# Use C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
message(STATUS "C++11 support has been enabled by default.")

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")

# face detection (FaceDetection) + face alignment (FaceAlignment)
set(fd FaceDetection)
set(fa FaceAlignment)
include_directories(${fd}/include)
include_directories(${fa}/include)

set(fd_src_files 
    ${fd}/src/util/${fd}/src/classifier/surf_mlp.cpp
    ${fd}/src/face_detection.cpp
    ${fd}/src/fust.cpp
    )
add_library(seeta_facedet_lib SHARED ${fd_src_files})
set(facedet_required_libs seeta_facedet_lib)

set(fa_src_files 
    ${fa}/src/cfan.cpp
    ${fa}/src/face_alignment.cpp
    ${fa}/src/sift.cpp
    )

add_library(seeta_fa_lib SHARED ${fa_src_files})
set(fa_required_libs seeta_fa_lib)

#人脸识别(FaceIdentification)的配置环境
set (VIPLNET_VERSION_MAJOR 4)
set (VIPLNET_VERSION_MINOR 5)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} -std=c++11 -O2 -g -ggdb")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -std=c++11 -O2")

MESSAGE(STATUS "other platform: ${CMAKE_SYSTEM_NAME}")

set(fi FaceIdentification)
set(VIPLNET_INCLUDE_DIR ${fi}/include)
set(VIPLNET_SRC_DIR ${fi}/src)
# set __VIOL_LOG__ macro
# add_definitions(-D__VIPL_LOG__)

include_directories(${VIPLNET_INCLUDE_DIR})
include_directories(${VIPLNET_SRC_DIR})

#add_subdirectory(${fi}/src)

aux_source_directory(${fi}/src SRC_LIST)
aux_source_directory(${fi}/tools TOOLS_LIST)
    ${fd}/src/util/image_pyramid.cpp
    ${fd}/src/io/lab_boost_model_reader.cpp
    ${fd}/src/io/surf_mlp_model_reader.cpp
    ${fd}/src/feat/lab_feature_map.cpp
    ${fd}/src/feat/surf_feature_map.cpp
    ${fd}/src/classifier/lab_boosted_classifier.cpp
    ${fd}/src/classifier/mlp.cpp
    ${fd}/src/classifier/surf_mlp.cpp
    ${fd}/src/face_detection.cpp
    ${fd}/src/fust.cpp
    )
add_library(seeta_facedet_lib SHARED ${fd_src_files})
set(facedet_required_libs seeta_facedet_lib)

set(fa_src_files 
    ${fa}/src/cfan.cpp
    ${fa}/src/face_alignment.cpp
    ${fa}/src/sift.cpp
    )

add_library(seeta_fa_lib SHARED ${fa_src_files})
set(fa_required_libs seeta_fa_lib)

#FaceIdentification
set (VIPLNET_VERSION_MAJOR 4)
set (VIPLNET_VERSION_MINOR 5)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS} -std=c++11 -O2 -g -ggdb")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -std=c++11 -O2")

MESSAGE(STATUS "other platform: ${CMAKE_SYSTEM_NAME}")

set(fi FaceIdentification)
set(VIPLNET_INCLUDE_DIR ${fi}/include)
set(VIPLNET_SRC_DIR ${fi}/src)
# set __VIOL_LOG__ macro
# add_definitions(-D__VIPL_LOG__)

include_directories(${VIPLNET_INCLUDE_DIR})
include_directories(${VIPLNET_SRC_DIR})

#add_subdirectory(${fi}/src)

aux_source_directory(${fi}/src SRC_LIST)
aux_source_directory(${fi}/tools TOOLS_LIST)
add_library(viplnet SHARED ${SRC_LIST} ${TOOLS_LIST})
set_target_properties(viplnet PROPERTIES 
  VERSION ${VIPLNET_VERSION_MAJOR}.${VIPLNET_VERSION_MINOR} 
  SOVERSION ${VIPLNET_VERSION_MAJOR}.${VIPLNET_VERSION_MINOR})

if (BUILD_EXAMPLES)
    message(STATUS "Build with examples.")
    include_directories("../../OpenCV_ARM/include")
    include_directories("../../OpenCV_ARM/include/opencv")
    include_directories("../../OpenCV_ARM/include/opencv2")
    link_directories("../../OpenCV_ARM/lib")
    add_executable(fi_verification_test ${fi}/src/test/test_face_verification.cpp)
    target_link_libraries(fi_verification_test ${fa_required_libs} ${facedet_required_libs} viplnet -lopencv_core -lopencv_imgproc -lopencv_imgcodecs -lopencv_videoio -lopencv_video -L../../OpenCV_ARM/lib)
endif()

Verification was tested by comparing an image with itself (similarity = 1) and by comparing two different photos of the same person (a formal photo vs. a phone snapshot, similarity = 0.643433). So recognition accuracy is not lost across devices, but recognition time on the ARM platform is extremely slow. The timings are:

Runtime on Freescale i.MX6 Quad/DualLite (Device Tree):
real    0m13.799s
user    0m12.870s
sys     0m0.970s

Runtime on Intel(R) Core(TM) i7-7800X CPU @ 3.50GHz:
real    0m1.007s
user    0m1.638s
sys     0m0.330s

7. Raspberry Pi (cross-compilation)

The Raspberry Pi is connected to two USB cameras; the program grabs a frame from each camera at the same time and saves it as an image. (The two cameras must not share the same USB hub.)

#include <iostream>

#include "unistd.h"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"

using namespace std;

int main(int argc, char *argv[])
{
    cout<<"this is a test program!"<<endl;
    
    cv::VideoCapture cap0(0);
    cv::VideoCapture cap1(1);
    cap0.set(cv::CAP_PROP_FRAME_WIDTH,1024);
    cap0.set(cv::CAP_PROP_FRAME_HEIGHT,800);
    cap1.set(cv::CAP_PROP_FRAME_WIDTH,1024);
    cap1.set(cv::CAP_PROP_FRAME_HEIGHT,800);
    if(!cap0.isOpened())
    {
        cout<<"the camera 0 cannot be opened!"<<endl;
        return -1;
    }
    else if(!cap1.isOpened())
    {
        cout<<"the camera 1 cannot be opened!"<<endl;
        return -1;
    }
    cv::Mat frame0;
    cv::Mat frame1;
    bool stop = false;
    while(!stop)
    {
        cap0>>frame0;
        if(!frame0.empty())
        {
          cv::imwrite("0_1.jpg",frame0);
          cout<<"0_1.jpg"<<endl;
        }
        cap1>>frame1;
        if(!frame1.empty())
        {
          cv::imwrite("1_1.jpg",frame1);
          cout<<"1_1.jpg"<<endl;
        }
        sleep(10);
    }
    frame0.release();
    frame1.release();
    return 0;
}

For a time delay on the Raspberry Pi (ARMv7l), sleep() from unistd.h is sufficient.

The CMakeLists.txt for cross-compilation is as follows:

cmake_minimum_required(VERSION 2.8)
cmake_policy(SET CMP0015 NEW)
project(opencv_camera)

set(CMAKE_SYSTEM_NAME Linux)
set( CMAKE_C_COMPILER arm-linux-gnueabihf-gcc)
set( CMAKE_CXX_COMPILER arm-linux-gnueabihf-g++)

SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

SET(CMAKE_CXX_FLAGS "-march=armv7-a -mfpu=neon -mfloat-abi=hard")
set(CMAKE_BUILD_TYPE Debug)

# Use C++11
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
message(STATUS "C++11 support has been enabled by default.")

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O2")

include_directories(
    include
    include/opencv
    include/opencv2
)

link_directories(lib)
set(src opencv_camera.cpp)
add_executable(opencv_camera ${src})
target_link_libraries(opencv_camera libopencv_core.so libopencv_imgproc.so libopencv_imgcodecs.so libopencv_videoio.so libopencv_video.so libopencv_highgui.so)

Copy the lib directory of the previously cross-compiled OpenCV, together with the OpenCV include headers, into the directory containing this CMakeLists.txt.

Note: after copying lib and include, if the build still reports that OpenCV libraries cannot be found or that OpenCV symbols are undefined, check whether the directories listed in include_directories and link_directories are correct, or whether a library or header is missing.

8. Building librealsense

Test system: Ubuntu 16.04

Test hardware: Intel RealSense D435

8.1 Build the SDK

  • Create a new project directory named intelrealsense
  • Download

In a terminal, cd into intelrealsense and run git clone https://github.com/IntelRealSense/librealsense.git

  • Build

Reference: https://dev.intelrealsense.com/docs/compiling-librealsense-for-linux-ubuntu-guide

To build for arm64 (Rockchip RK3399, aarch64) as described in the reference, simply run ./scripts/libuvc_installation.sh from the librealsense directory in a terminal on the Rock Pi. You can also add some optimization options, for example by changing cmake ../ -DFORCE_LIBUVC=true -DCMAKE_BUILD_TYPE=release in scripts/libuvc_installation.sh to: cmake ../ -DFORCE_LIBUVC=true -DCMAKE_BUILD_TYPE=release -DBUILD_EXAMPLES=OFF -DBUILD_GRAPHICAL_EXAMPLES=OFF -DBUILD_WITH_OPENMP=OFF -DBUILD_UNIT_TESTS=OFF -DENABLE_ZERO_COPY=ON
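
Spelled out on separate lines, the modified cmake invocation inside scripts/libuvc_installation.sh (exactly the options listed above) is:

cmake ../ -DFORCE_LIBUVC=true -DCMAKE_BUILD_TYPE=release \
      -DBUILD_EXAMPLES=OFF -DBUILD_GRAPHICAL_EXAMPLES=OFF \
      -DBUILD_WITH_OPENMP=OFF -DBUILD_UNIT_TESTS=OFF -DENABLE_ZERO_COPY=ON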

8.2 Build the distance-measurement example

Reference: https://blog.csdn.net/dieju8330/article/details/85420584

  • Put the distance-measurement program and CMakeLists.txt from the reference site into intelrealsense, and create a lib directory under intelrealsense;
  • Copy the librealsense2.so.2.21.0 built in 8.1 into the lib directory;
  • Modify CMakeLists.txt to:
project(measure_distance)
cmake_minimum_required(VERSION 2.8)

cmake_policy(SET CMP0015 NEW)

include_directories("librealsense/include/")

set(CMAKE_CXX_FLAGS "-std=c++11")
# find the OpenCV package
find_package(OpenCV REQUIRED)
#message(STATUS ${OpenCV_INCLUDE_DIRS})
# add the OpenCV headers
include_directories(${OpenCV_INCLUDE_DIRS})
link_directories(lib) # add the directory with the copied libraries

add_executable(measure_distance measure_distance.cpp)
# link the OpenCV libraries
target_link_libraries(measure_distance ${OpenCV_LIBS} )
# enable debugging
set( CMAKE_BUILD_TYPE Debug )
#set(DEPENDENCIES realsense2 )

target_link_libraries(measure_distance librealsense2.so)
  • Build the distance-measurement program to produce the executable:
mkdir build && cd build && cmake .. && make -j8

8.3 Error

terminate called after throwing an instance of 'rs2::invalid_value_error'
  what():  API version mismatch: librealsense.so was compiled with API version 2.21.0 but the application was compiled with 2.19.1! Make sure correct version of the library is installed (make install)

When building on Linux, the CMakeLists.txt must locate both the libraries and the headers the program needs, and they must come from the same version. If link_directories is used in CMakeLists.txt, the compiler will still search the default system locations (such as /usr/local/include/) for headers. In this case the library I built manually exposes API 2.21.0, but the matching headers were not added to the project, so the build picked up headers for a different API version from /usr/local/include/, causing the version mismatch.
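
One way to diagnose this kind of mismatch (a sketch; the paths assume the layout used above) is to compare the version macros in the headers the build actually includes against any stale system-wide copy:

grep RS2_API librealsense/include/librealsense2/rs.h      # headers matching the library you built
grep RS2_API /usr/local/include/librealsense2/rs.h        # system-wide headers, if an older install exists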

9. Building FFmpeg

Reference: http://www.cnblogs.com/CoderTian/p/6655568.html

Download:

git clone https://github.com/FFmpeg/FFmpeg.git

Configure (install into the host directory):

./configure --prefix=host --enable-shared --disable-static --disable-doc 

Build: make -j8 && make install

The build produces the following under host:

├── bin
│   ├── ffmpeg
│   └── ffprobe
├── include
│   ├── libavcodec
│   │   ├── ac3_parser.h
│   │   ├── adts_parser.h
│   │   ├── avcodec.h
……
│   ├── libavdevice
│   │   ├── avdevice.h
│   │   └── version.h
│   ├── libavfilter
│   │   ├── avfilter.h
│   │   ├── buffersink.h
│   │   ├── buffersrc.h
│   │   └── version.h
│   ├── libavformat
│   │   ├── avformat.h
│   │   ├── avio.h
│   │   └── version.h
│   ├── libavutil
│   │   ├── adler32.h
│   │   ├── aes_ctr.h
│   │   ├── aes.h
……
│   ├── libswresample
│   │   ├── swresample.h
│   │   └── version.h
│   └── libswscale
│       ├── swscale.h
│       └── version.h
├── lib
│   ├── libavcodec.so -> libavcodec.so.58.52.101
│   ├── libavcodec.so.58 -> libavcodec.so.58.52.101
│   ├── libavcodec.so.58.52.101
│   ├── libavdevice.so -> libavdevice.so.58.7.100
│   ├── libavdevice.so.58 -> libavdevice.so.58.7.100
│   ├── libavdevice.so.58.7.100
│   ├── libavfilter.so -> libavfilter.so.7.53.100
│   ├── libavfilter.so.7 -> libavfilter.so.7.53.100
……

Building muxing.cpp

Copy the example from doc/examples into the host directory, rename it with a .cpp extension, and change the includes to:

#define __STDC_CONSTANT_MACROS
#include <stdio.h>
#ifdef __cplusplus
extern "C"
{
#endif
#include <stdlib.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/error.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#ifdef __cplusplus
};
#endif

Create a CMakeLists.txt in the host directory:

project(ffmpeg_muxing)
cmake_minimum_required(VERSION 2.8)
cmake_policy(SET CMP0015 NEW)

include_directories(include)
set(CMAKE_CXX_FLAGS "-std=c++11")
## add the headers
link_directories(lib) # add the directory with the built libraries

add_executable(push_flow_muxing muxing.cpp)
# enable debugging
set( CMAKE_BUILD_TYPE Debug )

target_link_libraries(push_flow_muxing libavcodec.so libavformat.so libavutil.so libswresample.so libswscale.so)

Build the muxing executable:

mkdir build && cd build && cmake .. && make 

Bug-fix reference: https://github.com/usc-imi/aeo-light/issues/8

host/include/libavutil/error.h:121:95: error: taking address of temporary array av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)

If you want to compile your own code into a .so and link it into the executable, just change CMakeLists.txt as follows (after building, this produces a liblive_libs.so plus an executable named app):

………………
set(src_files
    src/1.cpp
    src/2.cpp
    src/3.cpp
    src/4.cpp
   )
add_library(live_libs SHARED ${src_files})

aux_source_directory(./src DIR_SRCS)
add_executable(app ${DIR_SRCS})
target_link_libraries(app live_libs)
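
As a quick sanity check on the host, readelf can confirm that app really records liblive_libs.so as a run-time dependency:

readelf -d app | grep NEEDED    # liblive_libs.so should be listed along with the FFmpeg libraries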

10. Setting up Paddle-Lite on the Rock Pi 4B (RK3399)

Reference 1: https://paddlepaddle.github.io/Paddle-Lite/

Reference 2: https://github.com/YunYang1994/tensorflow-yolov3

 

Step 1: Install the basic tools

  • gcc, g++, git, make, wget, python
  • cmake (version 3.10 or later recommended)

Step 2: Convert the tensorflow-yolov3 model to a Paddle-Lite-compatible model

Conversion tool: X2Paddle

Model source: a model in (.pb) format

  • Follow Reference 2, Part 1 "Quick start", to set up yolov3 under TensorFlow. (Note: put the downloaded model under the checkpoint path; some issues may come up at run time, e.g. scale = min(iw/w, ih/h) in core/utils.py needs to be changed to scale = min(float(iw)/w, float(ih)/h).)
  • Convert the (.pb) model to the PaddlePaddle model format with X2Paddle: x2paddle --framework=tensorflow --model=tf_model.pb --save_dir=pd_model (the input tensor shape may need to be supplied; if the conversion fails, see the FAQ)
  • Model optimization: I ran the optimizer directly on an x86_64 PC, so download the tool and run: ./model_optimize_tool.x86_64-linux-gnu --model_dir=inference_model/ --optimize_out_type=naive_buffer --optimize_out=optimize_model_path/ (for armv8 on the RK3399 there is a separate download link).

Step 3: Build Paddle-Lite-Demo. (Step 2 is not strictly required; step 3 may need a hand-built, cross-compiled OpenCV.)

$ cd Paddle-Lite-Demo/PaddleLite-armlinux-demo/image_classification_demo
$ ./run.sh armv8 # RK3399

Demo results: image classification (a cat) and object detection (screenshots not reproduced here).

11. Setting up TensorFlow Lite v1.14 on the Rock Pi 4B (RK3399)

Reference 1: https://www.wandouip.com/t5i214530/

Reference 2: https://blog.csdn.net/computerme/article/details/80345065

Reference 3: https://github.com/tensorflow/tensorflow/issues/32073

Reference 4: https://tensorflow.google.cn/lite/guide/build_arm64

Device kernel (uname -a): Linux linaro-alip 4.4.154-90-rockchip-ga14f6502e045 #22 SMP Tue Jul 30 10:32:28 UTC 2019 aarch64 GNU/Linux

Cross-compilation (build on an x86_64 host, then simply copy the binaries to the Rock Pi):

11.1 Clone or download the TensorFlow source (preferably the r1.14 branch)

git clone git@github.com:tensorflow/tensorflow.git

11.2 Set up the cross-compilation environment following Reference 4

sudo apt-get install crossbuild-essential-arm64

11.3 Download the TensorFlow dependencies (run from the TensorFlow root; connections to some servers may drop from within China, in which case follow Reference 1 and extract the downloads into tensorflow/lite/tools/make/downloads)

./tensorflow/lite/tools/make/download_dependencies.sh

11.4 Build (run from the TensorFlow root)

./tensorflow/lite/tools/make/build_aarch64_lib.sh

These steps build a static library: tensorflow/lite/tools/make/gen/aarch64_armv8-a/lib/libtensorflow-lite.a

11.5 Build the label_image classification demo

Following Reference 1 and Reference 3, modify tensorflow/lite/tools/make/Makefile; the modified file is shown below:

① Wherever MINIMAL appears, add matching LABEL_IMAGE entries

② Add tensorflow/lite/tools/evaluation/utils.cc (otherwise linking fails with undefined reference: "tflite::evaluation::CreateGPUDelegate(tflite::FlatBufferModel)")

# Make uses /bin/sh by default, which is incompatible with the bashisms seen
# below.
SHELL := /bin/bash

# Find where we're running from, so we can store generated files here.
ifeq ($(origin MAKEFILE_DIR), undefined)
	MAKEFILE_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
endif

# Try to figure out the host system
HOST_OS :=
ifeq ($(OS),Windows_NT)
	HOST_OS = windows
else
	UNAME_S := $(shell uname -s)
	ifeq ($(UNAME_S),Linux)
		HOST_OS := linux
	endif
	ifeq ($(UNAME_S),Darwin)
		HOST_OS := osx
	endif
endif

HOST_ARCH := $(shell if uname -m | grep -q i[345678]86; then echo x86_32; else uname -m; fi)

# Override these on the make command line to target a specific architecture. For example:
# make -f tensorflow/lite/tools/make/Makefile TARGET=rpi TARGET_ARCH=armv7l
TARGET := $(HOST_OS)
TARGET_ARCH := $(HOST_ARCH)

INCLUDES := \
-I. \
-I$(MAKEFILE_DIR)/../../../../../ \
-I$(MAKEFILE_DIR)/../../../../../../ \
-I$(MAKEFILE_DIR)/downloads/ \
-I$(MAKEFILE_DIR)/downloads/eigen \
-I$(MAKEFILE_DIR)/downloads/absl \
-I$(MAKEFILE_DIR)/downloads/gemmlowp \
-I$(MAKEFILE_DIR)/downloads/neon_2_sse \
-I$(MAKEFILE_DIR)/downloads/farmhash/src \
-I$(MAKEFILE_DIR)/downloads/flatbuffers/include \
-I$(OBJDIR)
# This is at the end so any globally-installed frameworks like protobuf don't
# override local versions in the source tree.
INCLUDES += -I/usr/local/include

# These are the default libraries needed, but they can be added to or
# overridden by the platform-specific settings in target makefiles.
LIBS := \
-lstdc++ \
-lpthread \
-lm \
-lz

# There are no rules for compiling objects for the host system (since we don't
# generate things like the protobuf compiler that require that), so all of
# these settings are for the target compiler.
CXXFLAGS := -O3 -DNDEBUG -fPIC
CXXFLAGS += $(EXTRA_CXXFLAGS)
CFLAGS := ${CXXFLAGS}
CXXFLAGS += --std=c++11
LDOPTS := -L/usr/local/lib
ARFLAGS := -r
TARGET_TOOLCHAIN_PREFIX :=
CC_PREFIX :=

ifeq ($(HOST_OS),windows)
CXXFLAGS += -fext-numeric-literals -D__LITTLE_ENDIAN__
endif

# This library is the main target for this makefile. It will contain a minimal
# runtime that can be linked in to other programs.
LIB_NAME := libtensorflow-lite.a

# Benchmark static library and binary
BENCHMARK_LIB_NAME := benchmark-lib.a
BENCHMARK_BINARY_NAME := benchmark_model
BENCHMARK_PERF_OPTIONS_BINARY_NAME := benchmark_model_performance_options

# A small example program that shows how to link against the library.
MINIMAL_SRCS := \
	tensorflow/lite/examples/minimal/minimal.cc
LABEL_IMAGE_SRCS := \
        tensorflow/lite/examples/label_image/label_image.cc\
        tensorflow/lite/examples/label_image/bitmap_helpers.cc\
        tensorflow/lite/tools/evaluation/utils.cc
        
# What sources we want to compile, must be kept in sync with the main Bazel
# build files.

PROFILER_SRCS := \
  tensorflow/lite/profiling/memory_info.cc \
	tensorflow/lite/profiling/time.cc

PROFILE_SUMMARIZER_SRCS := \
	tensorflow/lite/profiling/profile_summarizer.cc \
	tensorflow/core/util/stats_calculator.cc

CMD_LINE_TOOLS_SRCS := \
	tensorflow/lite/tools/command_line_flags.cc

CORE_CC_ALL_SRCS := \
$(wildcard tensorflow/lite/*.cc) \
$(wildcard tensorflow/lite/*.c) \
$(wildcard tensorflow/lite/c/*.c) \
$(wildcard tensorflow/lite/core/*.cc) \
$(wildcard tensorflow/lite/core/api/*.cc) \
$(wildcard tensorflow/lite/experimental/resource_variable/*.cc) \
$(wildcard tensorflow/lite/experimental/ruy/*.cc)
ifneq ($(BUILD_TYPE),micro)
CORE_CC_ALL_SRCS += \
$(wildcard tensorflow/lite/kernels/*.cc) \
$(wildcard tensorflow/lite/kernels/internal/*.cc) \
$(wildcard tensorflow/lite/kernels/internal/optimized/*.cc) \
$(wildcard tensorflow/lite/kernels/internal/reference/*.cc) \
$(PROFILER_SRCS) \
tensorflow/lite/tools/make/downloads/farmhash/src/farmhash.cc \
tensorflow/lite/tools/make/downloads/fft2d/fftsg.c \
tensorflow/lite/tools/make/downloads/flatbuffers/src/util.cpp
CORE_CC_ALL_SRCS += \
	$(shell find tensorflow/lite/tools/make/downloads/absl/absl/ \
	             -type f -name \*.cc | grep -v test | grep -v benchmark | grep -v synchronization | grep -v debugging)
endif
# Remove any duplicates.
CORE_CC_ALL_SRCS := $(sort $(CORE_CC_ALL_SRCS))
CORE_CC_EXCLUDE_SRCS := \
$(wildcard tensorflow/lite/*test.cc) \
$(wildcard tensorflow/lite/*/*test.cc) \
$(wildcard tensorflow/lite/*/*/benchmark.cc) \
$(wildcard tensorflow/lite/*/*/example*.cc) \
$(wildcard tensorflow/lite/*/*/test*.cc) \
$(wildcard tensorflow/lite/*/*/*test.cc) \
$(wildcard tensorflow/lite/*/*/*/*test.cc) \
$(wildcard tensorflow/lite/kernels/*test_main.cc) \
$(wildcard tensorflow/lite/kernels/*test_util*.cc) \
$(MINIMAL_SRCS)\
$(LABEL_IMAGE_SRCS)

BUILD_WITH_MMAP ?= true
ifeq ($(BUILD_TYPE),micro)
	BUILD_WITH_MMAP=false
endif
ifeq ($(BUILD_TYPE),windows)
	BUILD_WITH_MMAP=false
endif
ifeq ($(BUILD_WITH_MMAP),true)
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/mmap_allocation.cc
else
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/mmap_allocation_disabled.cc
endif

BUILD_WITH_NNAPI ?= true
ifeq ($(BUILD_TYPE),micro)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(TARGET),windows)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(TARGET),ios)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(TARGET),rpi)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(TARGET),generic-aarch64)
	BUILD_WITH_NNAPI=false
endif
ifeq ($(BUILD_WITH_NNAPI),true)
	CORE_CC_ALL_SRCS += tensorflow/lite/delegates/nnapi/nnapi_delegate.cc
  CORE_CC_ALL_SRCS += tensorflow/lite/delegates/nnapi/quant_lstm_sup.cc
	CORE_CC_ALL_SRCS += tensorflow/lite/nnapi/nnapi_implementation.cc
	CORE_CC_ALL_SRCS += tensorflow/lite/nnapi/nnapi_util.cc
	LIBS += -lrt
else
	CORE_CC_ALL_SRCS += tensorflow/lite/delegates/nnapi/nnapi_delegate_disabled.cc
	CORE_CC_ALL_SRCS += tensorflow/lite/nnapi/nnapi_implementation_disabled.cc
endif

ifeq ($(TARGET),ios)
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/minimal_logging_android.cc
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/minimal_logging_default.cc
else
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/minimal_logging_android.cc
	CORE_CC_EXCLUDE_SRCS += tensorflow/lite/minimal_logging_ios.cc
endif


# Filter out all the excluded files.
TF_LITE_CC_SRCS := $(filter-out $(CORE_CC_EXCLUDE_SRCS), $(CORE_CC_ALL_SRCS))

# Benchmark sources
BENCHMARK_SRCS_DIR := tensorflow/lite/tools/benchmark
EVALUATION_UTILS_SRCS := \
  tensorflow/lite/tools/evaluation/utils.cc
BENCHMARK_ALL_SRCS := \
	$(wildcard $(BENCHMARK_SRCS_DIR)/*.cc) \
	$(PROFILE_SUMMARIZER_SRCS) \
	$(CMD_LINE_TOOLS_SRCS) \
	$(EVALUATION_UTILS_SRCS)

BENCHMARK_MAIN_SRC := $(BENCHMARK_SRCS_DIR)/benchmark_main.cc
BENCHMARK_PERF_OPTIONS_SRC := \
	$(BENCHMARK_SRCS_DIR)/benchmark_tflite_performance_options_main.cc
BENCHMARK_LIB_SRCS := $(filter-out \
	$(wildcard $(BENCHMARK_SRCS_DIR)/*_test.cc) \
	$(BENCHMARK_MAIN_SRC) \
	$(BENCHMARK_PERF_OPTIONS_SRC) \
	$(BENCHMARK_SRCS_DIR)/benchmark_plus_flex_main.cc, \
	$(BENCHMARK_ALL_SRCS))

# These target-specific makefiles should modify or replace options like
# CXXFLAGS or LIBS to work for a specific targetted architecture. All logic
# based on platforms or architectures should happen within these files, to
# keep this main makefile focused on the sources and dependencies.
include $(wildcard $(MAKEFILE_DIR)/targets/*_makefile.inc)

ALL_SRCS := \
	$(MINIMAL_SRCS) \
	$(LABEL_IMAGE_SRCS) \
	$(PROFILER_SRCS) \
	$(PROFILER_SUMMARIZER_SRCS) \
	$(TF_LITE_CC_SRCS) \
	$(BENCHMARK_LIB_SRCS) \
  $(CMD_LINE_TOOLS_SRCS)

# Where compiled objects are stored.
GENDIR := $(MAKEFILE_DIR)/gen/$(TARGET)_$(TARGET_ARCH)/
OBJDIR := $(GENDIR)obj/
BINDIR := $(GENDIR)bin/
LIBDIR := $(GENDIR)lib/

LIB_PATH := $(LIBDIR)$(LIB_NAME)
BENCHMARK_LIB := $(LIBDIR)$(BENCHMARK_LIB_NAME)
BENCHMARK_BINARY := $(BINDIR)$(BENCHMARK_BINARY_NAME)
BENCHMARK_PERF_OPTIONS_BINARY := $(BINDIR)$(BENCHMARK_PERF_OPTIONS_BINARY_NAME)
MINIMAL_BINARY := $(BINDIR)minimal
LABEL_IMAGE_BINARY := $(BINDIR)label_image

CXX := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}g++
CC := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}gcc
AR := $(CC_PREFIX)${TARGET_TOOLCHAIN_PREFIX}ar

MINIMAL_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(MINIMAL_SRCS))))
LABEL_IMAGE_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(LABEL_IMAGE_SRCS))))

LIB_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(TF_LITE_CC_SRCS)))))

BENCHMARK_MAIN_OBJ := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(BENCHMARK_MAIN_SRC))))

BENCHMARK_PERF_OPTIONS_OBJ := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(BENCHMARK_PERF_OPTIONS_SRC))))

BENCHMARK_LIB_OBJS := $(addprefix $(OBJDIR), \
$(patsubst %.cc,%.o,$(patsubst %.c,%.o,$(BENCHMARK_LIB_SRCS))))

# For normal manually-created TensorFlow Lite C++ source files.
$(OBJDIR)%.o: %.cc
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@
# For normal manually-created TensorFlow Lite C source files.
$(OBJDIR)%.o: %.c
	@mkdir -p $(dir $@)
	$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
$(OBJDIR)%.o: %.cpp
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) -c $< -o $@

# The target that's compiled if there's no command-line arguments.
all: $(LIB_PATH)  $(MINIMAL_BINARY) $(BENCHMARK_BINARY) $(BENCHMARK_PERF_OPTIONS_BINARY) $(LABEL_IMAGE_BINARY)

# The target that's compiled for micro-controllers
micro: $(LIB_PATH)

# Hack for generating schema file bypassing flatbuffer parsing
tensorflow/lite/schema/schema_generated.h:
	@cp -u tensorflow/lite/schema/schema_generated.h.OPENSOURCE tensorflow/lite/schema/schema_generated.h

# Gathers together all the objects we've compiled into a single '.a' archive.
$(LIB_PATH): tensorflow/lite/schema/schema_generated.h $(LIB_OBJS)
	@mkdir -p $(dir $@)
	$(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS)

lib: $(LIB_PATH)

$(MINIMAL_BINARY): $(MINIMAL_OBJS) $(LIB_PATH)
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) \
	-o $(MINIMAL_BINARY) $(MINIMAL_OBJS) \
	$(LIBFLAGS) $(LIB_PATH) $(LDFLAGS) $(LIBS)
$(LABEL_IMAGE_BINARY): $(LABEL_IMAGE_OBJS) $(LIB_PATH)
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) \
	-o $(LABEL_IMAGE_BINARY) $(LABEL_IMAGE_OBJS) \
	$(LIBFLAGS) $(LIB_PATH) $(LDFLAGS) $(LIBS)

minimal: $(MINIMAL_BINARY)
label_image: $(LABEL_IMAGE_BINARY)

$(BENCHMARK_LIB) : $(LIB_PATH) $(BENCHMARK_LIB_OBJS)
	@mkdir -p $(dir $@)
	$(AR) $(ARFLAGS) $(BENCHMARK_LIB) $(LIB_OBJS) $(BENCHMARK_LIB_OBJS)

benchmark_lib: $(BENCHMARK_LIB)

$(BENCHMARK_BINARY) : $(BENCHMARK_MAIN_OBJ) $(BENCHMARK_LIB)
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) \
	-o $(BENCHMARK_BINARY) $(BENCHMARK_MAIN_OBJ) \
	$(LIBFLAGS) $(BENCHMARK_LIB) $(LDFLAGS) $(LIBS)

$(BENCHMARK_PERF_OPTIONS_BINARY) : $(BENCHMARK_PERF_OPTIONS_OBJ) $(BENCHMARK_LIB)
	@mkdir -p $(dir $@)
	$(CXX) $(CXXFLAGS) $(INCLUDES) \
	-o $(BENCHMARK_PERF_OPTIONS_BINARY) $(BENCHMARK_PERF_OPTIONS_OBJ) \
	$(LIBFLAGS) $(BENCHMARK_LIB) $(LDFLAGS) $(LIBS)

benchmark: $(BENCHMARK_BINARY) $(BENCHMARK_PERF_OPTIONS_BINARY)

libdir:
	@echo $(LIBDIR)

# Gets rid of all generated files.
clean:
	rm -rf $(MAKEFILE_DIR)/gen

# Gets rid of target files only, leaving the host alone. Also leaves the lib
# directory untouched deliberately, so we can persist multiple architectures
# across builds for iOS and Android.
cleantarget:
	rm -rf $(OBJDIR)
	rm -rf $(BINDIR)

$(DEPDIR)/%.d: ;
.PRECIOUS: $(DEPDIR)/%.d

-include $(patsubst %,$(DEPDIR)/%.d,$(basename $(ALL_SRCS)))

Re-run the build as in 11.4; this finally produces tensorflow/lite/tools/make/gen/aarch64_armv8-a/bin/label_image

From the TensorFlow root, the following command shows which platform the executable targets:

file tensorflow/lite/tools/make/gen/aarch64_armv8-a/bin/label_image

Output: ELF 64-bit LSB executable, ARM aarch64, version 1 (GNU/Linux), dynamically linked, interpreter /lib/ld-, for GNU/Linux 3.7.0, BuildID[sha1]=41093a674cedba6b014bd47da95a0a09b9aa9aa8, not stripped

11.6 Download the model and labels files from the site, copy the model together with everything under tensorflow/lite/tools/make/gen/aarch64_armv8-a/ to the Rock Pi, and run:

./label_image -v 1 -m ./mobilenet_v1_1.0_224_quant.tflite -i ./grace_hopper.bmp -l ./labels_mobilenet_quant_v1_224.txt 

Output: average time: 113.777 ms
            0.780392: 653 military uniform
            0.105882: 907 Windsor tie
            0.0156863: 458 bow tie
            0.0117647: 466 bulletproof vest
            0.00784314: 835 suit

11.7 Test an object-detection model

Reference: https://github.com/tensorflow/examples/tree/master/lite/examples/object_detection/raspberry_pi

① The Rock Pi kernel is aarch64, so download tflite_runtime-1.14.0-cp35-cp35m-linux_aarch64.whl from the site, copy it to the Rock Pi, and install it with pip install tflite_runtime-1.14.0-cp35-cp35m-linux_aarch64.whl;

② Follow the setup steps on the site to download the code; if you skip the camera setup and only want to test a single image, modify the site's detect_picamera.py as follows:

# python3
#
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example using TF Lite to detect objects with the Raspberry Pi camera."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import io
import re
import time

from annotation import Annotator

import numpy as np

from PIL import Image,ImageDraw
from tflite_runtime.interpreter import Interpreter

CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480


def load_labels(path):
  """Loads the labels file. Supports files with or without index numbers."""
  with open(path, 'r', encoding='utf-8') as f:
    lines = f.readlines()
    labels = {}
    for row_number, content in enumerate(lines):
      pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
      if len(pair) == 2 and pair[0].strip().isdigit():
        labels[int(pair[0])] = pair[1].strip()
      else:
        labels[row_number] = pair[0].strip()
  return labels


def set_input_tensor(interpreter, image):
  """Sets the input tensor."""
  tensor_index = interpreter.get_input_details()[0]['index']
  input_tensor = interpreter.tensor(tensor_index)()[0]
  input_tensor[:, :] = image


def get_output_tensor(interpreter, index):
  """Returns the output tensor at the given index."""
  output_details = interpreter.get_output_details()[index]
  tensor = np.squeeze(interpreter.get_tensor(output_details['index']))
  return tensor


def detect_objects(interpreter, image, threshold):
  """Returns a list of detection results, each a dictionary of object info."""
  set_input_tensor(interpreter, image)
  interpreter.invoke()

  #import pdb;pdb.set_trace()
  # Get all output details
  boxes = get_output_tensor(interpreter, 0)
  classes = get_output_tensor(interpreter, 1)
  scores = get_output_tensor(interpreter, 2)
  count = int(get_output_tensor(interpreter, 3))

  results = []
  for i in range(count):
    if scores[i] >= threshold:
      result = {
          'bounding_box': boxes[i],
          'class_id': classes[i],
          'score': scores[i]
      }
      results.append(result)
  return results


def annotate_objects(image, results, labels):
  """Draws the bounding box and label for each object in the results."""
  image = image.resize(
      (CAMERA_WIDTH, CAMERA_HEIGHT), Image.ANTIALIAS)
  draw = ImageDraw.Draw(image)
  for obj in results:
    # Convert the bounding box figures from relative coordinates
    # to absolute coordinates based on the original resolution
    ymin, xmin, ymax, xmax = obj['bounding_box']
    xmin = int(xmin * CAMERA_WIDTH)
    xmax = int(xmax * CAMERA_WIDTH)
    ymin = int(ymin * CAMERA_HEIGHT)
    ymax = int(ymax * CAMERA_HEIGHT)

    # Overlay the box, label, and score on the camera preview
    draw.rectangle((xmin,ymin,xmax,ymax),None,'red')
    draw.text([xmin, ymin],
                   '%s\n%.2f' % (labels[obj['class_id']], obj['score']))
  image.save('car_out.jpg','JPEG')

def main():
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      '--model', help='File path of .tflite file.', required=True)
  parser.add_argument(
      '--labels', help='File path of labels file.', required=True)
  parser.add_argument(
      '--threshold',
      help='Score threshold for detected objects.',
      required=False,
      type=float,
      default=0.1)
  args = parser.parse_args()

  labels = load_labels(args.labels)
  interpreter = Interpreter(args.model)
  interpreter.allocate_tensors()
  _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

  img_d = Image.open('car.jpg').convert('RGB')
  image = img_d.resize(
      (input_width, input_height), Image.ANTIALIAS)
  start_time = time.monotonic()
  results = detect_objects(interpreter, image, args.threshold)
  elapsed_ms = (time.monotonic() - start_time) * 1000
  annotate_objects(img_d, results, labels)

if __name__ == '__main__':
  main()

Then simply run:

python3 detect_picamera.py --model detect.tflite --labels coco_labels.txt