说说Darknet 如何和QT qtcreator配置使用;
darknet yolov系列让做视觉工程的C++程序员喜不自胜!为啥?终于不用额外研究python也可玩转AI图像识别了,而且嵌入式友好、灵活、可扩展。感谢darknet的创作者,无私地提供给工程技术人员。
这里记录一下自己使用的点滴经验,帮助尚未掌握的同行更容易把握这个好用的AI图像识别引擎。
1、darknet编译,都好说,网络上已经介绍的很清楚了,这里不再赘述,至于你要用哪个版本,这里还是推荐https://github.com/AlexeyAB/darknet
2、这个版本有windows/linux的,我主要介绍使用ubuntu18.04的吧,按照你是否安装CUDNN和电脑的硬件配置修改makefile;然后生成libdarknet.so文件。
3、其实darknet自带有演示libdarknet.so怎么用的example样例代码,就在yolo_console_dll.cpp 中,几乎所有相关的API使用都有示范,如果大家要工程化应用darknet yolov*,就可以参考这个文件。
4、废话少说,上代码,真刀真枪!首先声明,你已经新建了qtcreator 工程,把下面代码直接拷贝即可;
#include <QCoreApplication>
#define GPU
#define OPENCV
#include <iostream>
#include <fstream>
#include <QDebug>
#include <opencv2/opencv.hpp>
#include <include/yolo_v2_class.hpp>
#include <include/darknet.h>
/// Load object class names from a text file, one name per line.
/// @param filename  path to the .names file (e.g. coco.names)
/// @return one entry per line, in file order; empty vector if the
///         file could not be opened (caller treats that as "no names").
std::vector<std::string> objects_names_from_file(const std::string &filename) {
    std::ifstream file(filename);
    std::vector<std::string> file_lines;
    if (!file.is_open()) return file_lines;
    for (std::string line; std::getline(file, line);) file_lines.push_back(line);
    std::cout << "object names loaded \n";
    return file_lines;
}
void draw_boxes(cv::Mat mat_img, std::vector<bbox_t> result_vec, std::vector<std::string> obj_names,
int current_det_fps = -1, int current_cap_fps = -1)
{
int const colors[6][3] = { { 1,0,1 },{ 0,0,1 },{ 0,1,1 },{ 0,1,0 },{ 1,1,0 },{ 1,0,0 } };
for (auto &i : result_vec) {
cv::Scalar color = obj_id_to_color(i.obj_id);
cv::rectangle(mat_img, cv::Rect(i.x, i.y, i.w, i.h), color, 2);
if (obj_names.size() > i.obj_id) {
std::string obj_name = obj_names[i.obj_id];
if (i.track_id > 0) obj_name += " - " + std::to_string(i.track_id);
cv::Size const text_size = getTextSize(obj_name, cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, 2, 0);
int max_width = (text_size.width > i.w + 2) ? text_size.width : (i.w + 2);
max_width = std::max(max_width, (int)i.w + 2);
//max_width = std::max(max_width, 283);
std::string coords_3d;
if (!std::isnan(i.z_3d)) {
std::stringstream ss;
ss << std::fixed << std::setprecision(2) << "x:" << i.x_3d << "m y:" << i.y_3d << "m z:" << i.z_3d << "m ";
coords_3d = ss.str();
cv::Size const text_size_3d = getTextSize(ss.str(), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, 1, 0);
int const max_width_3d = (text_size_3d.width > i.w + 2) ? text_size_3d.width : (i.w + 2);
if (max_width_3d > max_width) max_width = max_width_3d;
}
cv::rectangle(mat_img, cv::Point2f(std::max((int)i.x - 1, 0), std::max((int)i.y - 35, 0)),
cv::Point2f(std::min((int)i.x + max_width, mat_img.cols - 1), std::min((int)i.y, mat_img.rows - 1)),
color, CV_FILLED, 8, 0);
putText(mat_img, obj_name, cv::Point2f(i.x, i.y - 16), cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, cv::Scalar(0, 0, 0), 2);
if(!coords_3d.empty()) putText(mat_img, coords_3d, cv::Point2f(i.x, i.y-1), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cv::Scalar(0, 0, 0), 1);
}
}
if (current_det_fps >= 0 && current_cap_fps >= 0) {
std::string fps_str = "FPS detection: " + std::to_string(current_det_fps) + " FPS capture: " + std::to_string(current_cap_fps);
putText(mat_img, fps_str, cv::Point2f(10, 20), cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, cv::Scalar(50, 255, 0), 2);
}
}
int main()
{
    // Select which GPU darknet uses (darknet global); -1 means CPU-only.
    gpu_index = 0;
#ifndef GPU
    gpu_index = -1;
#else
    if (gpu_index >= 0) {
        cuda_set_device(gpu_index);
    }
#endif

    // Open the default camera; fail fast (non-zero exit) BEFORE configuring
    // properties — the original checked isOpened() only after eight set() calls
    // and returned 0 (success) on failure.
    cv::VideoCapture video;
    const int cameraWidth = 320;
    const int cameraHeight = 480;
    video.open(0);
    if (!video.isOpened())
    {
        printf("Read Video failed!\n");
        return -1;
    }
    video.set(CV_CAP_PROP_FRAME_WIDTH, cameraWidth * 2); // frame width
    video.set(CV_CAP_PROP_FRAME_HEIGHT, cameraHeight);   // frame height
    video.set(CV_CAP_PROP_FPS, 30);                      // frames per second
    video.set(CV_CAP_PROP_BRIGHTNESS, 1);                // brightness
    video.set(CV_CAP_PROP_CONTRAST, 40);                 // contrast
    video.set(CV_CAP_PROP_SATURATION, 50);               // saturation
    video.set(CV_CAP_PROP_HUE, 50);                      // hue
    video.set(CV_CAP_PROP_EXPOSURE, 50);                 // exposure

    cv::Mat frame;
    bool detection_enabled = false; // toggled at runtime with the 'c' key
    // NOTE(review): hard-coded absolute paths — adjust to your own checkout.
    std::string cfg_file = "/home/sunkit/Qtkit/DarknetConsol/darknet-master/cfg/yolov4.cfg";
    std::string weights_file = "/home/sunkit/Qtkit/DarknetConsol/darknet-master/yolov4.weights";
    std::string names_file = "/home/sunkit/Qtkit/DarknetConsol/darknet-master/data/coco.names";
    Detector detector(cfg_file, weights_file);
    auto obj_names = objects_names_from_file(names_file);
    std::cout << "input image or video filename: ";

    while (true)
    {
        // Grab and retrieve the next frame of the video stream.
        video >> frame;
        if (frame.empty())
        {
            std::cout << "frame empty" << std::endl;
            break;
        }
        // Wait 40 ms for a key press: Esc (27) exits, 'c' toggles detection.
        int c = cv::waitKey(40);
        if (27 == char(c)) break;
        if (char(c) == 'c')
        {
            detection_enabled = !detection_enabled;
        }
        // Shallow copy: shares pixel data with frame, so drawing marks the frame.
        cv::Mat mat_img = frame;
        if (detection_enabled)
        {
            // Resize to the network input size, run detection, then map boxes
            // back to the original frame coordinates.
            auto det_image = detector.mat_to_image_resize(mat_img);
            double start = static_cast<double>(cv::getTickCount());
            std::vector<bbox_t> result_vec =
                detector.detect_resized(*det_image, mat_img.size().width, mat_img.size().height, 0.4);
            double timeConsume = ((double)cv::getTickCount() - start) * 1000 / cv::getTickFrequency();
            std::cout << "detection time (ms): " << timeConsume << std::endl;
            draw_boxes(mat_img, result_vec, obj_names);
        }
        cv::imshow("frame", mat_img);
    }
}
5、效果如下,效果喜人,这也告诉大家AI 还是个什么程度呢?各位都有自己的见解。
6、qtcreator 工程配置环境,没有这个配置环境就是纯耍帅,不实在。
QT -= gui
CONFIG += c++11 console
CONFIG -= app_bundle

# The following define makes your compiler emit warnings if you use
# any Qt feature that has been marked deprecated (the exact warnings
# depend on your compiler). Please consult the documentation of the
# deprecated API in order to know how to port your code away from it.
DEFINES += QT_DEPRECATED_WARNINGS

# You can also make your code fail to compile if it uses deprecated APIs.
# In order to do so, uncomment the following line.
# You can also select to disable deprecated APIs only up to a certain version of Qt.
#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0

SOURCES += \
    main.cpp

# Default rules for deployment.
qnx: target.path = /tmp/$${TARGET}/bin
else: unix:!android: target.path = /opt/$${TARGET}/bin
!isEmpty(target.path): INSTALLS += target

# OpenCV via pkg-config.
unix: CONFIG += link_pkgconfig
unix: PKGCONFIG += /usr/local/lib/pkgconfig/opencv.pc

# CUDA: defines must match the #define GPU / CUDNN used when libdarknet.so was built.
DEFINES += GPU CUDNN
LIBS += \
    -L"/usr/local/lib" \
    -L"/usr/local/cuda/lib64" \
    -lcudart -lcufft
DEPENDPATH += .
INCLUDEPATH += /usr/local/cuda/include
QMAKE_LIBDIR += /usr/local/cuda/lib64
CUDA_DIR = /usr/local/cuda
CUDA_SDK = /usr/local/cuda
SYSTEM_NAME = linux
SYSTEM_TYPE = 64
CUDA_ARCH = compute_61
CUDA_CODE = [sm_61,compute_61]
CUDA_OBJECTS_DIR = ./Release/obj
CUDA_LIBS = cudart cufft
CUDA_INC = $$join(INCLUDEPATH,'" -I"','-I"','"')
NVCC_OPTIONS = --use_fast_math
NVCC_LIBS = $$join(CUDA_LIBS,' -l','-l','')

# FIX: qmake's build-mode scope is lowercase "release"; CONFIG(Release) never
# matched, so the nvcc extra compiler was silently skipped.
# NOTE: CUDA_SOURCES is never assigned above — add your .cu files to it,
# e.g. CUDA_SOURCES += kernels.cu, or this rule compiles nothing.
CONFIG(release, debug|release) {
    cuda.input = CUDA_SOURCES
    cuda.output = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.o
    cuda.commands = $$CUDA_DIR/bin/nvcc $$NVCC_OPTIONS $$CUDA_INC $$NVCC_LIBS --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH -O3 -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
    cuda.dependency_type = TYPE_C
    QMAKE_EXTRA_COMPILERS += cuda
}

# libdarknet.so is expected next to the project file.
win32:CONFIG(release, debug|release): LIBS += -L$$PWD/./release/ -ldarknet
else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/./debug/ -ldarknet
else:unix: LIBS += -L$$PWD/./ -ldarknet
INCLUDEPATH += $$PWD/.
DEPENDPATH += $$PWD/.
结语:
有时候你会遇到darknet无法运行的情况,感觉很无厘头啊,怎么回事呢?这个就要深入了解了,比如你的深度模型网络配置文件开头没有改为如下:
[net]
# Training-time values (kept for reference, commented out for inference):
#batch=64
#subdivisions=8
# Inference: process one image at a time, no mini-batch splitting.
batch=1
subdivisions=1
# Training
#width=512
#height=512
# Network input resolution used at inference; darknet resizes frames to this.
width=416
height=416
# 3-channel (BGR/RGB) input images.
channels=3
注意:如果你的笔记本电脑显卡内存不大于6G,建议使用416,不多解释。