Lane line detection. I first tried a classical image-processing approach to find the lane markings, which saves hardware resources; the scenes turned out to be too complex, so a network model was adopted in the end, but the classical pipeline can still serve as a pre-check:
#include <iostream>
#include <cmath>
#include <vector>
#include <algorithm>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int lane_roi_hulf(cv::Mat& img_gray, cv::Mat& img_hulf);
int lane_roi(cv::Mat& img_canny, cv::Mat& img_roi);
std::vector<std::vector<cv::Vec4i>> line_separa(std::vector<cv::Vec4i> lines, cv::Mat& img_edges);
std::vector<cv::Point> regression(std::vector<std::vector<cv::Vec4i> > left_right_lines, cv::Mat& inputImage);
int find_contour(cv::Mat& img_roi, cv::Mat& img_contour);
int lane_compel(cv::Mat& img_lane);
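// Pipeline: grayscale -> Gaussian blur -> Otsu threshold / Canny -> trapezoidal ROI
// -> probabilistic Hough transform -> left/right split by slope -> per-side line fit,
// plus a contour-based filter on the thresholded ROI for comparison.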
int main(int argc, char** argv) {
    cv::Mat img_input, img_guas, img_gray, img_canny;
    cv::Mat img_hulf, img_roi, img_roi_cont;
    img_input = cv::imread("./img/city_03_16_229.jpg");
    if (img_input.empty()) {
        std::cerr << "failed to load ./img/city_03_16_229.jpg" << std::endl;
        return -1;
    }
    cv::Mat img_uch(img_input.rows, img_input.cols, CV_8UC1, cv::Scalar::all(0));
    double time_start = static_cast<double>(cv::getTickCount());
    cv::cvtColor(img_input, img_gray, cv::COLOR_BGR2GRAY);
    //lane_roi_hulf(img_gray, img_hulf);
    cv::GaussianBlur(img_gray, img_guas, cv::Size(5, 5), 0, 0);
    cv::threshold(img_guas, img_uch, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
    lane_roi(img_uch, img_roi_cont);
    cv::imshow("img_roi_cont", img_roi_cont);
    cv::Canny(img_guas, img_canny, 100, 250);
    cv::imshow("img_canny", img_canny);
    lane_roi(img_canny, img_roi);
    //double time = (static_cast<double>(cv::getTickCount()) - time_start) / cv::getTickFrequency();
    //std::cout<<"time: "<<time<<std::endl;
    cv::imshow("img_roi", img_roi);
    std::vector<cv::Vec4i> lines;
    cv::HoughLinesP(img_roi, lines, 1, CV_PI / 180, 40, 40, 20);
    cv::Mat img_contour = cv::Mat::zeros(img_roi.size(), img_roi.type());
    find_contour(img_roi_cont, img_contour);
    cv::imshow("img_contour", img_contour);
    cv::Mat img_hough = cv::Mat::zeros(img_uch.size(), img_uch.type());
    int j = 0;
    cv::Point hough_ini, hough_fini;
    for (auto i : lines) {
        hough_ini = cv::Point(i[0], i[1]);
        hough_fini = cv::Point(i[2], i[3]);
        cv::line(img_hough, hough_ini, hough_fini, cv::Scalar(255), 1, cv::LINE_AA);
        j++;
    }
    std::cout << "j: " << j << std::endl;
    cv::imshow("img_hough", img_hough);
    std::vector<std::vector<cv::Vec4i> > left_right_lines;
    std::vector<cv::Point> lanes;
    left_right_lines = line_separa(lines, img_roi);
    lanes = regression(left_right_lines, img_roi);
    cv::Mat img_lane = cv::Mat::zeros(img_hough.size(), img_hough.type());
    int k = 0;
    cv::Point left_ini, left_fini;
    // left_right_lines[0] holds the right-side segments (see line_separa)
    for (auto i : left_right_lines[0]) {
        left_ini = cv::Point(i[0], i[1]);
        left_fini = cv::Point(i[2], i[3]);
        cv::line(img_lane, left_ini, left_fini, cv::Scalar(255), 1, cv::LINE_AA);
        k++;
    }
    cv::Point right_ini, right_fini;
    // left_right_lines[1] holds the left-side segments
    for (auto i : left_right_lines[1]) {
        right_ini = cv::Point(i[0], i[1]);
        right_fini = cv::Point(i[2], i[3]);
        cv::line(img_lane, right_ini, right_fini, cv::Scalar(255), 1, cv::LINE_AA);
        k++;
    }
    std::cout << "k: " << k << std::endl;
    // vertical reference line at x = 640 (hard-coded for a 1280x720 frame)
    cv::line(img_lane, cv::Point(640, 0), cv::Point(640, 720), cv::Scalar(255), 1, cv::LINE_AA);
    //cv::line(img_lane, lanes[0], lanes[1], cv::Scalar(255), 1, cv::LINE_AA);
    //cv::line(img_lane, lanes[2], lanes[3], cv::Scalar(255), 1, cv::LINE_AA);
    cv::imshow("img_lane", img_lane);
    cv::waitKey(0);
    return 0;
}
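// lane_roi: masks the edge/threshold image with a trapezoidal region of interest
// covering the lower part of the frame, where lane markings are expected.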
int lane_roi(cv::Mat& img_canny, cv::Mat& img_roi)
{
    int rows_kerl = img_canny.rows / 10;
    int cols_kerl = img_canny.cols / 10;
    cv::Mat img_fill = cv::Mat::zeros(img_canny.size(), img_canny.type());
    img_fill.copyTo(img_roi);
    cv::Point pts[4] = {
        cv::Point(3 * cols_kerl, 6 * rows_kerl),
        cv::Point(7 * cols_kerl, 6 * rows_kerl),
        cv::Point(9.5 * cols_kerl, 10 * rows_kerl),
        cv::Point(0.5 * cols_kerl, 10 * rows_kerl)
    };
    cv::fillConvexPoly(img_fill, pts, 4, cv::Scalar(255));
    cv::bitwise_and(img_canny, img_fill, img_roi);
    return 0;
}
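// lane_roi_hulf: simpler variant that keeps only the lower half of the frame
// (its call in main is currently commented out).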
int lane_roi_hulf(cv::Mat& img_gray, cv::Mat& img_hulf)
{
    cv::Mat img_fill = cv::Mat::zeros(img_gray.size(), img_gray.type());
    img_fill.copyTo(img_hulf);
    cv::Point pts[4] = {
        cv::Point(0, img_gray.rows / 2),
        cv::Point(img_gray.cols, img_gray.rows / 2),
        cv::Point(img_gray.cols, img_gray.rows),
        cv::Point(0, img_gray.rows),
    };
    cv::fillConvexPoly(img_fill, pts, 4, cv::Scalar(255));
    cv::bitwise_and(img_gray, img_fill, img_hulf);
    return 0;
}
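// line_separa: keeps Hough segments whose slope magnitude lies in (0.3, 10) and splits
// them into right/left candidates by slope sign and position relative to the image centre.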
std::vector<std::vector<cv::Vec4i>> line_separa(std::vector<cv::Vec4i> lines, cv::Mat& img_edges)
{
    std::vector<std::vector<cv::Vec4i> > output(2);
    size_t j = 0, m = 0, n = 0;
    cv::Point ini, fini;
    double slope_thresh_min = 0.3;
    double slope_thresh_max = 10;
    std::vector<double> slopes, slope_left, slope_right;
    std::vector<cv::Vec4i> selected_lines, sel_line_left, sel_line_right;
    std::vector<cv::Vec4i> right_lines, left_lines;
    // Calculate the slope of all the detected lines
    for (auto i : lines) {
        ini = cv::Point(i[0], i[1]);
        fini = cv::Point(i[2], i[3]);
        // Basic algebra: slope = (y1 - y0)/(x1 - x0)
        double slope = (static_cast<double>(fini.y) - static_cast<double>(ini.y)) /
                       (static_cast<double>(fini.x) - static_cast<double>(ini.x) + 0.00001);
        // If the slope is too horizontal or too vertical, discard the line;
        // otherwise keep it together with its slope
        if (std::abs(slope) > slope_thresh_min && std::abs(slope) < slope_thresh_max) {
            slopes.push_back(slope);
            selected_lines.push_back(i);
        }
    }
    // Split the lines into right and left lines
    double img_center = static_cast<double>(img_edges.cols / 2);
    while (j < selected_lines.size()) {
        ini = cv::Point(selected_lines[j][0], selected_lines[j][1]);
        fini = cv::Point(selected_lines[j][2], selected_lines[j][3]);
        // Condition to classify line as left side or right side
        if (slopes[j] > 0 && fini.x > img_center && ini.x > img_center) {
            right_lines.push_back(selected_lines[j]);
            //right_flag = true;
        }
        else if (slopes[j] < 0 && fini.x < img_center && ini.x < img_center) {
            left_lines.push_back(selected_lines[j]);
            //left_flag = true;
        }
        j++;
    }
    while (m < right_lines.size()) {
        ini = cv::Point(right_lines[m][0], right_lines[m][1]);
        fini = cv::Point(right_lines[m][2], right_lines[m][3]);
        // Basic algebra: slope = (y1 - y0)/(x1 - x0)
        double slope = (static_cast<double>(fini.y) - static_cast<double>(ini.y)) /
                       (static_cast<double>(fini.x) - static_cast<double>(ini.x) + 0.00001);
        double interc = fini.y - slope * fini.x;
        std::cout << "slope1: " << slope << ", interc1: " << interc << std::endl;
        slope_right.push_back(slope);
        sel_line_right.push_back(right_lines[m]);
        m++;
    }
    while (n < left_lines.size()) {
        ini = cv::Point(left_lines[n][0], left_lines[n][1]);
        fini = cv::Point(left_lines[n][2], left_lines[n][3]);
        // Basic algebra: slope = (y1 - y0)/(x1 - x0)
        double slope = (static_cast<double>(fini.y) - static_cast<double>(ini.y)) /
                       (static_cast<double>(fini.x) - static_cast<double>(ini.x) + 0.00001);
        double interc = fini.y - slope * fini.x;
        std::cout << "slope2: " << slope << ", interc2: " << interc << std::endl;
        // If the slope is too horizontal, discard the line;
        // otherwise keep it together with its slope
        if (std::abs(slope) > slope_thresh_min && std::abs(slope) < slope_thresh_max) {
            slope_left.push_back(slope);
            sel_line_left.push_back(left_lines[n]);
        }
        n++;
    }
    // Note the ordering: output[0] holds the right-side lines, output[1] the left-side lines
    output[0] = right_lines;
    output[1] = left_lines;
    return output;
}
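// regression: fits a single line per side through the segment end points with
// cv::fitLine (L2 distance) and evaluates it at the bottom of the image and at y = 470.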
std::vector<cv::Point> regression(std::vector<std::vector<cv::Vec4i> > left_right_lines, cv::Mat& inputImage)
{
    std::vector<cv::Point> output(4);
    cv::Point ini, fini;
    cv::Point ini2, fini2;
    cv::Vec4d right_line, left_line;
    std::vector<cv::Point> right_pts, left_pts;
    cv::Point right_b;    // Members of both line equations of the lane boundaries:
    double right_m = 1.0; // y = m*x + b (slopes initialised so the code stays defined when no lines are found)
    cv::Point left_b;     //
    double left_m = 1.0;  //
    // If right lines are being detected, fit a line using all the init and final points of the lines
    for (auto i : left_right_lines[0]) {
        ini = cv::Point(i[0], i[1]);
        fini = cv::Point(i[2], i[3]);
        right_pts.push_back(ini);
        right_pts.push_back(fini);
    }
    if (right_pts.size() > 0) {
        // The right line is formed here
        cv::fitLine(right_pts, right_line, cv::DIST_L2, 0, 0.01, 0.01);
        right_m = right_line[1] / right_line[0];
        right_b = cv::Point(right_line[2], right_line[3]);
    }
    // If left lines are being detected, fit a line using all the init and final points of the lines
    for (auto j : left_right_lines[1]) {
        ini2 = cv::Point(j[0], j[1]);
        fini2 = cv::Point(j[2], j[3]);
        left_pts.push_back(ini2);
        left_pts.push_back(fini2);
    }
    if (left_pts.size() > 0) {
        // The left line is formed here
        cv::fitLine(left_pts, left_line, cv::DIST_L2, 0, 0.01, 0.01);
        left_m = left_line[1] / left_line[0];
        left_b = cv::Point(left_line[2], left_line[3]);
    }
    // Once the slope and offset points have been obtained, apply the line equation to obtain the line points
    int ini_y = inputImage.rows;
    int fin_y = 470;
    double right_ini_x = ((ini_y - right_b.y) / right_m) + right_b.x;
    double right_fin_x = ((fin_y - right_b.y) / right_m) + right_b.x;
    double left_ini_x = ((ini_y - left_b.y) / left_m) + left_b.x;
    double left_fin_x = ((fin_y - left_b.y) / left_m) + left_b.x;
    output[0] = cv::Point(right_ini_x, ini_y);
    output[1] = cv::Point(right_fin_x, fin_y);
    output[2] = cv::Point(left_ini_x, ini_y);
    output[3] = cv::Point(left_fin_x, fin_y);
    return output;
}
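// find_contour: discards contours that are too short or too small, or whose
// fitted-ellipse angle is near 0/180 degrees, and draws the rest into img_contour.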
int find_contour(cv::Mat& img_roi, cv::Mat& img_contour)
{
    // Find contours
    vector<vector<Point>> contours;
    findContours(img_roi, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    for (int i = 0; i < static_cast<int>(contours.size()); i++) {
        // Compute perimeter and area
        double length = arcLength(contours[i], true);
        double area = contourArea(contours[i]);
        // Axis-aligned bounding rectangle
        Rect rect = boundingRect(contours[i]);
        int h = img_roi.rows - 50;
        // Contour analysis: drop contours that are too short or too small
        if (length < 50.0 || area < 10.0) {
            continue;
        }
        //cout << "length:" << length << endl;
        //cout << "area:" << area << endl;
        /*
        // Minimum-area bounding rectangle
        RotatedRect rrt = minAreaRect(contours[i]);
        // About the angle convention: https://blog.csdn.net/weixin_41887615/article/details/91411086
        double angle = abs(rrt.angle);
        //angle < 50.0 || angle>89.0
        if (angle < 20.0 || angle > 89.0) {
            continue;
        }
        */
        if (contours[i].size() > 5) {
            // Fit an ellipse to estimate the contour orientation
            RotatedRect errt = fitEllipse(contours[i]);
            if ((errt.angle < 5.0) || (errt.angle > 160.0)) {
            //if (80.0 < errt.angle && errt.angle < 100.0) {
                continue;
            //}
            }
        }
        drawContours(img_contour, contours, i, Scalar(255), 1, 8);
    }
    return 0;
}
//g++ -std=c++14 lane_test3.cpp -o lane_test3 -lopencv_core -lopencv_highgui -lopencv_videoio -lopencv_imgproc -lopencv_imgcodecs