Project Log: ROS-Based Lane Line Detection

2020.09.06

Task: adapt the traditional OpenCV lane detection method into a ROS version

Chen Guang's (陈光) advanced lane line detection method was chosen as the basis for the port. The overall logic is:
First, create a subscriber that receives the data published by the camera and uses cv_bridge to convert the incoming ROS Image messages into OpenCV-format data.
Second, create a publisher that publishes the detection results under a specific message type (a minimal node skeleton is sketched below).
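A minimal sketch of this two-step structure, assuming placeholder topic names /camera/image_raw and /lane_detection/image (the actual topic names are not given in this post):

import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

class LaneDetectorNode(object):
    def __init__(self):
        self.bridge = CvBridge()
        # Publisher for the annotated result image
        self.pub = rospy.Publisher('/lane_detection/image', Image, queue_size=1)
        # Subscriber for the raw camera frames
        self.sub = rospy.Subscriber('/camera/image_raw', Image, self.callback, queue_size=1)

    def callback(self, msg):
        try:
            # ROS Image message -> OpenCV BGR image
            frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
        except CvBridgeError as e:
            rospy.logwarn('cv_bridge conversion failed: %s', e)
            return
        result = frame  # the lane detection pipeline would run on frame here
        # OpenCV image -> ROS Image message
        self.pub.publish(self.bridge.cv2_to_imgmsg(result, 'bgr8'))

if __name__ == '__main__':
    rospy.init_node('lane_detector')
    LaneDetectorNode()
    rospy.spin()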
Difficulties with the traditional method:
(1) It is very sensitive to lighting, brightness changes, and worn lane markings.
(2) When turning at an intersection, the camera cannot see a lane line ahead, so the green overlay region jumps around unpredictably.
(3) The error "x expect non zero vector" keeps occurring and makes the program exit.
Planned improvements (a sketch of these checks follows the list):
(1) When no lane line is detected (e.g. at an intersection), return from the callback immediately and let spin() wait for the next frame of data.
(2) Before calling np.polyfit() for the quadratic curve fit, check whether the input arrays are empty.
(3) Print the FPS and the vehicle's distance to the left lane line on the image.
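A minimal sketch of these checks, assuming the pixel arrays (leftx, lefty, rightx, righty) produced by the sliding-window search further down; the helper names are illustrative, not the project's actual code:

import cv2
import numpy as np

def fit_lane_or_skip(leftx, lefty, rightx, righty):
    # (2) Refuse to fit when either side has no pixels; the caller should then
    # (1) return from the callback and let spin() wait for the next frame.
    if len(leftx) == 0 or len(rightx) == 0:
        return None, None
    left_fit = np.polyfit(lefty, leftx, 2)    # quadratic fit x = a*y**2 + b*y + c
    right_fit = np.polyfit(righty, rightx, 2)
    return left_fit, right_fit

def draw_overlay(frame, fps, dist_left_m):
    # (3) Print the FPS and the distance to the left lane line on the frame
    cv2.putText(frame, 'FPS: %.1f' % fps, (30, 40), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    cv2.putText(frame, 'Left lane dist: %.2f m' % dist_left_m, (30, 80), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    return frame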

Issue log

#!/usr/bin/env python
# coding=utf-8
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from moviepy.editor import VideoFileClip
import glob
import time
import math
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError

#################################################################
# Step 3 : Warp image based on src_points and dst_points (perspective transform)
#################################################################
# The type of src_points & dst_points should be like
# np.float32([ [0,0], [100,200], [200, 300], [300,400]])
def warpImage(image, src_points, dst_points):
    image_size = (image.shape[1], image.shape[0])
    M = cv2.getPerspectiveTransform(src_points, dst_points)  # projection matrix from src_points to dst_points
    Minv = cv2.getPerspectiveTransform(dst_points, src_points)  # inverse projection matrix
    # flags=cv2.INTER_LINEAR fills the pixels far from the camera by linear interpolation
    warped_image = cv2.warpPerspective(image, M, image_size, flags=cv2.INTER_LINEAR)
    return warped_image, M, Minv
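# A usage sketch for warpImage: the four source/destination points below are
# placeholders for a 1920x1080 frame, not the values used in this project,
# and must be tuned for the actual camera mounting.
def warpImageDemo(image):
    src_points = np.float32([[800, 620], [1120, 620], [1680, 1080], [240, 1080]])
    dst_points = np.float32([[400, 0], [1520, 0], [1520, 1080], [400, 1080]])
    warped, M, Minv = warpImage(image, src_points, dst_points)
    # Minv is kept so the detected lane area can later be projected back onto
    # the original camera view with cv2.warpPerspective(..., Minv, ...)
    return warped, Minv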
#################################################################
# Step 4 : Extract the lane lines
#################################################################
def hlsLSelect(img, thresh=(220, 255)):  # threshold the L (lightness) channel of the HLS image to extract the white lane line
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)  # print(hls.shape) -> (1080, 1920, 3)
    l_channel = hls[:,:,1]  # print(l_channel.shape) -> (1080, 1920)
    # np.max(l_channel) is the largest element in the matrix (e.g. 125); scale the data up
    l_channel = 255*(l_channel/np.max(l_channel))  # after scaling, np.max(l_channel) = 255.0
    binary_output = np.zeros_like(l_channel)  # empty (all-black) image
    binary_output[(l_channel > thresh[0]) & (l_channel <= thresh[1])] = 1  # light up the pixels inside the threshold range
    ###cv2.imshow("whitelane",binary_output)  
    ###cv2.waitKey(0) 
    return binary_output
def labBSelect(img, thresh=(195, 255)):  # convert to the Lab color space and threshold the b channel to extract the yellow lane line
    # 1) Convert to LAB color space
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    lab_b = lab[:,:,2]
    # don't normalize if there are no yellows in the image
    if np.max(lab_b) > 100:#162
        lab_b = 255*(lab_b/np.max(lab_b))#print(np.max(lab_b)),255.0
    # 2) Apply a threshold to the b channel
    #print(lab_b)
    binary_output = np.zeros_like(lab_b)
    binary_output[((lab_b > thresh[0]) & (lab_b <= thresh[1]))] = 1
    ###cv2.imshow("yellolane",binary_output)
    ###cv2.waitKey(0)
    # 3) Return a binary image of threshold result
    return binary_output
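# The two masks are normally OR-combined into one binary image before the
# perspective warp and the window search. A minimal sketch of that step
# (the helper name combinedBinary is illustrative, not from the original post):
def combinedBinary(img):
    hls_binary = hlsLSelect(img)   # white lane line from the HLS L channel
    lab_binary = labBSelect(img)   # yellow lane line from the Lab b channel
    combined = np.zeros_like(hls_binary)
    combined[(hls_binary == 1) | (lab_binary == 1)] = 1
    return combined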
#################################################################
# Step 5 : Detect lane lines through moving windows (sliding-window search)
#################################################################
def find_lane_pixels(binary_warped, nwindows, margin, minpix):
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Take a histogram of the bottom half of the image: sum the white pixels in each column of the lower half (shape[0]=1080, // is integer division, the image origin is the top-left corner)
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0]//2)  # midpoint = 960
    leftx_base = np.argmax(histogram[:midpoint])  # left lane starting point: the column in [0, 960) with the largest pixel sum
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint  # right lane starting point
    
    # Set height of windows - based on nwindows above and image shape
    window_height = np.int(binary_warped.shape[0]//nwindows)
    # Identify the x and y positions of all nonzero pixels in the image (nonzero() returns the indices of the non-zero elements)
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
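        # --- The original post is cut off at this point; the rest of the loop
        # --- below is a sketch following the standard Udacity-style sliding
        # --- window search this function is based on, not the author's exact code.
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels inside each window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x position if enough pixels were found
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    # Concatenate the per-window arrays of pixel indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract the left and right lane pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img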