OpenCV 360-Degree Driving Assistance System: Intelligent Traffic-Light Prediction

  1. AI Learning of Traffic Lights

This feature applies to the front camera only. When the video starts playing, a custom AI algorithm learns where red and green pixels appear most frequently. Once learning finishes, the result decides whether the learned AI traffic-light region is used, and the rest of the video is then searched for traffic lights inside that region.

If a red light is detected, "Red" is displayed at the bottom of the video; if a green light is detected, "Green" is displayed instead.

1. Collect the red and green pixel points from the first 5% of frames.

2. Sort them by coordinate and discard the top and bottom 10%, removing abnormally large and small values.

3. After filtering, check whether enough points remain: if so, use the learned region; if not, fall back to the system's default region.

4. While the learned region is in use, keep monitoring the red and green pixels; if the dense pixel cluster shifts significantly, restart learning (a minimal sketch of this check follows the list).
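Step 4 is not part of the listings below; the following is only a minimal sketch of how such a drift check might be expressed. The helper name shouldRelearn, the cluster-centre argument, and the half-region threshold are assumptions for illustration, not code from the original project.

#include <cstdlib>
#include <opencv2/core.hpp>

// Sketch only (assumed helper): decide whether the red/green pixel cluster has
// drifted far enough from the learned region that learning should restart.
// (learnX, learnY) is the learned centre, (learnW, learnH) the learned span.
static bool shouldRelearn(const cv::Point& recentCenter,
                          int learnX, int learnY, int learnW, int learnH)
{
    int dx = std::abs(recentCenter.x - learnX);
    int dy = std::abs(recentCenter.y - learnY);
    // Re-learn once the cluster centre moves more than half the learned
    // region away from the learned centre (the threshold is an assumption).
    return dx > learnW / 2 || dy > learnH / 2;
}

When shouldRelearn() returns true, the learning arrays would be cleared and the collection phase would start again, exactly as in step 1.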

Mat player::RG_detector(Mat frame)
{
    Mat detected_frame = frame.clone();

    Mat roi(detected_frame, Rect(400,200,400,100));

    Rect detected_rect(400,200,400,100);
    // Adjust brightness and contrast (a = gain, b = bias, class members)
    frame.convertTo(img, frame.type(), a, b);

    // Convert to the YCrCb color space
    cvtColor(img, imgYCrCb, CV_BGR2YCrCb);

    imgRed.create(imgYCrCb.rows, imgYCrCb.cols, CV_8UC1);
    imgGreen.create(imgYCrCb.rows, imgYCrCb.cols, CV_8UC1);

    // Split YCrCb into its three channels
    vector<Mat> planes;
    split(imgYCrCb, planes);
    // Walk the Cr channel to separate red and green pixels
    MatIterator_<uchar> it_Cr = planes[1].begin<uchar>(),
                        it_Cr_end = planes[1].end<uchar>();
    MatIterator_<uchar> it_Red = imgRed.begin<uchar>();
    MatIterator_<uchar> it_Green = imgGreen.begin<uchar>();

    for (; it_Cr != it_Cr_end; ++it_Cr, ++it_Red, ++it_Green)
    {
        // RED: Cr > 145 (Cr is 8-bit, so no upper bound is needed)
        if (*it_Cr > 145)
            *it_Red = 255;
        else
            *it_Red = 0;

        // GREEN: 95 < Cr < 110
        if (*it_Cr > 95 && *it_Cr < 110)
            *it_Green = 255;
        else
            *it_Green = 0;
    }

    redCount = processImgR(imgRed);
    greenCount = processImgG(imgGreen);
    cout << "red:" << redCount << ";  " << "green:" << greenCount << endl;

    if(currentFrameNumber < 50)
    {
        putText(frame, "AI learning", hintTextPoint, font_face, font_scale, Scalar(135, 74, 32), thickness, 8, 0);
    }
    else
    {
        if(redCount > greenCount )
        {
            putText(frame, "Red", hintTextPoint, font_face, font_scale, Scalar(41, 41, 239), thickness, 8, 0);
        }
        else if(redCount < greenCount )
        {
            putText(frame, "Green", hintTextPoint, font_face, font_scale, Scalar(22, 210, 115), thickness, 8, 0);
        }
    }
    return frame;
}
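For context, the detector above would typically be driven from a playback loop. The sketch below is an assumption for illustration: the VideoCapture source, the window name, and the player instance "p" are not part of the original code, and the frame counter is assumed to be advanced inside the player class itself.

#include <opencv2/opencv.hpp>
#include <string>

// Sketch only: push frames from a front-camera recording through RG_detector
// and show the annotated output ("Red"/"Green"/"AI learning" overlay).
void runTrafficLightDemo(player& p, const std::string& videoPath)
{
    cv::VideoCapture cap(videoPath);
    if (!cap.isOpened())
        return;

    cv::Mat frame;
    while (cap.read(frame))
    {
        cv::Mat annotated = p.RG_detector(frame);
        cv::imshow("traffic light", annotated);
        if (cv::waitKey(30) == 27)   // Esc quits
            break;
    }
}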


bool player::RGB_AI_learn_x_y(int x, int y)
{
    if(currentFrameNumber <= totalFrameNumber / 20)
    {
        // Only collect points from the upper half of the frame, where traffic lights appear
        if(y <= frame.rows/2)
        {
            RGB_learn_array_x.push_back(x);
            RGB_learn_array_y.push_back(y);
        }
        return false; // Still collecting samples; no verdict yet
    }

    else
    {
        sort(RGB_learn_array_x.begin(), RGB_learn_array_x.end());
        sort(RGB_learn_array_y.begin(), RGB_learn_array_y.end());

        // Signed length difference between the x and y coordinate arrays
        int subArrayNum = (int)RGB_learn_array_x.size() - (int)RGB_learn_array_y.size();
        if(subArrayNum != 0)
        {
            // Drop the surplus elements so both arrays have the same length
            if(subArrayNum > 0)
            {
                for(int i=0; i<subArrayNum; i++)
                {
                    RGB_learn_array_x.pop_back();
                }
            }
            else if(subArrayNum < 0)
            {
                for(int i=0; i>subArrayNum; i--)
                {
                    RGB_learn_array_y.pop_back();
                }
            }
        }
        int delete_count = 0, delete_total = RGB_learn_array_x.size()/10;
        for(delete_count = 0; delete_count < delete_total; delete_count++)
        {
            //Remove the largest 10% of values (arrays are sorted ascending)
            RGB_learn_array_x.pop_back();
            RGB_learn_array_y.pop_back();
        }

        sort(RGB_learn_array_x.rbegin(), RGB_learn_array_x.rend());
        sort(RGB_learn_array_y.rbegin(), RGB_learn_array_y.rend());
        for(delete_count = 0; delete_count < delete_total; delete_count++)
        {
            //Remove the smallest 10% of values (arrays are now sorted descending)
            RGB_learn_array_x.pop_back();
            RGB_learn_array_y.pop_back();
        }

        sort(RGB_learn_array_x.begin(), RGB_learn_array_x.end());
        sort(RGB_learn_array_y.begin(), RGB_learn_array_y.end());

        // Guard against an empty sample set
        if(RGB_learn_array_x.empty() || RGB_learn_array_y.empty())
            return false;

        long long int sum_x, sum_y;
        sum_x = accumulate(begin(RGB_learn_array_x), end(RGB_learn_array_x), 0LL);
        sum_y = accumulate(begin(RGB_learn_array_y), end(RGB_learn_array_y), 0LL);

        // The mean coordinates give the centre of the learned region
        RGB_learn_x = (int)(sum_x / (long long)RGB_learn_array_x.size());
        RGB_learn_y = (int)(sum_y / (long long)RGB_learn_array_y.size());
        // The coordinate span (max - min) gives its width and height
        RGB_learn_w = RGB_learn_array_x.back() - RGB_learn_array_x.front();
        RGB_learn_h = RGB_learn_array_y.back() - RGB_learn_array_y.front();

        if(RGB_learn_array_x.size() > 50)
            return true; // Enough traffic-light points were learned; the result is considered valid
        else
            return false;
    }
}
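The learned centre (RGB_learn_x, RGB_learn_y) and span (RGB_learn_w, RGB_learn_h) describe a rectangular region of interest. A small helper such as the one below (an assumption, not part of the original code) turns those values into a cv::Rect clipped to the frame, which processImgR()/processImgG() could then query with contains() instead of repeating the coordinate comparisons.

#include <opencv2/core.hpp>

// Sketch only (assumed helper): build the learned region as a cv::Rect,
// clipped to the frame so it is always a valid ROI.
static cv::Rect learnedRegionToRect(int cx, int cy, int w, int h,
                                    const cv::Size& frameSize)
{
    cv::Rect region(cx - w / 2, cy - h / 2, w, h);
    // Intersect with the full-frame rectangle to stay inside the image.
    return region & cv::Rect(0, 0, frameSize.width, frameSize.height);
}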

int player::processImgR(Mat src)
{
    Mat tmp;

    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    vector<Point> hull;

    CvPoint2D32f tempNode;
    CvMemStorage* storage = cvCreateMemStorage();
    CvSeq* pointSeq = cvCreateSeq(CV_32FC2, sizeof(CvSeq), sizeof(CvPoint2D32f), storage);

    Rect* trackBox;
    Rect* result;
    int resultNum = 0;

    int area = 0;

    src.copyTo(tmp);
    //Extract the contours
    findContours(tmp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    if (contours.size() > 0)
    {
        trackBox = new Rect[contours.size()];
        result = new Rect[contours.size()];

        //Determine the regions to track
        for (int i = 0; i < contours.size(); i++)
        {
            cvClearSeq(pointSeq);
            // Get the point set of the convex hull
            convexHull(Mat(contours[i]), hull, true);
            int hullcount = (int)hull.size();
            // Save points of the convex hull
            for (int j = 0; j < hullcount - 1; j++)
            {
                //if(HoughCircle_detector(src) > 1)
                {
                    // static: the learning verdict must persist across hull points and frames
                    static bool final_learn_result = false;
                    if(currentFrameNumber <= totalFrameNumber / 20)
                    {

                        //The first 5% of frames are used to learn the traffic-light location
                        final_learn_result = RGB_AI_learn_x_y(hull[j].x, hull[j].y);
                    }
                    else
                    {
                        if(final_learn_result == true)
                        {
                            //Learning succeeded: restrict detection to the learned AI region
                            if(hull[j].x >= RGB_learn_x-RGB_learn_w/2 && hull[j].x <= RGB_learn_x+RGB_learn_w/2 &&
                               hull[j].y >= RGB_learn_y-RGB_learn_h/2 && hull[j].y <= RGB_learn_y+RGB_learn_h/2)
                            {
                                //cout<<"AI success!"<<endl;
                                tempNode.x = hull[j].x;
                                tempNode.y = hull[j].y;
                                cvSeqPush(pointSeq, &tempNode);
                            }
                        }
                        else
                        {
                            // Learning not valid: fall back to the default region
                            if(hull[j].x >= 400 && hull[j].x <= 800 &&
                               hull[j].y >= 400 && hull[j].y <= 550)
                            {
                                tempNode.x = hull[j].x;
                                tempNode.y = hull[j].y;
                                cvSeqPush(pointSeq, &tempNode);
                            }
                        }
                    }

                }
            }

            trackBox[i] = cvBoundingRect(pointSeq);
        }

        if (isFirstDetectedR)
        {
            lastTrackBoxR = new Rect[contours.size()];
            for (int i = 0; i < contours.size(); i++)
                lastTrackBoxR[i] = trackBox[i];
            lastTrackNumR = contours.size();
            isFirstDetectedR = false;
        }
        else
        {
            for (int i = 0; i < contours.size(); i++)
            {
                for (int j = 0; j < lastTrackNumR; j++)
                {
                    // Keep only boxes that overlap a box from the previous frame
                    if (isIntersected(trackBox[i], lastTrackBoxR[j]))
                    {
                        result[resultNum++] = trackBox[i];
                        break;
                    }
                }
            }
            delete[] lastTrackBoxR;
            lastTrackBoxR = new Rect[contours.size()];
            for (int i = 0; i < contours.size(); i++)
            {
                lastTrackBoxR[i] = trackBox[i];
            }
            lastTrackNumR = contours.size();
        }

        delete[] trackBox;
    }
    else
    {
        isFirstDetectedR = true;
        result = NULL;
    }
    cvReleaseMemStorage(&storage);

    if (result != NULL)
    {
        for (int i = 0; i < resultNum; i++)
        {
            area += result[i].area();
        }
    }
    delete[] result;

    return area;
}
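isIntersected() is used above but not listed in this section. A minimal version could rely on cv::Rect's intersection operator; the sketch below (name and signature assumed) is an illustration rather than the original implementation.

#include <opencv2/core.hpp>

// Sketch only (assumed): two boxes are treated as intersecting when their
// overlap has non-zero area; cv::Rect's & operator returns the intersection.
static bool isIntersectedSketch(const cv::Rect& a, const cv::Rect& b)
{
    return (a & b).area() > 0;
}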

int player::processImgG(Mat src)
{
    Mat tmp;

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    vector< Point > hull;

    CvPoint2D32f tempNode;
    CvMemStorage* storage = cvCreateMemStorage();
    CvSeq* pointSeq = cvCreateSeq(CV_32FC2, sizeof(CvSeq), sizeof(CvPoint2D32f), storage);

    Rect* trackBox;
    Rect* result;
    int resultNum = 0;

    int area = 0;

    src.copyTo(tmp);
    // Extract the contour
    findContours(tmp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    if (contours.size() > 0)
    {
        trackBox = new Rect[contours.size()];
        result = new Rect[contours.size()];

        // Determine the area to track
        for (int i = 0; i < contours.size(); i++)
        {
            cvClearSeq(pointSeq);
            // Get the point set of the convex hull
            convexHull(Mat(contours[i]), hull, true);
            int hullcount = (int)hull.size();
            // Save points of the convex hull
            for (int j = 0; j < hullcount - 1; j++)
            {
                //if(HoughCircle_detector(src) > 1)
                {
                    // static: the learning verdict must persist across hull points and frames
                    static bool final_learn_result = false;
                    if(currentFrameNumber <= totalFrameNumber / 20)
                    {
                        //The first 5% of frames are used to learn the traffic-light location
                        final_learn_result = RGB_AI_learn_x_y(hull[j].x, hull[j].y);
                    }
                    else
                    {
                        if(final_learn_result == true)
                        {
                            //Learning succeeded: restrict detection to the learned AI region
                            if(hull[j].x >= RGB_learn_x-RGB_learn_w/2 && hull[j].x <= RGB_learn_x+RGB_learn_w/2 &&
                               hull[j].y >= RGB_learn_y-RGB_learn_h/2 && hull[j].y <= RGB_learn_y+RGB_learn_h/2)
                            {
                                //cout<<"AI success!"<<endl;
                                tempNode.x = hull[j].x;
                                tempNode.y = hull[j].y;
                                cvSeqPush(pointSeq, &tempNode);
                            }
                        }
                        else
                        {
                            // Learning not valid: fall back to the default region
                            if(hull[j].x >= 400 && hull[j].x <= 800 &&
                               hull[j].y >= 400 && hull[j].y <= 550)
                            {
                                tempNode.x = hull[j].x;
                                tempNode.y = hull[j].y;
                                cvSeqPush(pointSeq, &tempNode);
                            }
                        }
                    }

                }

            }

            trackBox[i] = cvBoundingRect(pointSeq);
        }

        if (isFirstDetectedG)
        {
            lastTrackBoxG = new Rect[contours.size()];
            for (int i = 0; i < contours.size(); i++)
                lastTrackBoxG[i] = trackBox[i];
            lastTrackNumG = contours.size();
            isFirstDetectedG = false;
        }
        else
        {
            for (int i = 0; i < contours.size(); i++)
            {
                for (int j = 0; j < lastTrackNumG; j++)
                {
                    // Keep only boxes that overlap a box from the previous frame
                    if (isIntersected(trackBox[i], lastTrackBoxG[j]))
                    {
                        result[resultNum++] = trackBox[i];
                        break;
                    }
                }
            }
            delete[] lastTrackBoxG;
            lastTrackBoxG = new Rect[contours.size()];
            for (int i = 0; i < contours.size(); i++)
            {
                lastTrackBoxG[i] = trackBox[i];
            }
            lastTrackNumG = contours.size();
        }

        delete[] trackBox;
    }
    else
    {
        isFirstDetectedG = true;
        result = NULL;
    }
    cvReleaseMemStorage(&storage);

    if (result != NULL)
    {
        for (int i = 0; i < resultNum; i++)
        {
            area += result[i].area();
        }
    }
    delete[] result;

    return area;
}
