HyperLPR end-to-end DNN source code walkthrough

Overall structure

The codebase is small and its structure is simple: load the image, preprocess it, locate the plate, rectify/deskew it, run the segmentation and character-recognition networks through cv::dnn (Net::setInput() followed by Net::forward()), and finally match the characters one by one (a minimal sketch of this cv::dnn call pattern follows the file list below). The most important parts of this project are therefore the training of the DNN models and the completeness of the sample library.

CNNRecognizer.cpp: loads the character-recognition model and predicts character labels;
FastDeskew.cpp: quickly deskews a tilted plate image;
FineMapping.cpp: refines (fine-maps) the boundaries of the detected plate region;
Pipeline.cpp: the glue code that chains plateDetection, fineMapping, plateSegmentation, generalRecognizer and segmentationFreeRecognizer together;
PlateDetection.cpp: detects where plates are located in the image;
PlateSegmentation.cpp: separates each character out of the plate image;
Recognizer.cpp: recognizes the characters one by one and returns the result;
SegmentationFreeRecognizer.cpp: runs recognition on a single image without explicit segmentation and stores the result via mapping_table;
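
The cv::dnn call pattern referred to above is the same everywhere in the project: read a Caffe model, wrap the input image into a blob, call setInput() and forward(). A minimal sketch of that flow (the model paths and the 136x36 input size here are placeholders, not the project's real values):

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

int main() {
    // Placeholder paths: substitute the prototxt/caffemodel of whichever stage you want to probe.
    cv::dnn::Net net = cv::dnn::readNetFromCaffe("model.prototxt", "model.caffemodel");
    cv::Mat img = cv::imread("plate.jpg");
    // Scale pixels to [0,1] and resize to the network's expected input size.
    cv::Mat blob = cv::dnn::blobFromImage(img, 1 / 255.0, cv::Size(136, 36), cv::Scalar(), false);
    net.setInput(blob, "data");    // "data" is the input layer name used throughout this project
    cv::Mat prob = net.forward();  // one forward pass; prob holds the raw network output
    std::cout << "output columns: " << prob.cols << std::endl;
    return 0;
}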

First, the main function:

#include "../include/Pipeline.h"
using namespace pr;

	void TEST_PIPELINE(){

    pr::PipelinePR prc("../lpr/model/cascade.xml",
                      "../lpr/model/HorizonalFinemapping.prototxt","../lpr/model/HorizonalFinemapping.caffemodel",
                      "../lpr/model/Segmentation.prototxt","../lpr/model/Segmentation.caffemodel",
                      "../lpr/model/CharacterRecognization.prototxt","../lpr/model/CharacterRecognization.caffemodel"
                    );
	/* Load a local image into a cv::Mat */
    cv::Mat image = cv::imread("../6.jpg");
	/* Show the image in a window */
    cv::imshow("image",image);
    cv::waitKey(0);
	/* Declare a vector for the detection/recognition results of this image and run the pipeline */
    std::vector<pr::PlateInfo> res = prc.RunPiplineAsImage(image);
    float conf = 0 ;
	/* Iterate over every result */
    for(auto st:res) 
	{	/* Print results whose confidence exceeds 0.75 */
        if(st.confidence > 0.75) 
		{
            std::cout << st.getPlateName() << " " << st.confidence << std::endl;
            conf += st.confidence;
        }
    }
    std::cout<<conf<<std::endl;
	system("pause");
	}

int main()
{
   TEST_PIPELINE();
   return 0 ;
}
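
One small robustness note on the test function above: cv::imread returns an empty Mat when the file cannot be read, and cv::imshow would then fail, so it is worth guarding the load. A standalone illustration of that check (using the same path as the snippet above):

#include <opencv2/imgcodecs.hpp>
#include <iostream>

int main() {
    cv::Mat image = cv::imread("../6.jpg");
    if (image.empty()) {  // imread signals failure with an empty Mat instead of throwing
        std::cerr << "could not read ../6.jpg" << std::endl;
        return 1;
    }
    std::cout << "loaded " << image.cols << "x" << image.rows << std::endl;
    return 0;
}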

Next is Pipeline.h, which holds the main plate-recognition functions and members; all member functions are defined inside the class rather than in a separate .cpp file.

#ifndef SWIFTPR_PIPLINE_H
#define SWIFTPR_PIPLINE_H

#include "PlateDetection.h"
#include "PlateSegmentation.h"
#include "CNNRecognizer.h"
#include "PlateInfo.h"
#include "FastDeskew.h"
#include "FineMapping.h"
#include "Recognizer.h"

namespace pr{
    class PipelinePR{
	    public:
            GeneralRecognizer *generalRecognizer;
            PlateDetection *plateDetection;
            PlateSegmentation *plateSegmentation;
            FineMapping *fineMapping;
			/* Constructor: takes the trained model files */
			PipelinePR(std::string detector_filename,
				std::string finemapping_prototxt, std::string finemapping_caffemodel,
				std::string segmentation_prototxt, std::string segmentation_caffemodel,
				std::string charRecognization_proto, std::string charRecognization_caffemodel
			) 
			{
				/* Create the plate-detection model instance */
				plateDetection = new PlateDetection(detector_filename);
				/* Create the fine-mapping model instance (tightens the plate crop) */
				fineMapping = new FineMapping(finemapping_prototxt, finemapping_caffemodel);
				/* Create the character-segmentation model instance */
				plateSegmentation = new PlateSegmentation(segmentation_prototxt, segmentation_caffemodel);
				/* Create the character-recognition model instance */
				generalRecognizer = new CNNRecognizer(charRecognization_proto, charRecognization_caffemodel);
			}
			/* Destructor: free the dynamically allocated models */
			~PipelinePR() 
			{
				delete plateDetection;
				delete fineMapping;
				delete plateSegmentation;
				delete generalRecognizer;
			}
			/* Chinese characters, digits and letters that can appear on a plate */
			std::vector<std::string> chars_code{ "京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖","闽","赣","鲁","豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z" };
			
			/* Vector holding recognition results */
			std::vector<std::string> plateRes;

			/* Recognize every plate in a single image; the results are returned in a vector */
			std::vector<PlateInfo> RunPiplineAsImage(cv::Mat plateImage) 
			{
				/* Vector for the final recognition results */
				std::vector<PlateInfo> results;
				/* Intermediate plate info: detected plate locations */
				std::vector<pr::PlateInfo> plates;
				/* Rough plate detection (results stored in plates) */
				plateDetection->plateDetectionRough(plateImage, plates);
				// plateDetectionRough(cv::Mat InputImage, std::vector<pr::PlateInfo> &plateInfos, int min_w = 36, int max_w = 800): the last two parameters have defaults and need not be passed


				/* Iterate over the intermediate info of every plate in the image (mainly the plate-region Mat and its coordinates) */
				for (pr::PlateInfo plateinfo : plates) 
				{
					/* Get the plate-region image (fine mapping tries to crop an image that contains only the plate itself) */
					cv::Mat image_finemapping = plateinfo.getPlateImage();
					/* Vertical fine mapping */
					image_finemapping = fineMapping->FineMappingVertical(image_finemapping);
					/* Deskew */
					image_finemapping = pr::fastdeskew(image_finemapping, 5);

					/* Horizontal fine mapping */
					image_finemapping = fineMapping->FineMappingHorizon(image_finemapping, 2, 5);
					/* Resize to the standard plate size */
					cv::resize(image_finemapping, image_finemapping, cv::Size(136, 36));
					/* Store back into plateinfo */
					plateinfo.setPlateImage(image_finemapping);
					/* Rectangle list for the segmented characters --- https://blog.csdn.net/qq_30214939/article/details/65648273 */
					std::vector<cv::Rect> rects;
					/* Segment the characters of the plate image; results go into rects */
					plateSegmentation->segmentPlatePipline(plateinfo, 1, rects);
					/* Store the character sub-image of each rect back into plateinfo */
					plateSegmentation->ExtractRegions(plateinfo, rects);
					/* Extend the right border by 20 px with replicated edge pixels so the last character is not clipped */
					cv::copyMakeBorder(image_finemapping, image_finemapping, 0, 0, 0, 20, cv::BORDER_REPLICATE);
					/* Store the padded plate image into plateinfo */
					plateinfo.setPlateImage(image_finemapping);

					/* Recognize each segmented character; the segmentation-based result is stored in plateCoding */
					generalRecognizer->SegmentBasedSequenceRecognition(plateinfo);

					/* Decode all characters of the plate */
					plateinfo.decodePlateNormal(chars_code);

					/* Append the final result to the list */
					results.push_back(plateinfo);
					/* Print the result */
					std::cout << plateinfo.getPlateName() << std::endl;
				}

				//        for (auto str:results) {
				//            std::cout << str << std::endl;
				//        }
				return results;
			}




    };


}
#endif //SWIFTPR_PIPLINE_H
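
PlateInfo itself (setPlateImage, appendPlateChar, appendPlateCoding, decodePlateNormal, getPlateName) lives in PlateInfo.h and is not reproduced in this post. Conceptually, decodePlateNormal walks the per-character probability rows produced by the recognizer and, for each position, picks the chars_code entry with the highest probability. A standalone sketch of that idea (decodeSketch is my own illustration, not the project's function):

#include <opencv2/core.hpp>
#include <string>
#include <vector>

// Sketch: turn one probability row per character into a plate string by taking the argmax
// of each row and using it as an index into the character table.
static std::string decodeSketch(const std::vector<cv::Mat> &probRows,
                                const std::vector<std::string> &charsCode) {
    std::string name;
    for (const cv::Mat &row : probRows) {
        cv::Point maxLoc;
        cv::minMaxLoc(row, nullptr, nullptr, nullptr, &maxLoc);  // argmax over the 1xN row
        name += charsCode[maxLoc.x];
    }
    return name;
}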

Next is the plate-detection implementation, PlateDetection.cpp. It locates the rectangles that contain a plate and extracts the corresponding sub-images.

#include "../include/PlateDetection.h"

#include "util.h"

namespace pr{


    PlateDetection::PlateDetection(std::string filename_cascade)
	{
        cascade.load(filename_cascade);
    };

	/* Find every plate location in the image and store it in the output vector */
    void PlateDetection::plateDetectionRough(cv::Mat InputImage,std::vector<pr::PlateInfo>  &plateInfos,int min_w,int max_w)
	{
		/* Mat that will hold the preprocessed (grayscale) image */
        cv::Mat processImage;
		/* Convert the BGR image to grayscale */
        cv::cvtColor(InputImage,processImage,cv::COLOR_BGR2GRAY);
		/* Vector for the candidate plate rectangles */
        std::vector<cv::Rect> platesRegions;
//        std::vector<PlateInfo> plates;
		/* Minimum and maximum search-window sizes for the cascade (plates are roughly 4:1) */
        cv::Size minSize(min_w,min_w/4);
        cv::Size maxSize(max_w,max_w/4);
//        cv::imshow("input",InputImage);
//                cv::waitKey(0);

/*
Parameter 1: image - the image to detect on, usually grayscale to speed up detection;
Parameter 2: objects - output vector of rectangles for the detected objects;
Parameter 3: scaleFactor - how much the search window is scaled between two successive scans; the default 1.1 enlarges the window by 10% each pass;
Parameter 4: minNeighbors - minimum number of neighbouring rectangles that make up a detection (default 3). Groups with fewer than min_neighbors - 1 rectangles are rejected; with minNeighbors = 0 the function returns all candidate rectangles unmerged, which is useful when you want to post-process the raw candidates yourself;
Parameter 5: flags - either the default, or CV_HAAR_DO_CANNY_PRUNING; with CV_HAAR_DO_CANNY_PRUNING the function uses Canny edge detection to skip regions with too many or too few edges, which are unlikely to contain the target;
Parameters 6 and 7: minSize and maxSize bound the size of the returned regions, i.e. the object size range expected for this cascade.
About detectMultiScale: it is a multi-scale, multi-target detector.
Multi-scale: the template size is fixed but object sizes in the image are not, so the image/window is repeatedly rescaled and the template is slid over it; matches found at different scales are merged into the final detections.
Multi-target: every region matching the template is reported, so several objects can be returned in the objects vector.
*/
        cascade.detectMultiScale( processImage, platesRegions,1.1, 3, cv::CASCADE_SCALE_IMAGE,minSize,maxSize);

		/* Iterate over every candidate plate region */
        for(auto plate:platesRegions)
        {
            // extend rects
//            x -= w * 0.14
//            w += w * 0.28
//            y -= h * 0.6
//            h += h * 1.1;
			/* Compute how much to expand each candidate rectangle (explicit static_cast to int) */
            int zeroadd_w  = static_cast<int>(plate.width*0.28);
            int zeroadd_h = static_cast<int>(plate.height*1.2);
            int zeroadd_x = static_cast<int>(plate.width*0.14);
            int zeroadd_y = static_cast<int>(plate.height*0.6);

            plate.x-=zeroadd_x;
            plate.y-=zeroadd_y;
            plate.height += zeroadd_h;
            plate.width += zeroadd_w;

			/*
			Crop the (expanded) plate region from the input image.
			cropImage: crop images and save resize_width * resize_height area which contains labels
			*/
            cv::Mat plateImage = util::cropFromImage(InputImage,plate);
			/* Save the current plate's location and image */
            PlateInfo plateInfo(plateImage,plate);
			/* Push the plate info into the output vector */
            plateInfos.push_back(plateInfo);
        }
    }
//    std::vector<pr::PlateInfo> PlateDetection::plateDetectionRough(cv::Mat InputImage,cv::Rect roi,int min_w,int max_w){
//        cv::Mat roi_region = util::cropFromImage(InputImage,roi);
//        return plateDetectionRough(roi_region,min_w,max_w);
//    }




}//namespace pr
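
Note that after the expansion above, plate.x and plate.y can go negative and the rectangle can extend past the image border. util::cropFromImage is not reproduced in this post, so I assume it clamps the rectangle before cropping; a minimal sketch of such a clamped crop (safeCrop is my own name, not the project's):

#include <opencv2/core.hpp>

// Intersect the requested rectangle with the image bounds, then crop.
static cv::Mat safeCrop(const cv::Mat &image, cv::Rect rect) {
    rect &= cv::Rect(0, 0, image.cols, image.rows);  // cv::Rect supports intersection via &=
    return image(rect).clone();
}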

Then comes FineMapping.cpp, which preprocesses the raw plate crop produced by detection and prepares it for character recognition. I have not added many comments here.

#include "FineMapping.h"
namespace pr{

    const int FINEMAPPING_H = 50;
    const int FINEMAPPING_W = 120;
    const int PADDING_UP_DOWN = 30;
    void drawRect(cv::Mat image,cv::Rect rect)
    {
        cv::Point p1(rect.x,rect.y);
        cv::Point p2(rect.x+rect.width,rect.y+rect.height);
        cv::rectangle(image,p1,p2,cv::Scalar(0,255,0),1);
    }


    FineMapping::FineMapping(std::string prototxt,std::string caffemodel) {
         net = cv::dnn::readNetFromCaffe(prototxt, caffemodel);

    }

    cv::Mat FineMapping::FineMappingHorizon(cv::Mat FinedVertical,int leftPadding,int rightPadding)
    {

//        if(FinedVertical.channels()==1)
//            cv::cvtColor(FinedVertical,FinedVertical,cv::COLOR_GRAY2BGR);
        cv::Mat inputBlob = cv::dnn::blobFromImage(FinedVertical, 1/255.0, cv::Size(66,16),
                                      cv::Scalar(0,0,0),false);

        net.setInput(inputBlob,"data");
        cv::Mat prob = net.forward();
        int front = static_cast<int>(prob.at<float>(0,0)*FinedVertical.cols);
        int back = static_cast<int>(prob.at<float>(0,1)*FinedVertical.cols);
        front -= leftPadding ;
        if(front<0) front = 0;
        back +=rightPadding;
        if(back>FinedVertical.cols-1) back=FinedVertical.cols - 1;
        cv::Mat cropped  = FinedVertical.colRange(front,back).clone();
        return  cropped;


    }
    std::pair<int,int> FitLineRansac(std::vector<cv::Point> pts,int zeroadd = 0 )
    {
        std::pair<int,int> res;
        if(pts.size()>2)
        {
            cv::Vec4f line;
            cv::fitLine(pts,line,cv::DIST_HUBER,0,0.01,0.01);
            float vx = line[0];
            float vy = line[1];
            float x = line[2];
            float y = line[3];
            int lefty = static_cast<int>((-x * vy / vx) + y);
            int righty = static_cast<int>(((136- x) * vy / vx) + y);
            res.first = lefty+PADDING_UP_DOWN+zeroadd;
            res.second = righty+PADDING_UP_DOWN+zeroadd;
            return res;
        }
        res.first = zeroadd;
        res.second = zeroadd;
        return res;
    }

    cv::Mat FineMapping::FineMappingVertical(cv::Mat InputProposal,int sliceNum,int upper,int lower,int windows_size)
	{
        cv::Mat PreInputProposal;
        cv::Mat proposal;

        cv::resize(InputProposal,PreInputProposal,cv::Size(FINEMAPPING_W,FINEMAPPING_H));
        if(InputProposal.channels() == 3)
            cv::cvtColor(PreInputProposal,proposal,cv::COLOR_BGR2GRAY);
        else
            PreInputProposal.copyTo(proposal);

//            proposal = PreInputProposal;

        // this can improve sensitivity somewhat (the erosion below is currently disabled)
        cv::Mat kernal = cv::getStructuringElement(cv::MORPH_ELLIPSE,cv::Size(1,3));
//        cv::erode(proposal,proposal,kernal);


        float diff = static_cast<float>(upper-lower);
        diff/=static_cast<float>(sliceNum-1);
        cv::Mat binary_adaptive;
        std::vector<cv::Point> line_upper;
        std::vector<cv::Point> line_lower;
        int contours_nums=0;

        for(int i = 0 ; i < sliceNum ; i++)
        {
            std::vector<std::vector<cv::Point> > contours;
            float k =lower + i*diff;
            cv::adaptiveThreshold(proposal,binary_adaptive,255,cv::ADAPTIVE_THRESH_MEAN_C,cv::THRESH_BINARY,windows_size,k);
            cv::Mat draw;
            binary_adaptive.copyTo(draw);
            cv::findContours(binary_adaptive,contours,cv::RETR_EXTERNAL,cv::CHAIN_APPROX_SIMPLE);
            for(auto contour: contours)
            {
                cv::Rect bdbox =cv::boundingRect(contour);
                float lwRatio = bdbox.height/static_cast<float>(bdbox.width);
                int  bdboxAera = bdbox.width*bdbox.height;
                if ((   lwRatio>0.7&&bdbox.width*bdbox.height>100 && bdboxAera<300)
                    || (lwRatio>3.0 && bdboxAera<100 && bdboxAera>10))
                {

                    cv::Point p1(bdbox.x, bdbox.y);
                    cv::Point p2(bdbox.x + bdbox.width, bdbox.y + bdbox.height);
                    line_upper.push_back(p1);
                    line_lower.push_back(p2);
                    contours_nums+=1;
                }
            }
        }

        std:: cout<<"contours_nums "<<contours_nums<<std::endl;

        if(contours_nums<41)
        {
            cv::bitwise_not(InputProposal,InputProposal);
            cv::Mat kernal = cv::getStructuringElement(cv::MORPH_ELLIPSE,cv::Size(1,5));
            cv::Mat bak;
            cv::resize(InputProposal,bak,cv::Size(FINEMAPPING_W,FINEMAPPING_H));
            cv::erode(bak,bak,kernal);
            if(InputProposal.channels() == 3)
                cv::cvtColor(bak,proposal,cv::COLOR_BGR2GRAY);
            else
                proposal = bak;
            int contours_nums=0;

            for(int i = 0 ; i < sliceNum ; i++)
            {
                std::vector<std::vector<cv::Point> > contours;
                float k =lower + i*diff;
                cv::adaptiveThreshold(proposal,binary_adaptive,255,cv::ADAPTIVE_THRESH_MEAN_C,cv::THRESH_BINARY,windows_size,k);
//                cv::imshow("image",binary_adaptive);
//            cv::waitKey(0);
                cv::Mat draw;
                binary_adaptive.copyTo(draw);
                cv::findContours(binary_adaptive,contours,cv::RETR_EXTERNAL,cv::CHAIN_APPROX_SIMPLE);
                for(auto contour: contours)
                {
                    cv::Rect bdbox =cv::boundingRect(contour);
                    float lwRatio = bdbox.height/static_cast<float>(bdbox.width);
                    int  bdboxAera = bdbox.width*bdbox.height;
                    if ((   lwRatio>0.7&&bdbox.width*bdbox.height>120 && bdboxAera<300)
                        || (lwRatio>3.0 && bdboxAera<100 && bdboxAera>10))
                    {

                        cv::Point p1(bdbox.x, bdbox.y);
                        cv::Point p2(bdbox.x + bdbox.width, bdbox.y + bdbox.height);
                        line_upper.push_back(p1);
                        line_lower.push_back(p2);
                        contours_nums+=1;
                    }
                }
            }
//            std:: cout<<"contours_nums "<<contours_nums<<std::endl;
        }

            cv::Mat rgb;
            cv::copyMakeBorder(PreInputProposal, rgb, 30, 30, 0, 0, cv::BORDER_REPLICATE);
//        cv::imshow("rgb",rgb);
//        cv::waitKey(0);
//


            std::pair<int, int> A;
            std::pair<int, int> B;
            A = FitLineRansac(line_upper, -2);
            B = FitLineRansac(line_lower, 2);
            int leftyB = A.first;
            int rightyB = A.second;
            int leftyA = B.first;
            int rightyA = B.second;
            int cols = rgb.cols;
            int rows = rgb.rows;
//        pts_map1  = np.float32([[cols - 1, rightyA], [0, leftyA],[cols - 1, rightyB], [0, leftyB]])
//        pts_map2 = np.float32([[136,36],[0,36],[136,0],[0,0]])
//        mat = cv2.getPerspectiveTransform(pts_map1,pts_map2)
//        image = cv2.warpPerspective(rgb,mat,(136,36),flags=cv2.INTER_CUBIC)
            std::vector<cv::Point2f> corners(4);
            corners[0] = cv::Point2f(cols - 1, rightyA);
            corners[1] = cv::Point2f(0, leftyA);
            corners[2] = cv::Point2f(cols - 1, rightyB);
            corners[3] = cv::Point2f(0, leftyB);
            std::vector<cv::Point2f> corners_trans(4);
            corners_trans[0] = cv::Point2f(136, 36);
            corners_trans[1] = cv::Point2f(0, 36);
            corners_trans[2] = cv::Point2f(136, 0);
            corners_trans[3] = cv::Point2f(0, 0);
            cv::Mat transform = cv::getPerspectiveTransform(corners, corners_trans);
            cv::Mat quad = cv::Mat::zeros(36, 136, CV_8UC3);
            cv::warpPerspective(rgb, quad, transform, quad.size());
        return quad;

    }


}
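
For reference, the horizontal fine-mapping network outputs two values in [0,1] that are interpreted as the left and right plate boundaries as fractions of the crop width; FineMappingHorizon then computes front = prob(0,0) * cols - leftPadding and back = prob(0,1) * cols + rightPadding, clamps both to the image, and keeps colRange(front, back). A tiny numeric illustration of that mapping (the probability values are made up):

#include <algorithm>
#include <iostream>

int main() {
    // Suppose the net predicts boundaries at 8% and 94% of the width of a 120-px-wide crop.
    float probFront = 0.08f, probBack = 0.94f;  // made-up network outputs
    int cols = 120, leftPadding = 2, rightPadding = 5;
    int front = static_cast<int>(probFront * cols) - leftPadding;   // 9 - 2 = 7
    int back  = static_cast<int>(probBack  * cols) + rightPadding;  // 112 + 5 = 117
    front = std::max(front, 0);
    back  = std::min(back, cols - 1);  // 117 < 119, so it stays
    std::cout << "crop columns [" << front << ", " << back << ")" << std::endl;
    return 0;
}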

Next, the trickier part of the geometric transforms: the image rectification in FastDeskew.cpp.

#include "FastDeskew.h"

namespace pr{


    const int  ANGLE_MIN = 30 ;
    const int ANGLE_MAX = 150 ;
    const int PLATE_H = 36;
    const int PLATE_W = 136;

    int angle(float x,float y)
    {
        return atan2(x,y)*180/3.1415;
    }

    std::vector<float> avgfilter(std::vector<float> angle_list,int windowsSize) {
        std::vector<float> angle_list_filtered(angle_list.size() - windowsSize + 1);
        for (int i = 0; i < angle_list.size() - windowsSize + 1; i++) {
            float avg = 0.00f;
            for (int j = 0; j < windowsSize; j++) {
                avg += angle_list[i + j];
            }
            avg = avg / windowsSize;
            angle_list_filtered[i] = avg;
        }

        return angle_list_filtered;
    }


    void drawHist(std::vector<float> seq){
        cv::Mat image(300,seq.size(),CV_8U);
        image.setTo(0);

        for(int i = 0;i<seq.size();i++)
        {
            float l = *std::max_element(seq.begin(),seq.end());

            int p = int(float(seq[i])/l*300);

            cv::line(image,cv::Point(i,300),cv::Point(i,300-p),cv::Scalar(255,255,255));
        }
        cv::imshow("vis",image);
    }

    cv::Mat  correctPlateImage(cv::Mat skewPlate,float angle,float maxAngle)
    {

        cv::Mat dst;

        cv::Size size_o(skewPlate.cols,skewPlate.rows);


        int extend_padding = 0;
//        if(angle<0)
            extend_padding = static_cast<int>(skewPlate.rows*tan(cv::abs(angle)/180* 3.14) );
//        else
//            extend_padding = static_cast<int>(skewPlate.rows/tan(cv::abs(angle)/180* 3.14) );

//        std::cout<<"extend:"<<extend_padding<<std::endl;

        cv::Size size(skewPlate.cols + extend_padding ,skewPlate.rows);

        float interval = abs(sin((angle /180) * 3.14)* skewPlate.rows);
//        std::cout<<interval<<std::endl;

        cv::Point2f pts1[4] = {cv::Point2f(0,0),cv::Point2f(0,size_o.height),cv::Point2f(size_o.width,0),cv::Point2f(size_o.width,size_o.height)};
        if(angle>0) {
            cv::Point2f pts2[4] = {cv::Point2f(interval, 0), cv::Point2f(0, size_o.height),
                                   cv::Point2f(size_o.width, 0), cv::Point2f(size_o.width - interval, size_o.height)};
            cv::Mat M  = cv::getPerspectiveTransform(pts1,pts2);
            cv::warpPerspective(skewPlate,dst,M,size);


        }
        else {
            cv::Point2f pts2[4] = {cv::Point2f(0, 0), cv::Point2f(interval, size_o.height), cv::Point2f(size_o.width-interval, 0),
                                   cv::Point2f(size_o.width, size_o.height)};
            cv::Mat M  = cv::getPerspectiveTransform(pts1,pts2);
            cv::warpPerspective(skewPlate,dst,M,size,cv::INTER_CUBIC);

        }
        return  dst;
    }

	/*
	------------ Fast deskewing of the plate image ---------------
	@skewImage : image to be corrected;
	@blockSize : block size (step) used for the corner / eigenvalue computation;
	*/
    cv::Mat fastdeskew(cv::Mat skewImage,int blockSize)
	{
		/* Size of the smoothing window */
        const int FILTER_WINDOWS_SIZE = 5;
		/* Histogram with one bin per degree (180 bins) */
        std::vector<float> angle_list(180);
		/* Zero-initialize the histogram */
        memset(angle_list.data(),0,angle_list.size()*sizeof(float));
		/* Backup of the original image */
        cv::Mat bak;
		/* Copy the original into bak */
        skewImage.copyTo(bak);
		/* If the image is a colour image, convert it to grayscale */
        if(skewImage.channels() == 3)
            cv::cvtColor(skewImage,skewImage,cv::COLOR_RGB2GRAY);
		/* Now single-channel: build the angle histogram */
        if(skewImage.channels() == 1)
        {
			/* Eigenvalue/eigenvector matrix */
            cv::Mat eigen;
			/* Compute per-block eigenvalues and eigenvectors (corner analysis); results go into eigen */
            cv::cornerEigenValsAndVecs(skewImage,eigen,blockSize,5);
			/* Sample skewImage on a grid with a stride of blockSize */
            for( int j = 0; j < skewImage.rows; j+=blockSize )
            { 
				for( int i = 0; i < skewImage.cols; i+=blockSize )
                {
					/* (x2, y2) is the second eigenvector stored in eigen at this position */
                    float x2 = eigen.at<cv::Vec6f>(j, i)[4];
                    float y2 = eigen.at<cv::Vec6f>(j, i)[5];
					/* Convert it to an angle */
                    int angle_cell = angle(x2,y2);
					/* Accumulate a count in the corresponding angle bin */
                    angle_list[(angle_cell + 180)%180]+=1.0;
                }
            }
        }
		/* Smooth the histogram with a moving-average window of size FILTER_WINDOWS_SIZE */
        std::vector<float> filtered = avgfilter(angle_list,5);
		/* Position of the smoothed histogram peak (offset by half the window) */
        int maxPos = std::max_element(filtered.begin(),filtered.end()) - filtered.begin() + FILTER_WINDOWS_SIZE/2;
		/* Above ANGLE_MAX: fold the angle back into range */
        if(maxPos>ANGLE_MAX)
            maxPos = (-maxPos+90+180)%180;
		/* Below ANGLE_MIN: shift by 90 degrees */
        if(maxPos<ANGLE_MIN)
            maxPos-=90;
		/* Convert to the skew angle relative to horizontal */
        maxPos=90-maxPos;
		/* Correct the image by maxPos degrees */
		cv::Mat deskewed = correctPlateImage(bak, static_cast<float>(maxPos),60.0f);
        return deskewed;
    }



}//namespace pr
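
correctPlateImage shears the plate by the estimated angle: the output canvas is widened by roughly rows * tan(|angle|) pixels, and the top (or bottom) edge is shifted sideways by rows * sin(|angle|) pixels before the perspective warp. A small numeric illustration of those two quantities (the 12-degree angle is made up):

#include <cmath>
#include <iostream>

int main() {
    const double kPi = 3.14159265358979;
    int rows = 36;            // plate height used throughout the project
    double angleDeg = 12.0;   // made-up skew estimate
    double angleRad = angleDeg / 180.0 * kPi;
    int extendPadding = static_cast<int>(rows * std::tan(std::fabs(angleRad)));  // about 7 extra columns
    double interval   = std::fabs(std::sin(angleRad)) * rows;                    // about 7.5 px of sideways shift
    std::cout << "extend_padding=" << extendPadding << " interval=" << interval << std::endl;
    return 0;
}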

Then PlateSegmentation.cpp, which holds the plate's intermediate data and splits the plate image into individual character regions.

#include "../include/PlateSegmentation.h"
#include "../include/niBlackThreshold.h"


//#define DEBUG
namespace pr{

    PlateSegmentation::PlateSegmentation(std::string prototxt,std::string caffemodel) {
        net = cv::dnn::readNetFromCaffe(prototxt, caffemodel);
    }
    cv::Mat PlateSegmentation::classifyResponse(const cv::Mat &cropped){
        cv::Mat inputBlob = cv::dnn::blobFromImage(cropped, 1/255.0, cv::Size(22,22), cv::Scalar(0,0,0),false);
        net.setInput(inputBlob,"data");
        return net.forward();
    }

    void drawHist(float* seq,int size,const char* name){
        cv::Mat image(300,size,CV_8U);
        image.setTo(0);
        float* start =seq;
        float* end = seq+size;
        float l = *std::max_element(start,end);
        for(int i = 0;i<size;i++)
        {
            int p = int(float(seq[i])/l*300);
            cv::line(image,cv::Point(i,300),cv::Point(i,300-p),cv::Scalar(255,255,255));
        }
        cv::resize(image,image,cv::Size(600,100));
        cv::imshow(name,image);
    }

    inline void computeSafeMargin(int &val,const int &rows){
        val = std::min(val,rows);
        val = std::max(val,0);
    }

    cv::Rect boxFromCenter(const cv::Point center,int left,int right,int top,int bottom,cv::Size bdSize)
    {
        cv::Point p1(center.x -  left ,center.y - top);
        cv::Point p2( center.x + right, center.y + bottom);
        p1.x = std::max(0,p1.x);
        p1.y = std::max(0,p1.y);
        p2.x = std::min(p2.x,bdSize.width-1);
        p2.y = std::min(p2.y,bdSize.height-1);
        cv::Rect rect(p1,p2);
        return rect;
    }

    cv::Rect boxPadding(cv::Rect rect,int left,int right,int top,int bottom,cv::Size bdSize)
    {

        cv::Point center(rect.x+(rect.width>>1),rect.y + (rect.height>>1));
        int rebuildLeft  = (rect.width>>1 )+ left;
        int rebuildRight = (rect.width>>1 )+ right;
        int rebuildTop = (rect.height>>1 )+ top;
        int rebuildBottom = (rect.height>>1 )+ bottom;
        return boxFromCenter(center,rebuildLeft,rebuildRight,rebuildTop,rebuildBottom,bdSize);

    }



    void PlateSegmentation:: refineRegion(cv::Mat &plateImage,const std::vector<int> &candidatePts,const int padding,std::vector<cv::Rect> &rects){
        int w = candidatePts[5] - candidatePts[4];
        int cols = plateImage.cols;
        int rows = plateImage.rows;
        for(int i = 0 ; i < candidatePts.size()  ; i++)
        {
            int left = 0;
            int right = 0 ;

            if(i == 0 ){
                left= candidatePts[i];
                right = left+w+padding;
                }
            else {
                left = candidatePts[i] - padding;
                right = left + w + padding * 2;
            }

            computeSafeMargin(right,cols);
            computeSafeMargin(left,cols);
            cv::Rect roi(left,0,right - left,rows-1);
            cv::Mat roiImage;
            plateImage(roi).copyTo(roiImage);

            if (i>=1)
            {

                cv::Mat roi_thres;
//                cv::threshold(roiImage,roi_thres,0,255,cv::THRESH_OTSU|cv::THRESH_BINARY);

                niBlackThreshold(roiImage,roi_thres,255,cv::THRESH_BINARY,15,0.3,BINARIZATION_NIBLACK);

                std::vector<std::vector<cv::Point>> contours;
                cv::findContours(roi_thres,contours,cv::RETR_LIST,cv::CHAIN_APPROX_SIMPLE);
                cv::Point boxCenter(roiImage.cols>>1,roiImage.rows>>1);

                cv::Rect final_bdbox;
                cv::Point final_center;
                int final_dist = INT_MAX;


                for(auto contour:contours)
                {
                    cv::Rect bdbox = cv::boundingRect(contour);
                    cv::Point center(bdbox.x+(bdbox.width>>1),bdbox.y + (bdbox.height>>1));
                    int dist = (center.x - boxCenter.x)*(center.x - boxCenter.x);
                    if(dist<final_dist && bdbox.height > rows>>1)
                    {   final_dist =dist;
                        final_center = center;
                        final_bdbox = bdbox;
                    }
                }

                //rebuild box
                if(final_bdbox.height/ static_cast<float>(final_bdbox.width) > 3.5 && final_bdbox.width*final_bdbox.height<10)
                    final_bdbox = boxFromCenter(final_center,8,8,(rows>>1)-3 , (rows>>1) - 2,roiImage.size());
                else {
                    if(i == candidatePts.size()-1)
                        final_bdbox = boxPadding(final_bdbox, padding/2, padding, padding/2, padding/2, roiImage.size());
                    else
                        final_bdbox = boxPadding(final_bdbox, padding, padding, padding, padding, roiImage.size());


//                    std::cout<<final_bdbox<<std::endl;
//                    std::cout<<roiImage.size()<<std::endl;
#ifdef DEBUG
                    cv::imshow("char_thres",roi_thres);

                    cv::imshow("char",roiImage(final_bdbox));
                    cv::waitKey(0);
#endif


                }


                final_bdbox.x += left;

                rects.push_back(final_bdbox);
//

            }
            else
            {
                rects.push_back(roi);
            }

//            else
//            {
//
//            }

//            cv::GaussianBlur(roiImage,roiImage,cv::Size(7,7),3);
//
//            cv::imshow("image",roiImage);
//            cv::waitKey(0);


        }



    }
    void avgfilter(float *angle_list,int size,int windowsSize) {
        float *filterd = new float[size];
        for(int i = 0 ; i < size ; i++) filterd [i] = angle_list[i];
//        memcpy(filterd,angle_list,size);

        cv::Mat kernal_gaussian = cv::getGaussianKernel(windowsSize,3,CV_32F);
        float *kernal = (float*)kernal_gaussian.data;
//        kernal+=windowsSize;
        int r = windowsSize/2;




        for (int i = 0; i < size; i++) {
            float avg = 0.00f;
            for (int j = 0; j < windowsSize; j++) {
                if(i+j-r>0&&i+j+r<size-1)
                    avg += filterd[i + j-r]*kernal[j];
            }
//            avg = avg / windowsSize;
            angle_list[i] = avg;

        }

        delete[] filterd;
    }

    void PlateSegmentation::templateMatchFinding(const cv::Mat &respones,int windowsWidth,std::pair<float,std::vector<int>> &candidatePts){
        int rows = respones.rows;
        int cols = respones.cols;



        float *data = (float*)respones.data;
        float *engNum_prob = data;
        float *false_prob = data+cols;
        float *ch_prob = data+cols*2;

        avgfilter(engNum_prob,cols,5);
        avgfilter(false_prob,cols,5);
//        avgfilter(ch_prob,cols,5);
        std::vector<int> candidate_pts(7);
#ifdef DEBUG
        drawHist(engNum_prob,cols,"engNum_prob");
        drawHist(false_prob,cols,"false_prob");
        drawHist(ch_prob,cols,"ch_prob");
                cv::waitKey(0);
#endif




        int cp_list[7];
        float loss_selected = -1;

        for(int start = 0 ; start < 20 ; start+=2)
            for(int  width = windowsWidth-5; width < windowsWidth+5 ; width++ ){
                for(int interval = windowsWidth/2; interval < windowsWidth; interval++)
                {
                    int cp1_ch  = start;
                    int cp2_p0 = cp1_ch+ width;
                    int cp3_p1 = cp2_p0+ width + interval;
                    int cp4_p2 = cp3_p1 + width;
                    int cp5_p3 = cp4_p2 + width+1;
                    int cp6_p4 = cp5_p3 + width+2;
                    int cp7_p5= cp6_p4+ width+2;

                    int md1 = (cp1_ch+cp2_p0)>>1;
                    int md2 = (cp2_p0+cp3_p1)>>1;
                    int md3 = (cp3_p1+cp4_p2)>>1;
                    int md4 = (cp4_p2+cp5_p3)>>1;
                    int md5 = (cp5_p3+cp6_p4)>>1;
                    int md6 = (cp6_p4+cp7_p5)>>1;




                    if(cp7_p5>=cols)
                        continue;
                    float loss = ch_prob[cp1_ch]+
                       engNum_prob[cp2_p0] +engNum_prob[cp3_p1]+engNum_prob[cp4_p2]+engNum_prob[cp5_p3]+engNum_prob[cp6_p4] +engNum_prob[cp7_p5]
                    + (false_prob[md2]+false_prob[md3]+false_prob[md4]+false_prob[md5]+false_prob[md5] + false_prob[md6]);
//                    float loss = ch_prob[cp1_ch]*3 -(false_prob[cp3_p1]+false_prob[cp4_p2]+false_prob[cp5_p3]+false_prob[cp6_p4]+false_prob[cp7_p5]);




                    if(loss>loss_selected)
                    {
                        loss_selected = loss;
                        cp_list[0]= cp1_ch;
                        cp_list[1]= cp2_p0;
                        cp_list[2]= cp3_p1;
                        cp_list[3]= cp4_p2;
                        cp_list[4]= cp5_p3;
                        cp_list[5]= cp6_p4;
                        cp_list[6]= cp7_p5;
                    }
                }
            }
        candidate_pts[0] = cp_list[0];
        candidate_pts[1] = cp_list[1];
        candidate_pts[2] = cp_list[2];
        candidate_pts[3] = cp_list[3];
        candidate_pts[4] = cp_list[4];
        candidate_pts[5] = cp_list[5];
        candidate_pts[6] = cp_list[6];

        candidatePts.first = loss_selected;
        candidatePts.second = candidate_pts;

    };


    void PlateSegmentation::segmentPlateBySlidingWindows(cv::Mat &plateImage,int windowsWidth,int stride,cv::Mat &respones){


        cv::resize(plateImage,plateImage,cv::Size(136,36));

        cv::Mat plateImageGray;
        cv::cvtColor(plateImage,plateImageGray,cv::COLOR_BGR2GRAY);

        int height = plateImage.rows - 1;
        int width = plateImage.cols - 1;

        for(int i = 0 ; i < plateImage.cols - windowsWidth +1 ; i +=stride)
        {
            cv::Rect roi(i,0,windowsWidth,height);
            cv::Mat roiImage = plateImageGray(roi);
            cv::Mat response = classifyResponse(roiImage);
            respones.push_back(response);
        }




        respones =  respones.t();
//        std::pair<float,std::vector<int>> images ;
//
//
//        std::cout<<images.first<<" ";
//        for(int i = 0 ; i < images.second.size() ; i++)
//        {
//            std::cout<<images.second[i]<<" ";
//            cv::line(plateImageGray,cv::Point(images.second[i],0),cv::Point(images.second[i],36),cv::Scalar(255,255,255),1); //DEBUG
//        }

//        int w = images.second[5] - images.second[4];

//        cv::line(plateImageGray,cv::Point(images.second[5]+w,0),cv::Point(images.second[5]+w,36),cv::Scalar(255,255,255),1); //DEBUG
//        cv::line(plateImageGray,cv::Point(images.second[5]+2*w,0),cv::Point(images.second[5]+2*w,36),cv::Scalar(255,255,255),1); //DEBUG


//        RefineRegion(plateImageGray,images.second,5);

//        std::cout<<w<<std::endl;

//        std::cout<<<<std::endl;

//        cv::resize(plateImageGray,plateImageGray,cv::Size(600,100));



    }

//    void filterGaussian(cv::Mat &respones,float sigma){
//
//    }


    void PlateSegmentation::segmentPlatePipline(PlateInfo &plateInfo,int stride,std::vector<cv::Rect> &Char_rects){
        cv::Mat plateImage = plateInfo.getPlateImage(); // get src image .
        cv::Mat plateImageGray;
        cv::cvtColor(plateImage,plateImageGray,cv::COLOR_BGR2GRAY);
        //do binarzation
        //
        std::pair<float,std::vector<int>> sections ; // segment points variables .

        cv::Mat respones; //three response of every sub region from origin image .
        segmentPlateBySlidingWindows(plateImage,DEFAULT_WIDTH,1,respones);
        templateMatchFinding(respones,DEFAULT_WIDTH/stride,sections);

//        std::cout<<sections<<std::endl;

        refineRegion(plateImageGray,sections.second,5,Char_rects);
#ifdef DEBUG
        for(int i = 0 ; i < sections.second.size() ; i++)
        {
            std::cout<<sections.second[i]<<" ";
            cv::line(plateImageGray,cv::Point(sections.second[i],0),cv::Point(sections.second[i],36),cv::Scalar(255,255,255),1); //DEBUG
        }
        cv::imshow("plate",plateImageGray);
        cv::waitKey(0);
#endif
//        cv::waitKey(0);

    }

    void PlateSegmentation::ExtractRegions(PlateInfo &plateInfo,std::vector<cv::Rect> &rects)
	{
        cv::Mat plateImage = plateInfo.getPlateImage();
        for(int i = 0 ; i < rects.size(); i++)
		{
            cv::Mat charImage;
			/* Copy this character region out of the plate image */
            plateImage(rects[i]).copyTo(charImage);
			/* Convert to grayscale */
            if(charImage.channels())
                cv::cvtColor(charImage,charImage,cv::COLOR_BGR2GRAY);
//            cv::imshow("image",charImage);
//            cv::waitKey(0);
			/* Histogram equalization to stretch the contrast */
            cv::equalizeHist(charImage,charImage);
			/* std::pair holding the character type (first) and the character image (second) */
            std::pair<CharType,cv::Mat> char_instance;
            if(i == 0 )
			{
				/* First position: Chinese character */
                char_instance.first = CHINESE;
            } 
			else if(i == 1)
			{
				/* Second position: letter */
                char_instance.first = LETTER;
            }
            else
			{
				/* Remaining positions: letter or digit */
                char_instance.first = LETTER_NUMS;
            }
			/* Attach the character image */
            char_instance.second = charImage;
            plateInfo.appendPlateChar(char_instance);
        }
    }

}//namespace pr
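
The segmentation therefore works in two passes: segmentPlateBySlidingWindows slides a fixed-width window across the plate and, for every column position, stores three network responses (roughly "letter/digit here", "non-character/gap here" and "Chinese character here"); templateMatchFinding then brute-forces candidate 7-point layouts (start position, character width, and the extra gap between the second and third characters) and keeps the layout with the highest summed response. A stripped-down sketch of that scoring idea over dummy response arrays (scoreLayout and all the values here are mine, for illustration only):

#include <iostream>
#include <vector>

// Score one candidate layout of 7 character start positions against per-column responses.
static float scoreLayout(const std::vector<int> &cp,
                         const std::vector<float> &chProb,    // "Chinese character" response
                         const std::vector<float> &numProb,   // "letter/digit" response
                         const std::vector<float> &gapProb) { // "non-character" response
    float s = chProb[cp[0]];                         // first position should look like a Chinese character
    for (int i = 1; i < 7; i++) s += numProb[cp[i]]; // the other six should look like letters/digits
    for (int i = 1; i < 6; i++)                      // midpoints between neighbours should look like gaps
        s += gapProb[(cp[i] + cp[i + 1]) / 2];
    return s;
}

int main() {
    // Dummy flat responses over a 100-column plate, just to exercise the scoring.
    std::vector<float> chProb(100, 0.1f), numProb(100, 0.1f), gapProb(100, 0.1f);
    std::vector<int> layout = {2, 18, 40, 54, 68, 82, 96};  // made-up candidate positions
    std::cout << "score = " << scoreLayout(layout, chProb, numProb, gapProb) << std::endl;
    return 0;
}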

Finally, the most important piece: feeding the segmented characters to the DNN model to obtain the recognition results (Recognizer.cpp).

#include "../include/Recognizer.h"

namespace pr{

    void GeneralRecognizer::SegmentBasedSequenceRecognition(PlateInfo &plateinfo)
	{
		/* Iterate over every segmented character of the plate */
        for(auto char_instance:plateinfo.plateChars)
        {
			/* Pair of (character type, recognition result) */
            std::pair<CharType,cv::Mat> res;
			/* Run the DNN on the character image stored in this pair */
            cv::Mat code_table= recognizeCharacter(char_instance.second);
			/* Keep the character type: Chinese character, letter, or letter/digit */
            res.first = char_instance.first;
			/* Copy the probability table into res.second */
            code_table.copyTo(res.second);
            plateinfo.appendPlateCoding(res);
        }
   }
}
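
CNNRecognizer.cpp itself is not reproduced in this post. Judging from the same cv::dnn pattern used in PlateSegmentation::classifyResponse, recognizeCharacter presumably wraps the character crop into a blob, forwards it through the character-recognition net, and returns the row of class probabilities. A rough sketch under that assumption (the class name and the 14x30 input size are my guesses, not taken from the project):

#include <opencv2/dnn.hpp>
#include <string>

// Sketch only: what a segmentation-based character recognizer built on cv::dnn could look like.
class SketchCharRecognizer {
public:
    SketchCharRecognizer(const std::string &prototxt, const std::string &caffemodel) {
        net = cv::dnn::readNetFromCaffe(prototxt, caffemodel);
    }
    // Returns a 1xN row of class probabilities, one column per entry of chars_code.
    cv::Mat recognizeCharacter(const cv::Mat &charImage) {
        cv::Mat blob = cv::dnn::blobFromImage(charImage, 1 / 255.0, cv::Size(14, 30),
                                              cv::Scalar(), false);
        net.setInput(blob, "data");
        return net.forward();
    }
private:
    cv::dnn::Net net;
};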

That's all I can manage for now; time to call it a night.
