V4L2 Video Driver: Writing the Application-Layer Code
- V4L2 supports several interface types: capture, output, overlay, and more. This article focuses on capture.
- The overall flow is as follows:
Step | Description |
---|---|
1 | Open the device file. (int fd = open("/dev/video0", O_RDWR);) |
2 | Query device capabilities to see what the device supports, e.g. video capture or audio input/output. (ioctl(fd_v4l, VIDIOC_QUERYCAP, &cap);) |
3 | Select the video input; one video device may offer several inputs. (VIDIOC_S_INPUT, struct v4l2_input) |
4 | Set the capture parameters:<br>- video standard (PAL/NTSC): ioctl(fd_v4l, VIDIOC_S_STD, &std_id)<br>- capture window size: ioctl(fd_v4l, VIDIOC_S_CROP, &crop)<br>- frame format (pixel layout, width, height): ioctl(fd_v4l, VIDIOC_S_FMT, &fmt)<br>- frame rate: ioctl(fd_v4l, VIDIOC_S_PARM, &parm)<br>- rotation: ioctl(fd_v4l, VIDIOC_S_CTRL, &ctrl) |
5 | Request frame buffers from the driver, usually no more than 5. (ioctl(fd_v4l, VIDIOC_REQBUFS, &req);) |
6 | Query each buffer's length and offset in kernel space (ioctl(fd_v4l, VIDIOC_QUERYBUF, &buf);), then mmap the buffers into user space so captured frames can be accessed directly without copying. (buffers[i].start = mmap(NULL, buffers[i].length, PROT_READ | PROT_WRITE, MAP_SHARED, fd_v4l, buf.m.offset);) |
7 | Enqueue all the requested buffers so they can receive captured data. (ioctl(fd_v4l, VIDIOC_QBUF, &buf)) |
8 | Start capturing. (ioctl(fd_v4l, VIDIOC_STREAMON, &type)) |
9 | Dequeue a buffer holding captured data to obtain a raw frame. (ioctl(fd_v4l, VIDIOC_DQBUF, &buf)) |
10 | After processing, requeue the buffer at the tail of the queue; repeating steps 9-10 captures continuously until you stop (a minimal end-to-end sketch follows this table). |
11 | Stop capturing. (ioctl(fd_v4l, VIDIOC_STREAMOFF, &type);) |
12 | munmap the frame buffers and close the device. (close(fd_v4l);) |
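Before walking through each step with the project code below, here is a minimal end-to-end sketch of the whole table in one place. It is illustrative only: error handling is omitted, steps 2-3 (capability query and input selection) are skipped for brevity, and the device path (/dev/video0), resolution (640x480) and pixel format (YUYV) are assumptions a real device may override.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
    int fd = open("/dev/video0", O_RDWR);                 // step 1: open (blocking mode)
    struct v4l2_format fmt;                               // step 4: set the frame format
    memset(&fmt, 0, sizeof(fmt));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = 640;                              // assumed; the driver may adjust it
    fmt.fmt.pix.height = 480;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    ioctl(fd, VIDIOC_S_FMT, &fmt);

    struct v4l2_requestbuffers req;                       // step 5: request buffers
    memset(&req, 0, sizeof(req));
    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    ioctl(fd, VIDIOC_REQBUFS, &req);
    if (req.count > 4)
        req.count = 4;                                    // stay within our bookkeeping arrays

    void* start[4];
    size_t length[4];
    for (unsigned int i = 0; i < req.count; ++i)          // steps 6-7: query, mmap and enqueue
    {
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        ioctl(fd, VIDIOC_QUERYBUF, &buf);
        length[i] = buf.length;
        start[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, buf.m.offset);
        ioctl(fd, VIDIOC_QBUF, &buf);
    }

    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ioctl(fd, VIDIOC_STREAMON, &type);                    // step 8: start streaming
    for (int n = 0; n < 100; ++n)                         // capture 100 frames
    {
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        ioctl(fd, VIDIOC_DQBUF, &buf);                    // step 9: blocks until a frame is ready
        printf("frame %u: %u bytes at %p\n", buf.index, buf.bytesused, start[buf.index]);
        ioctl(fd, VIDIOC_QBUF, &buf);                     // step 10: requeue for reuse
    }
    ioctl(fd, VIDIOC_STREAMOFF, &type);                   // step 11: stop streaming
    for (unsigned int i = 0; i < req.count; ++i)          // step 12: unmap and close
        munmap(start[i], length[i]);
    close(fd);
    return 0;
}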
1. Opening the device: open()
//
// Open the device node
INT32 CV4l2CaptureIr::openVideoDev()
{
LOGMSG("CV4l2CaptureIr::openVideoDev");
// Timing for statistics
TimePoint nTotalCostTm = currentTime();
struct stat stInfo;
MEMSET(stInfo);
// Fetch file status
if (ReturnCode_Error == stat(DALI_IR_SENSOR_DEV_NAME, &stInfo))
{
LOGERROR("CV4l2CaptureIr::openVideoDev can't identify DALI_IR_SENSOR_DEV_NAME=[%s] errno=[%d] reason=[%s]", DALI_IR_SENSOR_DEV_NAME, errno, strerror(errno));
return ReturnCode_Error;
}
// Verify the node is a character device
if (!S_ISCHR(stInfo.st_mode))
{
LOGERROR("CV4l2CaptureIr::openVideoDev isn't char device DALI_IR_SENSOR_DEV_NAME=[%s] errno=[%d] reason=[%s]", DALI_IR_SENSOR_DEV_NAME, errno, strerror(errno));
return ReturnCode_Error;
}
// Open the video node (/dev/video0) from user space in non-blocking mode
m_nFd = open(DALI_IR_SENSOR_DEV_NAME, O_RDWR | O_NONBLOCK, 0);
if (-1 == m_nFd)
{
LOGERROR("CV4l2CaptureIr::openVideoDev can't open the device DALI_IR_SENSOR_DEV_NAME=[%s] errno=[%d] reason=[%s]", DALI_IR_SENSOR_DEV_NAME, errno, strerror(errno));
return ReturnCode_Error;
}
LOGMSG("CV4l2CaptureIr::openVideoDev is suc... nTotalCostTm=[%llu] m_nFd=[%d]", (currentTime() - nTotalCostTm).count(), m_nFd);
return ReturnCode_Success;
}
/* Note: video0 maps directly to /dev/videoX under Linux; open whichever device node applies, since they are all character devices. */
2. Querying device capabilities: ioctl
/*
 * struct v4l2_capability {
 *     __u8  driver[16];   // driver name
 *     __u8  card[32];     // device name
 *     __u8  bus_info[32]; // bus information
 *     __u32 version;      // driver version
 *     __u32 capabilities; // capabilities of the physical device as a whole
 *     __u32 device_caps;  // capabilities available through this particular device node
 *     __u32 reserved[3];  // reserved
 * };
 * Common capabilities flag:
 *     V4L2_CAP_VIDEO_CAPTURE - the device supports video capture
 */
struct v4l2_capability v4l2CapInfo;
MEMSET(v4l2CapInfo);
// Query the device capabilities and sanity-check them
if (-1 == this->ioCtrl(m_nFd, VIDIOC_QUERYCAP, &v4l2CapInfo))
{
if (EINVAL == errno)
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... DALI_IR_SENSOR_DEV_NAME=[%s] is not V4L2 device", DALI_IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
else
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... VIDIOC_QUERYCAP error errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_Error;
}
}
// 2019-04-13 00:29:24.585 Message: CV4l2CaptureIr::initVideoDev DriverName=[uvcvideo] nCard Name=[MC_Client] nBus info=[usb-AmbUSB-1] nDriverVersion=[4.14.164]
LOGMSG("CV4l2CaptureIr::initVideoDev DriverName=[%s] nCard Name=[%s] nBus info=[%s] nDriverVersion=[%u.%u.%u]", v4l2CapInfo.driver, v4l2CapInfo.card, v4l2CapInfo.bus_info, (v4l2CapInfo.version >> 16) & 0xFF, (v4l2CapInfo.version >> 8) & 0xFF, (v4l2CapInfo.version) & 0xFF);
3. Setting the image format
3.1 Enumerating the supported pixel formats
/*
 * struct v4l2_fmtdesc
 * {
 *     __u32              index;           // index of the format to query, set by the application
 *     enum v4l2_buf_type type;            // buffer type, set by the application
 *     __u32              flags;           // e.g. whether the format is compressed
 *     __u8               description[32]; // human-readable format name
 *     __u32              pixelformat;     // fourcc pixel format code
 *     __u32              reserved[4];     // reserved
 * };
 */
struct v4l2_fmtdesc v4l2FmtDescInfo;
MEMSET(v4l2FmtDescInfo);
// List every supported format
v4l2FmtDescInfo.index = 0;
v4l2FmtDescInfo.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
while (ioctl(m_nFd, VIDIOC_ENUM_FMT, &v4l2FmtDescInfo) != -1)
{
LOGMSG("CV4l2CaptureIr::initVideoDev index=[%d] description=[%s]", v4l2FmtDescInfo.index + 1, v4l2FmtDescInfo.description);
v4l2FmtDescInfo.index++;
}
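The description string is handy for logging, but pixelformat itself is a FOURCC code whose four bytes are printable ASCII (for example V4L2_PIX_FMT_YUYV is 0x56595559, i.e. "YUYV", as the log in section 12.3 shows). A small illustrative helper, not part of the original class, decodes it:

// Decode a V4L2 fourcc such as V4L2_PIX_FMT_YUYV (0x56595559) into "YUYV"
static void fourccToStr(unsigned int nFourcc, char szOut[5])
{
    szOut[0] = (char)( nFourcc        & 0xFF);
    szOut[1] = (char)((nFourcc >>  8) & 0xFF);
    szOut[2] = (char)((nFourcc >> 16) & 0xFF);
    szOut[3] = (char)((nFourcc >> 24) & 0xFF);
    szOut[4] = '\0';
}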
3.2 Setting the image format
/*
 * VIDIOC_G_FMT / VIDIOC_S_FMT
 * struct v4l2_format
 * {
 *     enum v4l2_buf_type type; // buffer type, set by the application
 *     union fmt
 *     {
 *         struct v4l2_pix_format        pix;    // video devices, V4L2_BUF_TYPE_VIDEO_CAPTURE
 *         struct v4l2_window            win;    // V4L2_BUF_TYPE_VIDEO_OVERLAY
 *         struct v4l2_vbi_format        vbi;
 *         struct v4l2_sliced_vbi_format sliced;
 *         __u8                          raw_data[200]; // user-defined
 *     };
 * };
 * struct v4l2_pix_format
 * {
 *     __u32 width;       // frame width in pixels
 *     __u32 height;      // frame height in pixels
 *     __u32 pixelformat; // pixel format (fourcc)
 *     enum v4l2_field field;
 *     __u32 bytesperline;
 *     __u32 sizeimage;
 *     enum v4l2_colorspace colorspace;
 *     __u32 priv;
 * };
 */
struct v4l2_format v4l2FmtInfo;
MEMSET(v4l2FmtInfo);
// Configure the capture format: width, height and pixel format (JPEG, YUYV, MJPEG, ...)
v4l2FmtInfo.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (devConfRef.irParam.daLiIrParam.nForceFormat)
{
v4l2FmtInfo.fmt.pix.width = devConfRef.irParam.daLiIrParam.nWidth; /* 160 */
v4l2FmtInfo.fmt.pix.height = devConfRef.irParam.daLiIrParam.nHeight; /* 240 */
v4l2FmtInfo.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV /* devConfRef.irParam.daLiIrParam.nPixelFormat */;
v4l2FmtInfo.fmt.pix.field = V4L2_FIELD_INTERLACED;
// Apply the format
if (-1 == this->ioCtrl(m_nFd, VIDIOC_S_FMT, &v4l2FmtInfo))
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... DALI_IR_SENSOR_DEV_NAME=[%s] VIDIOC_S_FMT nWidth=[%u] nHeight=[%u] nPixelFormat=[%x] field=[%d]", DALI_IR_SENSOR_DEV_NAME, v4l2FmtInfo.fmt.pix.width, v4l2FmtInfo.fmt.pix.height, v4l2FmtInfo.fmt.pix.pixelformat, v4l2FmtInfo.fmt.pix.field);
return ReturnCode_Error;
}
/* Note VIDIOC_S_FMT may change width and height. */
}
else
{
// Otherwise just read back the current format
if (-1 == this->ioCtrl(m_nFd, VIDIOC_G_FMT, &v4l2FmtInfo))
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error...DALI_IR_SENSOR_DEV_NAME=[%s] VIDIOC_G_FMT", DALI_IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
}
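Because VIDIOC_S_FMT may silently adjust the requested values, compare what the driver returned with what was asked for; the full source in section 12 logs a warning in that case. A condensed check, reusing the devConfRef parameters from above:

// The driver may have adjusted width/height/sizeimage to the nearest supported values
if (v4l2FmtInfo.fmt.pix.width  != (unsigned int)devConfRef.irParam.daLiIrParam.nWidth ||
    v4l2FmtInfo.fmt.pix.height != (unsigned int)devConfRef.irParam.daLiIrParam.nHeight)
{
    LOGWARNING("driver adjusted format: width=[%u] height=[%u] sizeimage=[%u]",
               v4l2FmtInfo.fmt.pix.width, v4l2FmtInfo.fmt.pix.height, v4l2FmtInfo.fmt.pix.sizeimage);
}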
4. Requesting frame buffers
/*
 * Requesting and managing buffers: an application can exchange data with the device in three
 * ways - direct read/write, memory mapping (mmap), or user pointers.
 * VIDIOC_REQBUFS asks the driver for frame buffers; v4l2_requestbuffers.count tells the driver
 * how many to allocate. Multiple buffers form a FIFO, which improves capture efficiency.
 * struct v4l2_requestbuffers
 * {
 *     __u32              count;  // number of buffers requested
 *     enum v4l2_buf_type type;   // buffer type
 *     enum v4l2_memory   memory; // mmap vs. user-pointer I/O
 *     __u32              reserved[2];
 * };
 *
 * enum v4l2_buf_type
 * {
 *     V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
 *     V4L2_BUF_TYPE_VIDEO_OUTPUT = 2,
 *     V4L2_BUF_TYPE_VIDEO_OVERLAY = 3,
 *     V4L2_BUF_TYPE_VBI_CAPTURE = 4,
 *     V4L2_BUF_TYPE_VBI_OUTPUT = 5,
 *     V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
 *     V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
 *     V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
 *     V4L2_BUF_TYPE_PRIVATE = 0x80,
 * };
 * enum v4l2_memory
 * {
 *     V4L2_MEMORY_MMAP,
 *     V4L2_MEMORY_USERPTR
 * };
 * 1. The VIDIOC_REQBUFS ioctl releases any previously allocated buffers.
 * 2. The driver writes the number of buffers actually allocated back to user space:
 *        *count = allocated_buffers;
 * 3. Inside the driver (vb2_core_reqbufs and the queue_setup callback):
 *        size = dev->width * dev->height * 2;
 *        if (0 == *nbuffers)
 *            *nbuffers = 32;
 *        while (size * *nbuffers > vid_limit * 1024 * 1024)
 *            (*nbuffers)--;
 *    With a 1920*1080 resolution and vid_limit = 16, this gives
 *    16*1024*1024 / (1920*1080*2) = 4.x, so 4 buffers are granted.
 */
// Build the buffer request: the application must fill in count, type and memory.
struct v4l2_requestbuffers info;
MEMSET(info);
info.count = m_nBuffersCount; // number of buffers requested
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; // buffer type
info.memory = V4L2_MEMORY_MMAP; // mmap vs. user-pointer I/O; mmap here
// Request the buffers (they live in kernel space and must later be mmap'ed into user space).
// This call may lower info.count to the number of buffers actually granted.
// Usually at least 3 buffers should be requested.
if (-1 == this->ioCtrl(m_nFd, VIDIOC_REQBUFS, &info)) // request buffers from the driver
{
if (EINVAL == errno)
{
LOGERROR("CV4l2CaptureIr::initMmap is error... DALI_IR_SENSOR_DEV_NAME=[%s] does not support memory mapping", DALI_IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
else
{
LOGERROR("CV4l2CaptureIr::initMmap is error... DALI_IR_SENSOR_DEV_NAME=[%s] VIDIOC_REQBUFS", DALI_IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
}
// The request may have changed info.count, so record the number of buffers actually granted
if ((unsigned int)m_nBuffersCount > info.count)
{
LOGWARNING("CV4l2CaptureIr::initMmap VIDIOC_REQBUFS m_nBuffersCount=[%d] > info.count=[%u]", m_nBuffersCount, info.count);
m_nBuffersCount = info.count;
}
LOGMSG("CV4l2CaptureIr::initMmap VIDIOC_REQBUFS m_nBuffersCount=[%d]", m_nBuffersCount);
5. Enqueueing all the buffers
struct v4l2_buffer info;
MEMSET(info);
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
info.memory = V4L2_MEMORY_MMAP;
info.index = nIdx;
// Queue the requested kernel buffer onto the capture input queue
if (-1 == this->ioCtrl(m_nFd, VIDIOC_QBUF, &info))
{
LOGERROR("CV4l2CaptureIr::initMmap is error... VIDIOC_QBUF");
return ReturnCode_Error;
}
6. Mapping the buffers with mmap
- A V4L2 device can deliver data in three ways: read, mmap, and userptr.
- read is the simplest: frames are fetched with the read() system call.
- With mmap, the kernel maintains a queue of buffers that are mapped into user space; reading image data is then a continuous cycle of dequeueing and requeueing those buffers.
- With read(), every frame is copied from kernel space to user space, and since a frame is typically large this is inefficient. With memory mapping, user space accesses the kernel buffers directly, no kernel-to-user copy is needed, and efficiency improves considerably.
// Allocate the bookkeeping array: it records each mapped buffer's address and size.
// Map all m_nBuffersCount granted buffers into the process and track them via m_pBuffers.
// For each buffer, query its metadata and mmap it into user space.
m_pBuffers = (StuV4l2FrameBuffer *)calloc(info.count, sizeof(StuV4l2FrameBuffer));
CHECKI(m_pBuffers);
LOGMSG("CV4l2CaptureIr::initMmap calloc m_pBuffers=[%p] size=[%u]", m_pBuffers, info.count * sizeof(StuV4l2FrameBuffer));
// Map every buffer
for (int nIdx = 0; nIdx < m_nBuffersCount; ++nIdx)
{
/*
 * struct v4l2_buffer
 * {
 *     __u32              index;     // buffer index
 *     enum v4l2_buf_type type;      // buffer type
 *     __u32              bytesused; // bytes of data in the buffer
 *     __u32              flags;     // distinguishes MMAP vs. USERPTR
 *     enum v4l2_field    field;
 *     struct timeval     timestamp; // system time when the first byte was captured
 *     struct v4l2_timecode timecode;
 *     __u32              sequence;  // sequence number in the queue
 *     enum v4l2_memory   memory;    // I/O method, set by the application
 *     union m
 *     {
 *         __u32         offset;     // buffer offset, valid for MMAP only
 *         unsigned long userptr;
 *     };
 *     __u32 length;                 // buffer length
 *     __u32 input;
 *     __u32 reserved;
 * };
 */
///...
// Query the buffer's length and offset within kernel space.
// Querying buffer n yields its starting offset and size;
// the length and offset are exactly what the subsequent mmap call needs.
if (-1 == this->ioCtrl(m_nFd, VIDIOC_QUERYBUF, &info))
{
LOGERROR("CV4l2CaptureIr::initMmap is error... DALI_IR_SENSOR_DEV_NAME=[%s] VIDIOC_QUERYBUF", DALI_IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
// Record the reported buffer length
m_pBuffers[nIdx].unLength = info.length;
/*
 * Map the frame buffers from kernel space into user space so the application can read and
 * process the video data.
 * void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset);
 *     addr   - desired start address, normally NULL to let the kernel choose
 *     length - length of the mapping
 *     prot   - access protection: PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE
 *     flags  - whether the mapping is shared with other processes: MAP_SHARED, MAP_PRIVATE
 *     fd, offset - identify the memory to map
 *     returns the mapped address on success, MAP_FAILED ((void*)-1) on failure
 *
 * int munmap(void *addr, size_t length); // tear down a mapping
 *     addr is the mapped address, length the mapped size
 *
 * 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initMmap nIdx=[0] pBuffer=[0x7fbda41000] unLength=[77440] m_nFd=[3] offset=[0]
 * 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initMmap nIdx=[1] pBuffer=[0x7fbda2e000] unLength=[77440] m_nFd=[3] offset=[77824]
 * 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initMmap nIdx=[2] pBuffer=[0x7fbda1b000] unLength=[77440] m_nFd=[3] offset=[155648]
 * 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initMmap nIdx=[3] pBuffer=[0x7fbda08000] unLength=[77440] m_nFd=[3] offset=[233472]
 * Looking at these numbers: one frame is 160*242*2 = 77440 bytes. mmap offsets must be
 * page-aligned, so the driver rounds each buffer's length up to a page boundary
 * (77440 rounded up is 77824) and places each buffer at a multiple of that; the offset
 * is then used to locate the corresponding buffer and plane.
 */
// Map the frame buffer into user space and record the returned address
m_pBuffers[nIdx].pBuffer = mmap(NULL /*addr anywhere */, info.length, PROT_READ | PROT_WRITE /* required */, MAP_SHARED /* recommended */, m_nFd, info.m.offset);
// MAP_FAILED is mmap's return value when the mapping did not succeed
if (MAP_FAILED == m_pBuffers[nIdx].pBuffer)
{
LOGERROR("CV4l2CaptureIr::initMmap DALI_IR_SENSOR_DEV_NAME=[%s] VIDIOC_QUERYBUF", DALI_IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
LOGMSG("CV4l2CaptureIr::initMmap nIdx=[%d] pBuffer=[%p] unLength=[%d] m_nFd=[%d] offset=[%u]", nIdx, m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength, m_nFd, info.m.offset);
}
LOGMSG("CV4l2CaptureIr::initMmap is suc... nTotalCostTm=[%llu]", (currentTime() - nTotalCostTm).count());
7. Starting the stream
// Start capturing the video stream
unBuffType = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == this->ioCtrl(m_nFd, VIDIOC_STREAMON, &unBuffType))
{
LOGERROR("CV4l2CaptureIr::startCapture is error... VIDIOC_STREAMON m_unIoMethod=[%d]", m_unIoMethod);
return ReturnCode_Error;
}
8. Fetching data
- Fetching image data is simply a continuous enqueue/dequeue cycle; before dequeueing, wait for a frame to complete with poll or select.
8.1 Waiting with poll or select
int nRet = ReturnCode_Error;
for (;;)
{
fd_set readFds;
struct timeval tv;
FD_ZERO(&readFds);
FD_SET(m_nFd, &readFds);
/* Timeout. */
tv.tv_sec = 2;
tv.tv_usec = 0;
nRet = ::select(m_nFd + 1, &readFds, NULL, NULL, &tv);
if (SOCKET_ERROR == nRet) // error
{
if (EINTR == errno)
{
continue;
}
LOGERROR("CV4l2CaptureIr::queryDesc is error... select nRet=[-1]");
return ReturnCode_Error;
}
else if (0 == nRet) // timeout
{
LOGERROR("CV4l2CaptureIr::queryDesc is warning... select timeout nRet=[0]");
return ReturnCode_Success;
}
else
{
//LOGMSG("CV4l2CaptureIr::queryDesc select is suc... m_nFd=[%d] nRet=[%d]", m_nFd, nRet);
// data is ready
return ReturnCode_Success;
}
}
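select() works, but poll() avoids the FD_SETSIZE limit and reads a little more directly. An equivalent wait under the same 2-second timeout, sketched against the same m_nFd member:

#include <poll.h>
// Wait up to 2000 ms for the capture fd to become readable (a filled buffer)
struct pollfd pfd;
pfd.fd = m_nFd;
pfd.events = POLLIN;
int nRet = 0;
do
{
    nRet = ::poll(&pfd, 1, 2000);
} while (-1 == nRet && EINTR == errno); // retry if interrupted by a signal
if (-1 == nRet)
    return ReturnCode_Error;   // real error
if (0 == nRet)
    return ReturnCode_Success; // timeout, handled the same way as the select() version
// nRet > 0: the fd is readable, a frame can be dequeued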
8.2 Dequeueing a buffer
// Take an already-filled frame buffer out of the FIFO
memset(&info, 0, sizeof(info));
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
info.memory = V4L2_MEMORY_MMAP;
info.index = 0; // filled in by the ioctl below
// Dequeue a buffer whose capture the driver has completed
if (-1 == this->ioCtrl(m_nFd, VIDIOC_DQBUF, &info))
{
switch (errno)
{
case EAGAIN:
{
//LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_MMAP is warning... EAGAIN errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_WouldBlock;
}
case EIO:
{
LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_MMAP is warning... EIO errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_IoCodeError;
}
/* fall through */
default:
{
LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_MMAP is error... VIDIOC_DQBUF errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_Error;
}
}
}
CHECKI((INT32)info.index < m_nBuffersCount);
// Mark that this buffer has to be requeued afterwards
bQBuffFlag = true;
// Data pointer: the mmap'ed address of the dequeued buffer
pBuffer = (unsigned char*)m_pBuffers[info.index].pBuffer;
// Size of the captured data (bytesused has been observed as 0 here; cause unknown)
nSize = info.bytesused;
// Process the image
goto process_image;
After the dequeue, info.index tells you which buffer was filled; the frame is then read through the address that was mmap'ed for that buffer earlier.
9. Processing the data
Process the frame according to your own application logic; one illustrative sketch follows.
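As one concrete, purely illustrative possibility: the pseudo-color half of this module's frame is YUY2/YUYV, where every even byte is a luminance sample, so a grayscale image can be pulled out of a dequeued buffer like this (extractLuma is a hypothetical helper; pBuffer is the pointer obtained in step 8.2):

// YUYV packs pixels as Y0 U0 Y1 V0: every even byte is luminance.
// Copy the Y channel into a grayscale buffer for further processing.
void extractLuma(const unsigned char* pBuffer, int nWidth, int nHeight, unsigned char* pGray)
{
    int nPixels = nWidth * nHeight;
    for (int i = 0; i < nPixels; ++i)
    {
        pGray[i] = pBuffer[2 * i]; // Y sample of pixel i
    }
}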
10. Requeueing the frame buffer
// buf.index identified which mmap'ed buffer held the data
// Hand the processed frame buffer back to the capture input queue
// Requeueing the just-processed buffer at the tail keeps the capture loop running
if (bQBuffFlag)
{
if(-1 == this->ioCtrl(m_nFd, VIDIOC_QBUF, &info))
{
LOGERROR("CV4l2CaptureIr::getData VIDIOC_QBUF is error... errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_Error;
}
else
{
//LOGMSG("CV4l2CaptureIr::getData VIDIOC_QBUF is suc... index=[%u] offset=[%u] length=[%u] bytesused=[%u] sequence=[%u] flags=[0x%08x] tv_sec=[%u] tv_usec=[%u]", info.index, info.m.offset, info.length, info.bytesused, info.sequence, info.flags, info.timestamp.tv_sec, info.timestamp.tv_usec);
}
}
11. Shutting down
11.1 Stopping the stream
// Stop video capture
enum v4l2_buf_type unBuffType = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == this->ioCtrl(m_nFd, VIDIOC_STREAMOFF, &unBuffType))
{
LOGERROR("CV4l2CaptureIr::stopCapture is error... VIDIOC_STREAMOFF m_unIoMethod=[V4L2_IO_METHOD_MMAP]");
return ReturnCode_Error;
}
LOGMSG("CV4l2CaptureIr::stopCapture VIDIOC_STREAMOFF m_unIoMethod=[V4L2_IO_METHOD_MMAP]");
11.2 Unmapping the buffers
// Release the memory mappings
for (int nIdx = 0; nIdx < m_nBuffersCount; ++nIdx)
{
if (-1 == munmap(m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength))
{
LOGERROR("CV4l2CaptureIr::deinitMmap is error... nIdx=[%d] pBuffer=[%p] unLength=[%u]", nIdx, m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength);
continue;
}
LOGMSG("CV4l2CaptureIr::deinitMmap nIdx=[%d] pBuffer=[%p] unLength=[%u]", nIdx, m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength);
m_pBuffers[nIdx].pBuffer = NULL;
m_pBuffers[nIdx].unLength = 0;
}
11.3 Closing the device
// Check m_nFd
if (m_nFd != INVALID_FD_ID)
{
close(m_nFd);
m_nFd = INVALID_FD_ID;
}
12. Full source code
12.1 V4l2CaptureDef.h
#ifndef __V4L2_CAPTURE_DEF_H__
#define __V4L2_CAPTURE_DEF_H__
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h> /* getopt_long() */
#include <errno.h>
#include <fcntl.h> /* low-level i/o */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
/*
 * VIDIOC_REQBUFS   // request frame buffers
 * VIDIOC_QUERYBUF  // query the length/offset of a buffer allocated via VIDIOC_REQBUFS
 * VIDIOC_QUERYCAP  // query driver capabilities
 * VIDIOC_ENUM_FMT  // enumerate the video formats the driver supports
 * VIDIOC_S_FMT     // set the capture format
 * VIDIOC_G_FMT     // read the current capture format
 * VIDIOC_TRY_FMT   // validate a capture format without applying it
 * VIDIOC_CROPCAP   // query cropping capabilities
 * VIDIOC_S_CROP    // set the cropping rectangle
 * VIDIOC_G_CROP    // read the cropping rectangle
 * VIDIOC_QBUF      // enqueue an empty buffer for the driver to fill
 * VIDIOC_DQBUF     // dequeue a filled buffer from the driver
 * VIDIOC_STREAMON  // start streaming
 * VIDIOC_STREAMOFF // stop streaming
 * VIDIOC_QUERYSTD  // query the video standards the device supports, e.g. PAL or NTSC
 */
#include <linux/videodev2.h>
#include "Base/Common/GpCommon.h"
/*
 * IR module image data format
 * The frame rate is fixed and cannot be changed (frames per second, FPS / Hz):
 *     12 Hz when image and temperature are output together
 *     14 Hz when image or temperature is output alone
 *
 *       +----------------------------------------+
 *       |                  160                   |
 * +-----+----------------------------------------+
 * |     |        pseudo-color video stream       |
 * | 120 |               160 * 120                |
 * |     |              YUY2 (16 bit)             |
 * +-----+----------------------------------------+
 * |     |        temperature video stream        |
 * | 120 |               160 * 120                |
 * |     |            raw data (16 bit)           |
 * +-----+----------------------------------------+
 * |     |              status data               |
 * |  2  |                160 * 2                 |
 * |     |              status data               |
 * +-----+----------------------------------------+
 */
#define _IR_SENSOR_DEV_NAME "/dev/video0" // IR module device node
#define V4L2_VIDEO_LIST_BUFFER_DEFAULT_COUNT 4 // default v4l2 video buffer count: a 4-frame queue
#define _IR_SENSOR_IMAGE_PIXEL_BIT_WIDE 2 // bytes per IR pixel
#define _IR_SENSOR_TEMP_IMAGE_WIDTH 160 // temperature image width
#define _IR_SENSOR_TEMP_IMAGE_HEIGHT 120 // temperature image height
#define _IR_SENSOR_PSEUDO_COLOR_IMAGE_WIDTH 160 // pseudo-color image width
#define _IR_SENSOR_PSEUDO_COLOR_IMAGE_HEIGHT 120 // pseudo-color image height
#define _IR_SENSOR_IMAGE_WIDTH 160 // full image width (pseudo-color 160*120 + temperature 160*120)
#define _IR_SENSOR_IMAGE_HEIGHT 240 // full image height (pseudo-color 160*120 + temperature 160*120)
#define _IR_SENSOR_CONTAIN_STATUS_DATA_IMAGE_WIDTH 160 // full image width including status rows
#define _IR_SENSOR_CONTAIN_STATUS_DATA_IMAGE_HEIGHT 242 // full image height including status rows (+ status 160*2)
/*
 * How v4l2 delivers data
 * V4L2 generally supports three I/O methods:
 * 1. read/write basic I/O
 *    read() fetches one frame, copying it from kernel space to user space. The extra copy
 *    makes this inefficient for image-sized payloads.
 * 2. mmap memory mapping
 *    The buffers live in kernel space and may support DMA, so moving data from the device
 *    to memory is very efficient. After mmap()-ing them into user space they can be used
 *    directly. Most devices support this method.
 * 3. userptr user-space buffers
 *    The buffers are allocated in user space and their pointers handed to the kernel. This
 *    also avoids copying, but kernel drivers and hardware have a harder time with user-space
 *    addresses, and not every device supports it.
 */
enum emV4l2IoMethod
{
V4L2_IO_METHOD_READ, // read() system call
V4L2_IO_METHOD_MMAP, // mmap memory mapping
V4L2_IO_METHOD_USERPTR, // user-space pointer
};
/*
 * v4l2 data buffer
 */
typedef struct tagV4l2FrameBuffer
{
void* pBuffer; // mapped pointer
size_t unLength; // data length
}StuV4l2FrameBuffer;
/*
 * v4l2 data frame (raw 16-bit IR frame for the gpts project)
 */
typedef struct tagV4l2IrFrameInfo
{
int nWidth; // width
int nHeight; // height
int nSize; // size
void* pBuffer; // buffer
}StuV4l2IrFrameInfo;
#endif // __V4L2_CAPTURE_DEF_H__
12.2 V4l2CaptureIr.h
/************************************************************************/
/* Wrapper class for the Linux kernel v4l2 video capture module        */
/************************************************************************/
#ifndef __V4L2_CAPTURE_IR_H__
#define __V4L2_CAPTURE_IR_H__
#include "PlatForm/Capture/IGpCapture.h"
#include "Extension/OLei/Include/libxt.h"
#include "PlatForm/Capture/V4l2CaptureDef.h"
#include <queue>
#include <mutex>
#include <algorithm>
using namespace std;
//
class CV4l2CaptureIr : public IGpCapture
{
public:
// constructor
CV4l2CaptureIr();
// destructor
virtual ~CV4l2CaptureIr();
// open the device node
INT32 openVideoDev();
// close the video device
void closeVideoDev();
// initialize the video device
INT32 initVideoDev();
// initialize the capture method (read)
INT32 initRead(unsigned int unBufferSize);
// release the capture method (read)
void deinitRead();
// initialize the capture method (mmap)
INT32 initMmap();
// release the capture method (mmap)
void deinitMmap();
// initialize the capture method (userptr)
INT32 initUserPtr(unsigned int unBufferSize);
// release the capture method (userptr)
void deinitUserPtr();
// start capturing
INT32 startCapture();
// stop capturing
INT32 stopCapture();
// release resources
void deinitVideoDev();
// run ioctl (retrying on EINTR)
INT32 ioCtrl(int nFd, unsigned long int unRequest, void* pArg);
// uvc control
INT32 uvcSet(int nValue);
// shutter calibration (drives the shutter flag inside the core to trigger an immediate internal calibration)
void shutterCalibration();
//---------------------------------properties-------------------------------------//
DECLARE_DATA_GET_SET_METHOD(int, void, BuffersCount, int, nBuffersCount, 4);
// IGpCapture
public:
// fetch the abstract interface pointer
virtual IGpCapture *queryICapture() override { return (IGpCapture *)this; }
// initialize
virtual INT32 init(INT32 nIavFd, UINT32 unBuffSize) override;
// query descriptor info
virtual INT32 queryDesc(void* pQueryDesc, UINT32 unId) override;
// fetch data
virtual INT32 getData(void* pDataDesc) override;
// release resources
virtual INT32 deinit() override;
// fetch one captured frame
virtual INT32 getCaptureFrame() override;
private:
int m_nFd; // device node fd
enum emV4l2IoMethod m_unIoMethod; // v4l2 io method
int m_nBuffersCount; // number of buffers
StuV4l2FrameBuffer* m_pBuffers; // buffers
UINT32 m_unImageSize; // image size, used during start/initialization
};
#endif // __V4L2_CAPTURE_IR_H__
12.3 V4l2CaptureIr.cpp
#include "V4l2CaptureIr.h"
#include "Project/Gpts/Device/GptsDevice.h"
#include "Stream/GpFrameProcessCenter.h"
#include "Sensor/XXXXIrSensor.h"
//
//
CV4l2CaptureIr::CV4l2CaptureIr()
{
m_unIoMethod = V4L2_IO_METHOD_MMAP;
m_nBuffersCount = V4L2_VIDEO_LIST_BUFFER_DEFAULT_COUNT; // 申请一个拥有四个缓冲帧的缓冲区
m_pBuffers = NULL;
m_nFd = INVALID_FD_ID;
m_unImageSize = 0;
}
//
//
CV4l2CaptureIr::~CV4l2CaptureIr()
{
// 关闭SDK库,释放内存空间
this->deinit();
}
//
//
IMP_DATA_GET_METHOD(CV4l2CaptureIr, int, BuffersCount, m_nBuffersCount);
IMP_DATA_SET_METHOD(CV4l2CaptureIr, void, BuffersCount, m_nBuffersCount, int, nBuffersCount);
//
// 初始化
INT32 CV4l2CaptureIr::init(INT32 nIavFd, UINT32 unBuffSize)
{
LOGMSG("CV4l2CaptureIr::init nIavFd=[%d] unBuffSize=[%u]", nIavFd, unBuffSize);
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 获取相机配置文件参数
CDaLiIrSensor* pSensor = dynamic_cast<CDaLiIrSensor*>(this->getSensor());
CHECKI(pSensor);
CGptsDevice* pDev = dynamic_cast<CGptsDevice*>(pSensor->getDevice());
CHECKI(pDev);
StuGptsDeviceConfig& devConfRef = pDev->getDevConfig();
// 如果配置文件配置了读取配置文件
m_nBuffersCount = devConfRef.irParam.XXXXIrParam.nBuffersCount == 0 ? m_nBuffersCount : devConfRef.irParam.XXXXIrParam.nBuffersCount;
// 读取io模式
m_unIoMethod = (enum emV4l2IoMethod)devConfRef.irParam.XXXXIrParam.nIoMethod;
LOGMSG("CV4l2CaptureIr::startCapture read gpts dev config m_nBuffersCount=[%d] m_unIoMethod=[%u]", m_nBuffersCount, m_unIoMethod);
// 执行打开文件
CHECKI(this->openVideoDev() == ReturnCode_Success);
// 初始化视频设备
CHECKI(this->initVideoDev() == ReturnCode_Success);
LOGMSG("CV4l2CaptureIr::init is suc... costTm=[%llu]", (currentTime() - nTotalCostTm).count());
return ReturnCode_Success;
}
//
// 打开视频设备节点
INT32 CV4l2CaptureIr::openVideoDev()
{
LOGMSG("CV4l2CaptureIr::openVideoDev");
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
struct stat stInfo;
MEMSET(stInfo);
// 获取文件信息
if (ReturnCode_Error == stat(_IR_SENSOR_DEV_NAME, &stInfo))
{
LOGERROR("CV4l2CaptureIr::openVideoDev can't identify _IR_SENSOR_DEV_NAME=[%s] errno=[%d] reason=[%s]", _IR_SENSOR_DEV_NAME, errno, strerror(errno));
return ReturnCode_Error;
}
// 检查是否为一个字符设备
if (!S_ISCHR(stInfo.st_mode))
{
LOGERROR("CV4l2CaptureIr::openVideoDev isn't char device _IR_SENSOR_DEV_NAME=[%s] errno=[%d] reason=[%s]", _IR_SENSOR_DEV_NAME, errno, strerror(errno));
return ReturnCode_Error;
}
// 用户空间调用open打开对应的视频文件 非阻塞打开用户空间的/dev/video0文件
m_nFd = open(_IR_SENSOR_DEV_NAME, O_RDWR | O_NONBLOCK, 0);
if (-1 == m_nFd)
{
LOGERROR("CV4l2CaptureIr::openVideoDev can't open the device _IR_SENSOR_DEV_NAME=[%s] errno=[%d] reason=[%s]", _IR_SENSOR_DEV_NAME, errno, strerror(errno));
return ReturnCode_Error;
}
LOGMSG("CV4l2CaptureIr::openVideoDev is suc... nTotalCostTm=[%llu] m_nFd=[%d]", (currentTime() - nTotalCostTm).count(), m_nFd);
return ReturnCode_Success;
}
//
// 关闭视频设备节点
void CV4l2CaptureIr::closeVideoDev()
{
LOGMSG("CV4l2CaptureIr::closeVideoDev");
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 检查m_nFd
if (m_nFd != INVALID_FD_ID)
{
close(m_nFd);
m_nFd = INVALID_FD_ID;
}
LOGMSG("CV4l2CaptureIr::closeVideoDev is suc... nTotalCostTm=[%llu]", (currentTime() - nTotalCostTm).count());
}
//
// 初始化视频设备
INT32 CV4l2CaptureIr::initVideoDev()
{
LOGMSG("CV4l2CaptureIr::initVideoDev");
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 检查m_nFd
CHECKI(m_nFd != INVALID_FD_ID);
/*
* 设备属性
* 结构体来源:/usr/include/linux/videodev2.h文件。
* struct v4l2_capability {
* __u8 driver[16]; // 驱动名称
* __u8 card[32]; // 设备名称
* __u8 bus_info[32]; // 总线信息
* __u32 version; // 驱动版本号
* __u32 capabilities; // 设备具备的功能
* __u32 device_caps; // 通过特定设备(节点)访问的功能(不知道用处,网上其它资料没有该字段)
* __u32 reserved[3]; // 保留字段
* };
* capabilities 常用值:
* V4L2_CAP_VIDEO_CAPTURE 是否支持图像获取
*/
struct v4l2_capability v4l2CapInfo;
MEMSET(v4l2CapInfo);
/*
* 图像的缩放
* VIDIOC_CROPCAP
* int ioctl(int fd,int request, struct v4l2_cropcap *argp);
* struct v4l2_cropcap
* {
* enum v4l2_buf_type type; // 应用程序设置
* struct v4l2_rect bounds; // 最大边界
* struct v4l2_rect defrect; // 默认值
* struct v4l2_fract pixelaspect;
* };
*
* enum v4l2_buf_type
* {
* V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
* V4L2_BUF_TYPE_VIDEO_OUTPUT = 2,
* V4L2_BUF_TYPE_VIDEO_OVERLAY = 3,
* V4L2_BUF_TYPE_VBI_CAPTURE = 4,
* V4L2_BUF_TYPE_VBI_OUTPUT = 5,
* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
* V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
* V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
* V4L2_BUF_TYPE_PRIVATE = 0x80,
* };
*/
struct v4l2_cropcap v4l2CropCapInfo;
MEMSET(v4l2CropCapInfo);
/*
* 视频的采集窗口参数
* struct v4l2_crop
* {
* enum v4l2_buf_type type;
* struct v4l2_rect c;
* };
*/
struct v4l2_crop v4l2CropInfo;
MEMSET(v4l2CropInfo);
/*
* VIDIOC_G_FMT,VIDIOC_S_FMT
* VIDIOC_G_FMT,VIDIOC_S_FMT
* struct v4l2_format
* {
* enum v4l2_buf_type type; // 帧类型,应用程序设置
* union fmt
* {
* struct v4l2_pix_format pix; // 视频设备使用 V4L2_BUF_TYPE_VIDEO_CAPTURE
* struct v4l2_window win; // V4L2_BUF_TYPE_VIDEO_OVERLAY
* struct v4l2_vbi_format vbi;
* struct v4l2_sliced_vbi_format sliced;
* __u8 raw_data[200]; // user-defined
* };
* };
* struct v4l2_pix_format
* {
* __u32 width; // 帧宽,单位像素
* __u32 height; // 帧高,单位像素
* __u32 pixelformat; // 帧格式
* enum v4l2_field field;
* __u32 bytesperline;
* __u32 sizeimage;
* enum v4l2_colorspace colorspace;
* __u32 priv;
* };
*/
struct v4l2_format v4l2FmtInfo;
MEMSET(v4l2FmtInfo);
/*
* struct v4l2_fmtdesc
* {
* __u32 index; // 要查询的格式序号,应用程序设置
* enum v4l2_buf_type type; // 帧类型,应用程序设置
* __u32 flags; // 是否为压缩格式
* __u8 description[32]; // 格式名称
* __u32 pixelformat; // 格式
* __u32 reserved[4]; // 保留
* };
*/
struct v4l2_fmtdesc v4l2FmtDescInfo;
MEMSET(v4l2FmtDescInfo);
// 初始化视频设备,检查cap中的设备能力信息
if (-1 == this->ioCtrl(m_nFd, VIDIOC_QUERYCAP, &v4l2CapInfo))
{
if (EINVAL == errno)
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... _IR_SENSOR_DEV_NAME=[%s] is not V4L2 device", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
else
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... VIDIOC_QUERYCAP error errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_Error;
}
}
// 2019-04-13 00:29:24.585 Message: CV4l2CaptureIr::initVideoDev DriverName=[uvcvideo] nCard Name=[MC_Client] nBus info=[usb-AmbUSB-1] nDriverVersion=[4.14.164]
LOGMSG("CV4l2CaptureIr::initVideoDev DriverName=[%s] nCard Name=[%s] nBus info=[%s] nDriverVersion=[%u.%u.%u]", v4l2CapInfo.driver, v4l2CapInfo.card, v4l2CapInfo.bus_info, (v4l2CapInfo.version >> 16) & 0xFF, (v4l2CapInfo.version >> 8) & 0xFF, (v4l2CapInfo.version) & 0xFF);
// 显示所有支持的格式
v4l2FmtDescInfo.index = 0;
v4l2FmtDescInfo.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
while (ioctl(m_nFd, VIDIOC_ENUM_FMT, &v4l2FmtDescInfo) != -1)
{
LOGMSG("CV4l2CaptureIr::initVideoDev index=[%d] description=[%s]", v4l2FmtDescInfo.index + 1, v4l2FmtDescInfo.description);
v4l2FmtDescInfo.index++;
}
// 判断是否是一个视频捕捉设备
if (!(v4l2CapInfo.capabilities & V4L2_CAP_VIDEO_CAPTURE))
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... _IR_SENSOR_DEV_NAME=[%s] is no video capture device", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
switch (m_unIoMethod)
{
case V4L2_IO_METHOD_READ:
{
// read/write systemcalls
if (!(v4l2CapInfo.capabilities & V4L2_CAP_READWRITE))
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... _IR_SENSOR_DEV_NAME=[%s] does not support read i/o", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
}break;
case V4L2_IO_METHOD_MMAP:
{
// 判断是否是一个视频捕捉设备
if (!(v4l2CapInfo.capabilities & V4L2_CAP_STREAMING))
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... _IR_SENSOR_DEV_NAME=[%s] does not support streaming i/o", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
}break;
case V4L2_IO_METHOD_USERPTR:
{
// 判断是否是一个视频捕捉设备
if (!(v4l2CapInfo.capabilities & V4L2_CAP_STREAMING))
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... _IR_SENSOR_DEV_NAME=[%s] does not support streaming i/o", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
}
break;
default:
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... _IR_SENSOR_DEV_NAME=[%s] m_unIoMethod=[%d] isn't match", _IR_SENSOR_DEV_NAME, m_unIoMethod);
return ReturnCode_Error;
}break;
}
// 获取相机配置文件参数
CDaLiIrSensor* pSensor = dynamic_cast<CDaLiIrSensor*>(this->getSensor());
CHECKI(pSensor);
CGptsDevice* pDev = dynamic_cast<CGptsDevice*>(pSensor->getDevice());
CHECKI(pDev);
StuGptsDeviceConfig& devConfRef = pDev->getDevConfig();
// 设置摄像头采集数据格式,如设置采集数据的 长,宽,图像格式(JPEG,YUYV,MJPEG等格式)
v4l2FmtInfo.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (devConfRef.irParam.XXXXIrParam.nForceFormat)
{
v4l2FmtInfo.fmt.pix.width = devConfRef.irParam.XXXXIrParam.nWidth; /* 160 */
v4l2FmtInfo.fmt.pix.height = devConfRef.irParam.XXXXIrParam.nHeight; /* 240 */
v4l2FmtInfo.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV /* devConfRef.irParam.XXXXIrParam.nPixelFormat */;
v4l2FmtInfo.fmt.pix.field = V4L2_FIELD_INTERLACED;
// 设置当前格式
if (-1 == this->ioCtrl(m_nFd, VIDIOC_S_FMT, &v4l2FmtInfo))
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error... _IR_SENSOR_DEV_NAME=[%s] VIDIOC_S_FMT nWidth=[%u] nHeight=[%u] nPixelFormat=[%x] field=[%d]", _IR_SENSOR_DEV_NAME, v4l2FmtInfo.fmt.pix.width, v4l2FmtInfo.fmt.pix.height, v4l2FmtInfo.fmt.pix.pixelformat, v4l2FmtInfo.fmt.pix.field);
return ReturnCode_Error;
}
/* Note VIDIOC_S_FMT may change width and height. */
}
else
{
// 查看当前格式
if (-1 == this->ioCtrl(m_nFd, VIDIOC_G_FMT, &v4l2FmtInfo))
{
LOGERROR("CV4l2CaptureIr::initVideoDev is error..._IR_SENSOR_DEV_NAME=[%s] VIDIOC_G_FMT", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
}
// 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initVideoDev _IR_SENSOR_DEV_NAME=[/dev/video0] VIDIOC_S_FMT nWidth=[160] nHeight=[242] nPixelFormat=[56595559] field=[1]
LOGMSG("CV4l2CaptureIr::initVideoDev _IR_SENSOR_DEV_NAME=[%s] VIDIOC_S_FMT nWidth=[%u] nHeight=[%u] nPixelFormat=[%x] field=[%d] m_unImageSize=[%u]", _IR_SENSOR_DEV_NAME, v4l2FmtInfo.fmt.pix.width, v4l2FmtInfo.fmt.pix.height, v4l2FmtInfo.fmt.pix.pixelformat, v4l2FmtInfo.fmt.pix.field, v4l2FmtInfo.fmt.pix.sizeimage);
if ((unsigned int)devConfRef.irParam.XXXXIrParam.nWidth != v4l2FmtInfo.fmt.pix.width ||
(unsigned int)devConfRef.irParam.XXXXIrParam.nHeight != v4l2FmtInfo.fmt.pix.height)
{
LOGWARNING("CV4l2CaptureIr::initVideoDev devConfRef != v4l2FmtInfo is warning... devConfRef.irParam.XXXXIrParam.nWidth=[%d] devConfRef.irParam.XXXXIrParam.nHeight=[%d] v4l2FmtInfo.fmt.pix.width=[%u] v4l2FmtInfo.fmt.pix.height=[%u]", devConfRef.irParam.XXXXIrParam.nWidth, devConfRef.irParam.XXXXIrParam.nHeight, v4l2FmtInfo.fmt.pix.width, v4l2FmtInfo.fmt.pix.height);
}
else
{
LOGMSG("CV4l2CaptureIr::initVideoDev devConfRef == v4l2FmtInfo is suc... devConfRef.irParam.XXXXIrParam.nWidth=[%d] devConfRef.irParam.XXXXIrParam.nHeight=[%d] v4l2FmtInfo.fmt.pix.width=[%u] v4l2FmtInfo.fmt.pix.height=[%u]", devConfRef.irParam.XXXXIrParam.nWidth, devConfRef.irParam.XXXXIrParam.nHeight, v4l2FmtInfo.fmt.pix.width, v4l2FmtInfo.fmt.pix.height);
}
// VIDIOC_S_FMT后可能更新设备frame宽高,需要在进行一下更新设置
pSensor->setFrameWide(v4l2FmtInfo.fmt.pix.width);
pSensor->setFrameHeight(v4l2FmtInfo.fmt.pix.height);
// 获取image size
m_unImageSize = v4l2FmtInfo.fmt.pix.sizeimage;
LOGMSG("CV4l2CaptureIr::initVideoDev is suc... nTotalCostTm=[%llu] _IR_SENSOR_DEV_NAME=[%s] width=[%d] height=[%d] m_unImageSize=[%u]", (currentTime() - nTotalCostTm).count(), _IR_SENSOR_DEV_NAME, pSensor->getFrameWide(), pSensor->getFrameHeight(), m_unImageSize);
return ReturnCode_Success;
}
//
// 初始化视频采集方式(read)
INT32 CV4l2CaptureIr::initRead(unsigned int unBufferSize)
{
LOGMSG("CV4l2CaptureIr::initRead");
CHECKI(m_unImageSize > 0);
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
m_pBuffers = (StuV4l2FrameBuffer *)malloc(sizeof(StuV4l2FrameBuffer));
CHECKI(m_pBuffers);
memset(m_pBuffers, 0, sizeof(StuV4l2FrameBuffer));
m_pBuffers->unLength = unBufferSize;
m_pBuffers->pBuffer = (void*)malloc(unBufferSize);
CHECKI(m_pBuffers->pBuffer);
LOGMSG("CV4l2CaptureIr::initRead is suc... nTotalCostTm=[%llu] m_pBuffers=[%p] m_pBuffers->pBuffer=[%p] m_pBuffers->unLength=[%d]", (currentTime() - nTotalCostTm).count(), m_pBuffers, m_pBuffers->pBuffer, m_pBuffers->unLength);
return ReturnCode_Success;
}
//
// 初始化视频采集方式(mmap)
INT32 CV4l2CaptureIr::initMmap()
{
LOGMSG("CV4l2CaptureIr::initMmap");
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 检查m_nFd
CHECKI(m_nFd != INVALID_FD_ID);
// 获取相机配置文件参数
CXXXXIrSensor* pSensor = dynamic_cast<CXXXXIrSensor*>(this->getSensor());
CHECKI(pSensor);
CGptsDevice* pDev = dynamic_cast<CGptsDevice*>(pSensor->getDevice());
CHECKI(pDev);
StuGptsDeviceConfig& devConfRef = pDev->getDevConfig();
// 如果配置文件配置了读取配置文件
m_nBuffersCount = devConfRef.irParam.XXXXIrParam.nBuffersCount == 0 ? m_nBuffersCount : devConfRef.irParam.XXXXIrParam.nBuffersCount;
LOGMSG("CV4l2CaptureIr::initMmap read gpts dev config m_nBuffersCount=[%d]", m_nBuffersCount);
/*
* 申请和管理缓冲区,应用程序和设备有三种交换数据的方法,直接read/write ,内存映射(memorymapping) ,用户指针
* 向驱动申请帧缓存,v4l2_requestbuffers结构中定义了缓存的数量,驱动会据此申请对应数量的视频缓存。多个缓存可以用于建立FIFO,来提高视频采集的效率
 * struct v4l2_requestbuffers
* {
* __u32 count; // 缓冲区内缓冲帧的数目
* enum v4l2_buf_type type; // 缓冲帧数据格式
* enum v4l2_memory memory; // 区别是内存映射还是用户指针方式
* __u32 reserved[2];
* };
* enum v4l2_buf_type
* {
* V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
* V4L2_BUF_TYPE_VIDEO_OUTPUT = 2,
* V4L2_BUF_TYPE_VIDEO_OVERLAY = 3,
* V4L2_BUF_TYPE_VBI_CAPTURE = 4,
* V4L2_BUF_TYPE_VBI_OUTPUT = 5,
* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
* V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
* V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
* V4L2_BUF_TYPE_PRIVATE = 0x80,
* };
 * enum v4l2_memory
* {
* V4L2_MEMORY_MMAP,
* V4L2_MEMORY_USERPTR
* };
*/
/*
 * 1. VIDIOC_REQBUFS ioctl会清空所有的buffer
* 2. 将实际申请到的buffer的个数赋值,用于返回到用户空间
* *count = allocated_buffers;
* 3. 在驱动的vb2_core_reqbufs接口和queue_setup中
* size = dev->width * dev->height * 2;
* if (0 == *nbuffers)
* *nbuffers = 32;
* while (size * *nbuffers > vid_limit * 1024 * 1024)
* (*nbuffers)--;
* 如果分辨率是 1920*1080,vid_limit = 16
* 所以值为 16*1024*1024 / (1920*1080*2) = 4.xxx ,所以最后得到的buffer个数是4
*/
// 向驱动申请帧缓冲的请求,里面包含申请的个数 count、type、memory都要应用程序设置.
struct v4l2_requestbuffers info;
MEMSET(info);
info.count = m_nBuffersCount; // 缓冲区内缓冲帧的数目
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; // 缓冲帧数据格式
info.memory = V4L2_MEMORY_MMAP; // 区别是内存映射还是用户指针方式,在这里是内存映射
// 申请视频缓冲区(这个缓冲区位于内核空间,需要通过mmap映射到出来-用户空间)
// 这一步操作可能会修改info.count的值,修改为实际成功申请缓冲区个数
// 请求申请若干个帧缓冲区,一般为不少于3个
if (-1 == this->ioCtrl(m_nFd, VIDIOC_REQBUFS, &info)) // 向设备申请缓冲区
{
if (EINVAL == errno)
{
LOGERROR("CV4l2CaptureIr::initMmap is error... _IR_SENSOR_DEV_NAME=[%s] does not support memory mapping", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
else
{
LOGERROR("CV4l2CaptureIr::initMmap is error... _IR_SENSOR_DEV_NAME=[%s] VIDIOC_REQBUFS", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
}
//把实际成功申请缓冲区个数的值赋给m_nBuffersCount这个变量,因为在申请的时候可能会修改info.count的值
if ((unsigned int)m_nBuffersCount > info.count)
{
LOGWARNING("CV4l2CaptureIr::initMmap VIDIOC_REQBUFS m_nBuffersCount=[%d] > info.count=[%u]", m_nBuffersCount, info.count);
m_nBuffersCount = info.count;
}
LOGMSG("CV4l2CaptureIr::initMmap VIDIOC_REQBUFS m_nBuffersCount=[%d]", m_nBuffersCount);
// 为这个结构体变量分配内存,这个结构体主要的目的保存的是每一个缓冲帧的地址和大小
// 将多个(m_nBuffersCount)已申请到的缓冲帧映射到应用程序空间,用m_pBuffers指针记录
// 获取每个缓存的信息,并mmap到用户空间
m_pBuffers = (StuV4l2FrameBuffer *)calloc(info.count, sizeof(StuV4l2FrameBuffer));
CHECKI(m_pBuffers);
LOGMSG("CV4l2CaptureIr::initMmap calloc m_pBuffers=[%p] size=[%u]", m_pBuffers, info.count * sizeof(StuV4l2FrameBuffer));
// 映射所有的缓存
for (int nIdx = 0; nIdx < m_nBuffersCount; ++nIdx)
{
/*
* struct v4l2_buffer
* {
* __u32 index; // buffer 序号
* enum v4l2_buf_type type; // buffer 类型
* __u32 bytesused; // buffer 中已使用的字节数
* __u32 flags; // 区分是MMAP 还是USERPTR
* enum v4l2_field field;
* struct timeval timestamp; // 获取第一个字节时的系统时间
* struct v4l2_timecode timecode;
* __u32 sequence; // 队列中的序号
* enum v4l2_memory memory; // IO 方式,被应用程序设置
* union m
* {
* __u32 offset; // 缓冲帧地址,只对MMAP 有效
* unsigned long userptr;
* };
* __u32 length; // 缓冲帧长度
* __u32 input;
* __u32 reserved;
* };
*/
struct v4l2_buffer info;
MEMSET(info);
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
info.memory = V4L2_MEMORY_MMAP;
info.index = nIdx;
// 将申请的内核缓冲区放入视频采集输入队列中排队
if (-1 == this->ioCtrl(m_nFd, VIDIOC_QBUF, &info))
{
LOGERROR("CV4l2CaptureIr::initMmap is error... VIDIOC_QBUF");
return ReturnCode_Error;
}
// 查询帧缓冲区在内核空间中的长度和偏移量
// 查询序号为n_buffers 的缓冲区,得到其起始物理地址和大小
// 获取到对应index的缓存信息,此处主要利用length信息及offset信息来完成后面的mmap操作
// 查询申请到的缓冲区的信息
if (-1 == this->ioCtrl(m_nFd, VIDIOC_QUERYBUF, &info))
{
LOGERROR("CV4l2CaptureIr::initMmap is error... _IR_SENSOR_DEV_NAME=[%s] VIDIOC_QUERYBUF", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
// 转换成相对地址
m_pBuffers[nIdx].unLength = info.length;
/*
* 将这些帧缓冲区从内核空间映射到用户空间,便于应用程序读取 / 处理视频数据;
* void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset);
* addr 映射起始地址,一般为NULL ,让内核自动选择
* length 被映射内存块的长度
* prot 标志映射后能否被读写,其值为PROT_EXEC,PROT_READ,PROT_WRITE, PROT_NONE
* flags 确定此内存映射能否被其他进程共享,MAP_SHARED,MAP_PRIVATE
* fd,offset, 确定被映射的内存地址
* 返回成功映射后的地址,不成功返回MAP_FAILED ((void*)-1);
*
* int munmap(void *addr, size_t length);// 断开映射
* addr 为映射后的地址,length 为映射后的内存长度
*
* 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initMmap nIdx=[0] pBuffer=[0x7fbda41000] unLength=[77440] m_nFd=[3] offset=[0]
* 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initMmap nIdx=[1] pBuffer=[0x7fbda2e000] unLength=[77440] m_nFd=[3] offset=[77824]
* 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initMmap nIdx=[2] pBuffer=[0x7fbda1b000] unLength=[77440] m_nFd=[3] offset=[155648]
* 2019-04-13 00:29:24.595 Message: CV4l2CaptureIr::initMmap nIdx=[3] pBuffer=[0x7fbda08000] unLength=[77440] m_nFd=[3] offset=[233472]
 * 分析一下这个结果,帧数据大小为160*242*2 = 77440,而且是平面视频格式,offset是length经过页对齐后的,77440页对齐后大小为77824,
* mmap需要页对齐,所以这里将length进行一个页对齐的操作,通过offset找到对应的buffer及plane的值;
*
*/
// 应用程序通过内存映射,将帧缓冲区的地址映射到用户空间,返回映射后的地址
m_pBuffers[nIdx].pBuffer = mmap(NULL /*addr anywhere */, info.length, PROT_READ | PROT_WRITE /* required */, MAP_SHARED /* recommended */, m_nFd, info.m.offset);
// MAP_FAILED表示mmap没有成功映射,其返回的值
if (MAP_FAILED == m_pBuffers[nIdx].pBuffer)
{
LOGERROR("CV4l2CaptureIr::initMmap _IR_SENSOR_DEV_NAME=[%s] VIDIOC_QUERYBUF", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
LOGMSG("CV4l2CaptureIr::initMmap nIdx=[%d] pBuffer=[%p] unLength=[%d] m_nFd=[%d] offset=[%u]", nIdx, m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength, m_nFd, info.m.offset);
}
LOGMSG("CV4l2CaptureIr::initMmap is suc... nTotalCostTm=[%llu]", (currentTime() - nTotalCostTm).count());
return ReturnCode_Success;
}
//
// 初始化视频采集方式(userptr)-用户空间内存指针
INT32 CV4l2CaptureIr::initUserPtr(unsigned int unBufferSize)
{
LOGMSG("CV4l2CaptureIr::initUserPtr");
CHECKI(m_unImageSize > 0);
// 检查m_nFd
CHECKI(m_nFd != INVALID_FD_ID);
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 获取相机配置文件参数
CDaLiIrSensor* pSensor = dynamic_cast<CDaLiIrSensor*>(this->getSensor());
CHECKI(pSensor);
CGptsDevice* pDev = dynamic_cast<CGptsDevice*>(pSensor->getDevice());
CHECKI(pDev);
StuGptsDeviceConfig& devConfRef = pDev->getDevConfig();
// 如果配置文件配置了读取配置文件
m_nBuffersCount = devConfRef.irParam.daLiIrParam.nBuffersCount == 0 ? m_nBuffersCount : devConfRef.irParam.daLiIrParam.nBuffersCount;
LOGMSG("CV4l2CaptureIr::initUserPtr read gpts dev config m_nBuffersCount=[%d]", m_nBuffersCount);
// 向驱动申请帧缓存,v4l2_requestbuffers结构中定义了缓存的数量,驱动会据此申请对应数量的视频缓存。多个缓存可以用于建立FIFO,来提高视频采集的效率
struct v4l2_requestbuffers info;
MEMSET(info);
info.count = m_nBuffersCount;
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
info.memory = V4L2_MEMORY_USERPTR;
if (-1 == this->ioCtrl(m_nFd, VIDIOC_REQBUFS, &info))
{
if (EINVAL == errno)
{
LOGERROR("CV4l2CaptureIr::initUserPtr is error... _IR_SENSOR_DEV_NAME=[%s] does not support user pointer i/o", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
else
{
LOGERROR("CV4l2CaptureIr::initUserPtr is error... _IR_SENSOR_DEV_NAME=[%s] VIDIOC_REQBUFS", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
}
// 用户空间申请内存
m_pBuffers = (StuV4l2FrameBuffer *)calloc(info.count, sizeof(StuV4l2FrameBuffer));
CHECKI(m_pBuffers);
LOGMSG("CV4l2CaptureIr::initUserPtr calloc m_pBuffers=[%p] count=[%u]", m_pBuffers, info.count * sizeof(StuV4l2FrameBuffer));
// 申请用户空间申请内存
for (int nIdx = 0; nIdx < m_nBuffersCount; ++nIdx)
{
m_pBuffers[nIdx].unLength = unBufferSize;
// 申请内存空间
m_pBuffers[nIdx].pBuffer = (void*)malloc(unBufferSize);
if (!m_pBuffers[nIdx].pBuffer)
{
/*
* 注意
* 存在内存泄漏后面优化???
*/
LOGERROR("CV4l2CaptureIr::initUserPtr is error... _IR_SENSOR_DEV_NAME=[%s] Out of memory", _IR_SENSOR_DEV_NAME);
return ReturnCode_Error;
}
// 将应用层申请的内存缓冲区放入视频采集输入队列中排队
struct v4l2_buffer info;
MEMSET(info);
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
info.memory = V4L2_MEMORY_USERPTR;
info.index = nIdx;
info.m.userptr = reinterpret_cast<unsigned long>(m_pBuffers[nIdx].pBuffer);
info.length = m_pBuffers[nIdx].unLength;
if (-1 == this->ioCtrl(m_nFd, VIDIOC_QBUF, &info))
{
LOGERROR("CV4l2CaptureIr::initUserPtr is error... VIDIOC_QBUF nIdx=[%d]", nIdx);
return ReturnCode_Error;
}
LOGMSG("CV4l2CaptureIr::initUserPtr nIdx=[%d] pBuffer=[%p] unLength=[%d]", nIdx, m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength);
}
LOGMSG("CV4l2CaptureIr::initUserPtr is suc... nTotalCostTm=[%llu] unBufferSize=[%u]", (currentTime() - nTotalCostTm).count(), unBufferSize);
return ReturnCode_Success;
}
//
// 释放视频采集方式(read)
void CV4l2CaptureIr::deinitRead()
{
LOGMSG("CV4l2CaptureIr::deinitRead");
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 释放内存
if (m_pBuffers)
{
SAFE_FREE(m_pBuffers->pBuffer);
}
// 释放内存
SAFE_FREE(m_pBuffers);
LOGMSG("CV4l2CaptureIr::deinitRead is suc... nTotalCostTm=[%llu]", (currentTime() - nTotalCostTm).count());
return;
}
//
// 释放视频采集方式(mmap)
void CV4l2CaptureIr::deinitMmap()
{
LOGMSG("CV4l2CaptureIr::deinitMmap");
CHECK(m_pBuffers);
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 释放内存映射
for (int nIdx = 0; nIdx < m_nBuffersCount; ++nIdx)
{
if (-1 == munmap(m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength))
{
LOGERROR("CV4l2CaptureIr::deinitMmap is error... nIdx=[%d] pBuffer=[%p] unLength=[%u]", nIdx, m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength);
continue;
}
LOGMSG("CV4l2CaptureIr::deinitMmap nIdx=[%d] pBuffer=[%p] unLength=[%u]", nIdx, m_pBuffers[nIdx].pBuffer, m_pBuffers[nIdx].unLength);
m_pBuffers[nIdx].pBuffer = NULL;
m_pBuffers[nIdx].unLength = 0;
}
// 向驱动申请帧缓冲的请求,里面包含申请的个数 count、type、memory都要应用程序设置.
struct v4l2_requestbuffers info;
MEMSET(info);
info.count = 0; // 缓冲区内缓冲帧的数目
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; // 缓冲帧数据格式
info.memory = V4L2_MEMORY_MMAP; // 区别是内存映射还是用户指针方式,在这里是内存映射
if (-1 == this->ioCtrl(m_nFd, VIDIOC_REQBUFS, &info)) // 向设备申请缓冲区
{
if (EINVAL == errno)
{
LOGERROR("CV4l2CaptureIr::deinitMmap is error... _IR_SENSOR_DEV_NAME=[%s] does not support memory mapping", _IR_SENSOR_DEV_NAME);
}
else
{
LOGERROR("CV4l2CaptureIr::deinitMmap is error... _IR_SENSOR_DEV_NAME=[%s] VIDIOC_REQBUFS unable to release buffers", _IR_SENSOR_DEV_NAME);
}
}
LOGMSG("CV4l2CaptureIr::deinitMmap VIDIOC_REQBUFS release buffers info.count=[0] info.type=[V4L2_BUF_TYPE_VIDEO_CAPTURE] info.memory=[V4L2_MEMORY_MMAP]");
// buffer数量清0
m_nBuffersCount = 0;
// 释放内存
SAFE_FREE(m_pBuffers);
LOGMSG("CV4l2CaptureIr::deinitMmap is suc... nTotalCostTm=[%llu] m_pBuffers=[%p] m_nBuffersCount=[%d]", (currentTime() - nTotalCostTm).count(), m_pBuffers, m_nBuffersCount);
return;
}
//
// 释放视频采集方式(userptr)-用户空间内存指针
void CV4l2CaptureIr::deinitUserPtr()
{
LOGMSG("CV4l2CaptureIr::deinitUserPtr");
CHECK(m_pBuffers);
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 向驱动申请帧缓冲的请求,里面包含申请的个数 count、type、memory都要应用程序设置.
struct v4l2_requestbuffers info;
MEMSET(info);
info.count = 0; // 释放全部缓冲帧
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; // 缓冲帧数据格式
info.memory = V4L2_MEMORY_USERPTR; // 必须与申请缓冲区时使用的内存类型一致,这里是用户指针方式
if (-1 == this->ioCtrl(m_nFd, VIDIOC_REQBUFS, &info)) // 向设备申请缓冲区
{
if (EINVAL == errno)
{
LOGERROR("CV4l2CaptureIr::deinitUserPtr is error... _IR_SENSOR_DEV_NAME=[%s] does not support memory mapping", _IR_SENSOR_DEV_NAME);
}
else
{
LOGERROR("CV4l2CaptureIr::deinitUserPtr is error... _IR_SENSOR_DEV_NAME=[%s] VIDIOC_REQBUFS unable to release buffers", _IR_SENSOR_DEV_NAME);
}
}
for (int nIdx = 0; nIdx < m_nBuffersCount; ++nIdx)
{
SAFE_FREE(m_pBuffers[nIdx].pBuffer);
}
// buffer数量清0
m_nBuffersCount = 0;
// 释放内存
SAFE_FREE(m_pBuffers);
LOGMSG("CV4l2CaptureIr::deinitUserPtr is suc... nTotalCostTm=[%llu]", (currentTime() - nTotalCostTm).count());
return;
}
//
// 启动视频采集后,驱动程序开始采集一帧数据,把采集的数据放入视频采集输入队列的第一个帧缓冲区,
// 一帧数据采集完成,也就是第一个帧缓冲区存满一帧数据后,驱动程序将该帧缓冲区移至视频采集
// 输出队列,等待应用程序从输出队列取出。驱动程序接下来采集下一帧数据,放入第二个帧缓冲区,
// 同样帧缓冲区存满下一帧数据后,被放入视频采集输出队列。所以在开始采集视频数据之前,
// 我们需要将申请的缓冲区放入视频采集输入队列中排队,这样视频采集输入队列中才有帧缓冲区,
// 这样也才能保存我们才采集的数据
// 开始采集数据
INT32 CV4l2CaptureIr::startCapture()
{
LOGMSG("CV4l2CaptureIr::startCapture");
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 检查m_nFd
CHECKI(m_nFd != INVALID_FD_ID);
//int nIdx;
enum v4l2_buf_type unBuffType;
/*
* 注意
* 1. 每次VIDIOC_STREAMON之前需要向驱动申请资源
* 2. 每次VIDIOC_STREAMOFF之后需要释放资源
*/
// 申请数据流资源
switch (m_unIoMethod)
{
case V4L2_IO_METHOD_READ:
{
/* Nothing to do. */
// 初始化视频采集方式(read)
CHECKI(this->initRead(m_unImageSize) == ReturnCode_Success);
}
break;
case V4L2_IO_METHOD_MMAP:
{
/*
* 缓冲区处理好之后,就可以开始获取数据了
* 1. 启动 / 停止数据流
* 2. VIDIOC_STREAMON, VIDIOC_STREAMOFF
* 3. int ioctl(intfd, int request, const int *argp);
* 4. argp 为流类型指针,如V4L2_BUF_TYPE_VIDEO_CAPTURE
* 5. 在开始之前,还应当把缓冲帧放入缓冲队列:
* 6. VIDIOC_QBUF// 把帧放入队列
* 7. VIDIOC_DQBUF// 从队列中取出帧
* 原文链接:https ://blog.csdn.net/u010661782/article/details/49020695
* 把四个缓冲帧放入队列,并启动数据流
* 将缓冲帧放入队列
*/
// 需要重新执行video buffer添加到队列中,否则下次循环的时候获取不到
// 初始化视频采集方式(mmap)
CHECKI(this->initMmap() == ReturnCode_Success);
}
break;
case V4L2_IO_METHOD_USERPTR:
{
// 初始化视频采集方式(userptr)
CHECKI(this->initUserPtr(m_unImageSize) == ReturnCode_Success);
}
break;
default:
{
LOGERROR("CV4l2CaptureIr::startCapture is error... m_unIoMethod=[%d]", m_unIoMethod);
return ReturnCode_Error;
}
break;
}
// 开始视频流数据的采集
unBuffType = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == this->ioCtrl(m_nFd, VIDIOC_STREAMON, &unBuffType))
{
LOGERROR("CV4l2CaptureIr::startCapture is error... VIDIOC_STREAMON m_unIoMethod=[%d]", m_unIoMethod);
return ReturnCode_Error;
}
LOGMSG("CV4l2CaptureIr::startCapture is suc... VIDIOC_STREAMON nTotalCostTm=[%llu] unBuffType=[%u]", (currentTime() - nTotalCostTm).count(), unBuffType);
return ReturnCode_Success;
}
//
// 停止数据采集
INT32 CV4l2CaptureIr::stopCapture()
{
LOGMSG("CV4l2CaptureIr::stopCapture");
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
// 检查m_nFd
CHECKI(m_nFd != INVALID_FD_ID);
// 停止视频的采集
enum v4l2_buf_type unBuffType = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (-1 == this->ioCtrl(m_nFd, VIDIOC_STREAMOFF, &unBuffType))
{
LOGERROR("CV4l2CaptureIr::stopCapture is error... VIDIOC_STREAMOFF m_unIoMethod=[V4L2_IO_METHOD_MMAP]");
return ReturnCode_Error;
}
LOGMSG("CV4l2CaptureIr::stopCapture VIDIOC_STREAMOFF m_unIoMethod=[V4L2_IO_METHOD_MMAP]");
// 释放数据流资源
switch (m_unIoMethod)
{
case V4L2_IO_METHOD_READ:
{
/* Nothing to do. */
this->deinitRead();
}
break;
case V4L2_IO_METHOD_MMAP:
{
/* 注意需要进行资源释放
* 具体请看内核v4l2驱动代码
* 中vb2_core_streamoff函数中
* Cancel will pause streaming and remove all buffers from the driver
* and videobuf, effectively returning control over them to userspace.
* Note that we do this even if q->streaming == 0: if you prepare or
* queue buffers, and then call streamoff without ever having called
* streamon, you would still expect those buffers to be returned to
* their normal dequeued state.
*/
// 释放视频采集方式(mmap)
this->deinitMmap();
}
break;
case V4L2_IO_METHOD_USERPTR:
{
// 释放视频采集方式(userptr)
this->deinitUserPtr();
}
break;
default:
{
LOGERROR("CV4l2CaptureIr::stopCapture is error... m_unIoMethod=[%d]", m_unIoMethod);
return ReturnCode_Error;
}
break;
}
LOGMSG("CV4l2CaptureIr::stopCapture is suc... nTotalCostTm=[%llu] unBuffType=[%u] m_unIoMethod=[%u]", (currentTime() - nTotalCostTm).count(), unBuffType, m_unIoMethod);
return ReturnCode_Success;
}
//
// 获取图像数据帧,由于帧率很低大概只有1所以先不打印错误
INT32 CV4l2CaptureIr::getCaptureFrame()
{
CDaLiIrSensor* pSensor = dynamic_cast<CDaLiIrSensor *>(this->getSensor());
CHECKI(pSensor);
// 执行查询
INT32 nRet = ReturnCode_Error;
struct iav_querydesc queryDescInfo;
memset(&queryDescInfo, 0, sizeof(queryDescInfo));
nRet = this->queryDesc(&queryDescInfo, pSensor->getVincId());
if (nRet != ReturnCode_Success)
{
LOGERROR("CV4l2CaptureIr::getCaptureFrame queryDesc is fail...");
return ReturnCode_Error;
}
// 获取数据
StuV4l2IrFrameInfo info;
memset(&info, 0, sizeof(info));
nRet = this->getData(&info);
if (nRet != ReturnCode_Success &&
nRet != ReturnCode_WouldBlock &&
nRet != ReturnCode_IoCodeError)
{
LOGWARNING("CV4l2CaptureIr::getCaptureFrame getData is warning... nRet=[%d]", nRet);
return ReturnCode_Error;
}
//
// 统计1秒钟帧率(实际是转发包的个数,一个原有包会拆成若干转发包,数量是不相等的)
if ((currentTime() - this->getFpsTimePoint()).count() >= FPS_CNT_TIME_INTERVAL)
{
// 计数清0
this->setFpsCnt(1);
// 更新时间戳
this->getFpsTimePoint() = currentTime();
}
else
{
// 计数加1
this->addFpsCnt(1);
}
return ReturnCode_Success;
}
//
// 获取描述符信息
INT32 CV4l2CaptureIr::queryDesc(void* pQueryDesc, UINT32 unId)
{
// 由于这个类是虚拟的不需要对接硬件设备所以这一步暂且认为可以不做
CHECKI(pQueryDesc);
// 等待耗时统计
//TimePoint nTotalCostTm = currentTime();
int nRet = ReturnCode_Error;
for (;;)
{
fd_set readFds;
struct timeval tv;
FD_ZERO(&readFds);
FD_SET(m_nFd, &readFds);
/* Timeout. */
tv.tv_sec = 2;
tv.tv_usec = 0;
nRet = ::select(m_nFd + 1, &readFds, NULL, NULL, &tv);
if (SOCKET_ERROR == nRet) // 出错
{
if (EINTR == errno)
{
continue;
}
LOGERROR("CV4l2CaptureIr::queryDesc is error... select nRet=[-1]");
return ReturnCode_Error;
}
else if (0 == nRet) //超时
{
LOGERROR("CV4l2CaptureIr::queryDesc is warning... select timeout nRet=[0]");
return ReturnCode_Success;
}
else
{
//LOGMSG("CV4l2CaptureIr::queryDesc select is suc... m_nFd=[%d] nRet=[%d]", m_nFd, nRet);
// 有数据了
return ReturnCode_Success;
}
}
//LOGMSG("CV4l2CaptureIr::queryDesc is suc... costTm=[%llu]", (currentTime() - nTotalCostTm).count());
return ReturnCode_Success;
}
//
// 获取数据
INT32 CV4l2CaptureIr::getData(void* pDataDesc)
{
// LOGMSG("CV4l2CaptureIr::getData");
// 等待耗时统计
//TimePoint nTotalCostTm = currentTime();
CDaLiIrSensor* pSensor = dynamic_cast<CDaLiIrSensor*>(this->getSensor());
CHECKI(pSensor);
CGptsDevice* pDev = dynamic_cast<CGptsDevice *>(pSensor->getDevice());
CHECKI(pDev);
// 检查m_nFd
CHECKI(m_nFd != INVALID_FD_ID);
CHECKI(m_pBuffers);
CGpFrameProcessCenter* pFrameProCenter = NULL;
INT32 nRet = ReturnCode_Success;
CGpFrame* pIrFrame = NULL;
INT32 nIdx = 0;
bool bQBuffFlag = false; // 是否需要入队列
INT32 nImageWidth = pSensor->getFrameWide();
INT32 nImageHeight = pSensor->getFrameHeight();
INT32 nSize = nImageWidth * nImageHeight * 2;
static unsigned int unFrameId = 0;
unsigned char* pBuffer = NULL;
struct v4l2_buffer info;
memset(&info, 0, sizeof(info));
unsigned short* pFrameData = NULL;
unsigned short* pBufferData = NULL;
TimePoint nTotalCostTm = currentTime();
int nRow = 0;
int nCol = 0;
int nValue1 = 0;
int nValue2 = 0;
int nValue3 = 0;
int nValue4 = 0;
int nValue5 = 0;
int nValue6 = 0;
int nValue7 = 0;
int nValue8 = 0;
int nValue9 = 0;
int nValue10 = 0;
switch (m_unIoMethod)
{
case V4L2_IO_METHOD_READ:
{
if (-1 == read(m_nFd, m_pBuffers->pBuffer, m_pBuffers->unLength))
{
switch (errno)
{
case EAGAIN:
{
LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_READ is warning... EAGAIN errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_WouldBlock;
}
case EIO:
{
return ReturnCode_IoCodeError;
}
/* fall through */
default:
{
LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_READ is warning... errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_Error;
}
}
}
// 获取数据指针
pBuffer = (unsigned char*)m_pBuffers->pBuffer;
// 设置size
nSize = m_pBuffers->unLength;
// 图像处理
goto process_image;
}
break;
case V4L2_IO_METHOD_MMAP:
{
// 取出FIFO缓存中已经采样的帧缓存
memset(&info, 0, sizeof(info));
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
info.memory = V4L2_MEMORY_MMAP;
info.index = 0; // 此值由下面的ioctl返回
// 应用程序从视频采集输出队列中取出已经采集好数据的帧缓冲区
if (-1 == this->ioCtrl(m_nFd, VIDIOC_DQBUF, &info))
{
switch (errno)
{
case EAGAIN:
{
//LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_MMAP is warning... EAGAIN errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_WouldBlock;
}
case EIO:
{
LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_MMAP is warning... EIO errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_IoCodeError;
}
/* fall through */
default:
{
LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_MMAP is error... VIDIOC_DQBUF errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_Error;
}
}
}
CHECKI((INT32)info.index < m_nBuffersCount);
// 设置标识位
bQBuffFlag = true;
// 获取数据指针
pBuffer = (unsigned char*)m_pBuffers[info.index].pBuffer;
// 设置size(此处观察到bytesused偶尔为0,原因未知)
nSize = info.bytesused;
// 图像处理
goto process_image;
}
break;
case V4L2_IO_METHOD_USERPTR:
{
/*
* 从缓冲区取出一个缓冲帧并处理
*/
MEMSET(info);
info.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
info.memory = V4L2_MEMORY_USERPTR;
// 应用程序从视频采集输出队列中取出已经采集好数据的帧缓冲区
if (-1 == this->ioCtrl(m_nFd, VIDIOC_DQBUF, &info))
{
switch (errno)
{
case EAGAIN:
{
return ReturnCode_WouldBlock;
}
case EIO:
{
return ReturnCode_IoCodeError;
}
default:
{
LOGWARNING("CV4l2CaptureIr::getData V4L2_IO_METHOD_USERPTR is error... VIDIOC_DQBUF errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_Error;
}
}
}
for (nIdx = 0; nIdx < m_nBuffersCount; ++nIdx)
{
if (info.m.userptr == (unsigned long)m_pBuffers[nIdx].pBuffer && info.length == m_pBuffers[nIdx].unLength)
{
break;
}
}
CHECKI(nIdx < m_nBuffersCount);
// 设置标识位
bQBuffFlag = true;
// 获取数据指针
pBuffer = (unsigned char*)info.m.userptr;
// 设置size
nSize = info.bytesused;
// 图像处理
goto process_image;
}
break;
default:
{
LOGERROR("CV4l2CaptureIr::getData m_unIoMethod is error... m_unIoMethod=[%d]", m_unIoMethod);
return ReturnCode_Error;
}
}
//LOGERROR("CV4l2CaptureIr::getData is error... nTotalCostTm=[%llu]", (currentTime() - nTotalCostTm).count();
return ReturnCode_Error;
process_image:
// 检查buff指针
if (pBuffer == NULL)
{
LOGERROR("CV4l2CaptureIr::getData pBuffer is NULL");
nRet = ReturnCode_Error;
goto queue_buff;
}
// 因为只输出ir数据,伪彩图像不输出,所以需要对nSize进行更新
nSize = _IR_SENSOR_TEMP_IMAGE_WIDTH * _IR_SENSOR_TEMP_IMAGE_HEIGHT * _IR_SENSOR_IMAGE_PIXEL_BIT_WIDE;
// 点云流没有帧率的概念,都是后面组的。而且一个点云的包要拆成若干个gige包转发,这里统计的是转发的包的个数
pIrFrame = new CGpFrame(nSize);
if (pIrFrame == NULL)
{
LOGERROR("CV4l2CaptureIr::getData new CGpFrame pIrFrame is NULL");
nRet = ReturnCode_Error;
goto queue_buff;
}
// 模组默认状态为同时输出伪彩图像和温度图像,输出分辨率为 160×242,伪彩图像在上,温度图像在下,上下拼接同时输出。第 1 行~第 120 行为伪彩图像,第 121 行~第 240 行为温度数据,最后两行为状态信息
// 探测目标的温度测量值 Tc(℃),与组件 0x25 返回的温度量 Vc(灰度)的换算关系为:
// Tc = Vc / 10 - 273;
// 注意:此处逐点换算直接影响性能和帧率,待优化
// 获取ir图像各点温度值
pBufferData = (unsigned short *)(pBuffer + _IR_SENSOR_PSEUDO_COLOR_IMAGE_WIDTH * _IR_SENSOR_PSEUDO_COLOR_IMAGE_HEIGHT * _IR_SENSOR_IMAGE_PIXEL_BIT_WIDE);
pFrameData = (unsigned short *)pIrFrame->getData();
// 等待耗时统计
//nTotalCostTm = currentTime();
// nImageHeight=242 nImageWidth=160 第 121 行~第 240 行为温度数据
for (nRow = _IR_SENSOR_PSEUDO_COLOR_IMAGE_HEIGHT; nRow < _IR_SENSOR_IMAGE_HEIGHT; nRow++)
{
for (nCol = 0; nCol < nImageWidth; nCol += 10)
{
nValue1 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue1;
nValue2 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue2;
nValue3 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue3;
nValue4 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue4;
nValue5 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue5;
nValue6 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue6;
nValue7 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue7;
nValue8 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue8;
nValue9 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue9;
nValue10 = *pBufferData++ / 10 - 273;
*pFrameData++ = nValue10;
//LOGMSG("CV4l2CaptureIr::getData nRow=[%3d] nCol=[%3d] nValue=[%d %d %d %d %d %d %d %d %d %d]", nRow, nCol, nValue1, nValue2, nValue3, nValue4, nValue5, nValue6, nValue7, nValue8, nValue9, nValue10);
}
}
//LOGMSG("CV4l2CaptureIr::getData nTotalCostTm=[%llu]", (currentTime() - nTotalCostTm).count());
pIrFrame->setFrameId(unFrameId >= UINT_MAX ? 0 : unFrameId++);
// 不使用该参数
pIrFrame->setChannelIndex(0);
// 不使用该参数
pIrFrame->setFrameIndex(0);
// 1400*960 -> 14*96
pIrFrame->setFrameCount(1);
// 设置为点云数据流
pIrFrame->setFrameType(FRAME_TYPE_EXT_IR);
// 不使用该参数
pIrFrame->setHdrIndex(0);
// 一个包存成一行
pIrFrame->setWidth(_IR_SENSOR_TEMP_IMAGE_WIDTH);
// 100个包存成一帧
pIrFrame->setHeight(_IR_SENSOR_TEMP_IMAGE_HEIGHT);
pIrFrame->setPixelFormat(PixelFormat_IrExt16);
pIrFrame->setSize(nSize);
pIrFrame->setTimeStamp(0/**((uint64_t *)pData->timestamp)*/);
// 不使用该参数
pIrFrame->setStepCount(0);
//pIrFrame->setData(info->pBuffer, nSize);
// 灰度原始数据进入数据处理中心
pFrameProCenter = dynamic_cast<CGpFrameProcessCenter*>(pDev->getFrameProcessCenter());
if (!pFrameProCenter)
{
LOGERROR("CV4l2CaptureIr::getData pFrameProCenter is NULL");
SAFE_DELETE(pIrFrame);
nRet = ReturnCode_Error;
goto queue_buff;
}
if (pFrameProCenter->distributeProcess(pIrFrame) != ReturnCode_Success)
{
LOGERROR("CV4l2CaptureIr::getData pFrameProCenter->distributeProcess failed");
SAFE_DELETE(pIrFrame);
nRet = ReturnCode_Error;
goto queue_buff;
}
queue_buff:
// 根据返回的buf.index找到对应的mmap映射好的缓存,取出视频数据
// 将视频输出的缓冲帧放回到视频输入的缓冲区中去
// 将刚刚处理完的帧缓冲区重新加入采集队列列尾,这样可以循环采集
if (bQBuffFlag)
{
if(-1 == this->ioCtrl(m_nFd, VIDIOC_QBUF, &info))
{
LOGERROR("CV4l2CaptureIr::getData VIDIOC_QBUF is error... errno=[%d] reason=[%s]", errno, strerror(errno));
return ReturnCode_Error;
}
else
{
//LOGMSG("CV4l2CaptureIr::getData VIDIOC_QBUF is suc... index=[%u] offset=[%u] length=[%u] bytesused=[%u] sequence=[%u] flags=[0x%08x] tv_sec=[%u] tv_usec=[%u]", info.index, info.m.offset, info.length, info.bytesused, info.sequence, info.flags, info.timestamp.tv_sec, info.timestamp.tv_usec);
}
}
//LOGMSG("CV4l2CaptureIr::getData is suc... nTotalCostTm=[%llu] nImageWidth=[%d] nImageHeight=[%d] nSize=[%d] nStrechMax=[%d] nStrechMin=[%d]", (currentTime() - nTotalCostTm).count(), nImageWidth, nImageHeight, nSize, nStrechMax, nStrechMin);
return nRet;
}
//
//
INT32 CV4l2CaptureIr::uvcSet(int nValue)
{
//LOGMSG("CV4l2CaptureIr::uvcSet nValue=[%d]", nValue);
// 等待耗时统计
//TimePoint nTotalCostTm = currentTime();
// 检查m_nFd
CHECKI(m_nFd != INVALID_FD_ID);
/*
 * include/linux/videodev2.h
* struct v4l2_control
* {
* __u32 id;
* __s32 value;
* };
*/
struct v4l2_control info;
MEMSET(info);
info.id = V4L2_CID_ZOOM_ABSOLUTE;
info.value = nValue;
if (ioctl(m_nFd, VIDIOC_S_CTRL, &info) == -1)
{
LOGERROR("CV4l2CaptureIr::uvcSet is error... v4l2_control error nValue=[%d]", nValue);
return ReturnCode_Error;
}
//LOGMSG("CV4l2CaptureIr::uvcSet is suc... nTotalCostTm=[%llu] nValue=[%d]", (currentTime() - nTotalCostTm).count(), nValue);
return ReturnCode_Success;
}
//
// 释放资源
INT32 CV4l2CaptureIr::deinit()
{
// 释放内存
this->deinitVideoDev();
// 关闭文件
this->closeVideoDev();
return ReturnCode_Success;
}
//
// 释放内存
void CV4l2CaptureIr::deinitVideoDev()
{
LOGMSG("CV4l2CaptureIr::deinitVideoDev");
// 等待耗时统计
TimePoint nTotalCostTm = currentTime();
switch (m_unIoMethod)
{
case V4L2_IO_METHOD_READ:
{
this->deinitRead();
}
break;
case V4L2_IO_METHOD_MMAP:
{
this->deinitMmap();
}
break;
case V4L2_IO_METHOD_USERPTR:
{
this->deinitUserPtr();
}
break;
}
LOGMSG("CV4l2CaptureIr::deinitVideoDev is suc... nTotalCostTm=[%llu]", (currentTime() - nTotalCostTm).count());
}
//
// 执行ioctl
INT32 CV4l2CaptureIr::ioCtrl(int nFd, unsigned long int unRequest, void* pArg)
{
// LOGMSG("CV4l2CaptureIr::ioCtrl nFd=[%d] unRequest=[%u]", nFd, unRequest);
// 等待耗时统计
// TimePoint nTotalCostTm = currentTime();
INT32 nRet = ReturnCode_Error;
do
{
nRet = ioctl(nFd, unRequest, pArg);
} while (-1 == nRet && EINTR == errno);
// LOGMSG("CV4l2CaptureIr::ioCtrl is suc... nTotalCostTm=[%llu] nFd=[%d] unRequest=[%u] ", (currentTime() - nTotalCostTm).count(), nFd, unRequest);
return nRet;
}