一、V4L2编程代码实现
1.头文件
#include <stdio.h>
#include <stdlib.h>     /* malloc/free */
#include <string.h>     /* memset/memcpy */
#include <fcntl.h>      /* open */
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mman.h>   /* mmap/munmap */
#include <linux/videodev2.h>
2.打开设备
// Open the camera device node for read/write; returns a file descriptor on success
int fd = open("/dev/video0", O_RDWR);
if(fd < 0)
{
perror("open fail");
return -1;
}
3.获取设备支持格式
使用的函数:
int ioctl (int __fd, unsigned long int __request, …)
可以通过命令宏的定义查看 ioctl 第三个参数应传入的结构体类型
比如:VIDIOC_ENUM_FMT对应的是struct v4l2_fmtdesc
struct v4l2_fmtdesc {
__u32 index; /* Format number */
__u32 type; /* enum v4l2_buf_type */
__u32 flags;
__u8 description[32]; /* Description string */
__u32 pixelformat; /* Format fourcc */
__u32 reserved[4];
};
enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
V4L2_BUF_TYPE_VIDEO_OUTPUT = 2,
V4L2_BUF_TYPE_VIDEO_OVERLAY = 3,
V4L2_BUF_TYPE_VBI_CAPTURE = 4,
...
};
例:
struct v4l2_fmtdesc vfmts;
vfmts.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
int ret = ioctl(fd, VIDIOC_ENUM_FMT, &vfmts);
if(ret < 0)
{
perror("获取设备支持格式VIDIOC_ENUM_FMT失败");
return -1;
}
printf("index = %d\n", vfmts.index);
printf("%s\n", vfmts.description);
4.设置设备采集格式VIDIOC_S_FMT
#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
struct v4l2_format {
__u32 type;
union {
struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
struct v4l2_pix_format_mplane pix_mp; /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */
struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
struct v4l2_sdr_format sdr; /* V4L2_BUF_TYPE_SDR_CAPTURE */
__u8 raw_data[200]; /* user-defined */
} fmt;
};
struct v4l2_pix_format {
__u32 width;
__u32 height;
__u32 pixelformat;
__u32 field; /* enum v4l2_field */
__u32 bytesperline; /* for padding, zero if unused */
__u32 sizeimage;
__u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
__u32 ycbcr_enc; /* enum v4l2_ycbcr_encoding */
__u32 quantization; /* enum v4l2_quantization */
__u32 xfer_func; /* enum v4l2_xfer_func */
};
设置例:
struct v4l2_format fmat;
fmat.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
fmat.fmt.pix.width = 640;
fmat.fmt.pix.height = 480;
fmat.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;//V4L2_PIX_FMT_YUYV
int ret = ioctl(fd, VIDIOC_S_FMT, &fmat);
if(ret < 0)
{
perror("v4l2_format set error");
return -1;
}
获取当前格式例:
// Read back the format actually in effect — the driver is allowed to
// adjust the requested width/height/pixelformat, so verify after S_FMT.
memset(&fmat, 0, sizeof(fmat));
fmat.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = ioctl(fd, VIDIOC_G_FMT, &fmat);
if(ret < 0)
{
perror("v4l2_format get error");
return -1;
}
printf("%d:%d\n", fmat.fmt.pix.width,fmat.fmt.pix.height);
// Confirm the driver accepted MJPEG rather than falling back to another format
if(V4L2_PIX_FMT_MJPEG == fmat.fmt.pix.pixelformat)
{
printf("设置mjpegok\n");
}
5.分配内核队列空间(申请内核缓冲区)VIDIOC_REQBUFS
#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers)
struct v4l2_requestbuffers {
__u32 count;
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
__u32 reserved[2];
};
enum v4l2_memory {
V4L2_MEMORY_MMAP = 1,
V4L2_MEMORY_USERPTR = 2,
V4L2_MEMORY_OVERLAY = 3,
V4L2_MEMORY_DMABUF = 4,
};
例:
struct v4l2_requestbuffers reqbuffer;
reqbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
reqbuffer.count = 4;//申请缓冲区--不超过5
reqbuffer.memory = V4L2_MEMORY_MMAP;
ret = ioctl(fd, VIDIOC_REQBUFS, &reqbuffer);
if(ret < 0)
{
perror("v4l2_requestbuffers request error");
return -1;
}
6.映射队列空间到用户空间VIDIOC_QUERYBUF , VIDIOC_QBUF
#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer)
#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer)
#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer)
struct v4l2_buffer {
__u32 index;
__u32 type;
__u32 bytesused;
__u32 flags;
__u32 field;
struct timeval timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
/* memory location */
__u32 memory;
union {
__u32 offset;
unsigned long userptr;
struct v4l2_plane *planes;
__s32 fd;
} m;
__u32 length;
__u32 reserved2;
__u32 reserved;
};
例:
/* For each requested buffer: query its length/offset, mmap it into user
 * space, and queue it so the driver can start filling it. */
struct v4l2_buffer v4l2buffer;
unsigned char *mmps[4];
unsigned long mmpsize[4];
for(int i=0; i<4; i++)
{
/* FIX: zero the struct each iteration — VIDIOC_QUERYBUF reads type/index
 * and other fields must not contain stale stack data. */
memset(&v4l2buffer, 0, sizeof(v4l2buffer));
v4l2buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
v4l2buffer.index = i;
ret = ioctl(fd, VIDIOC_QUERYBUF, &v4l2buffer);
if(ret < 0)
{
perror("v4l2_buffer VIDIOC_QUERYBUF error"); /* was a copy-pasted REQBUFS message */
return -1;
}
/* Map the kernel buffer into this process at the driver-supplied offset */
mmps[i] = mmap(NULL,v4l2buffer.length, PROT_READ|PROT_WRITE, MAP_SHARED,fd,
v4l2buffer.m.offset);
/* FIX: check with MAP_FAILED before recording the size */
if(mmps[i] == MAP_FAILED)
{
perror("mmap fail");
return -1;
}
mmpsize[i] = v4l2buffer.length;
/* Hand the (empty) buffer to the driver's incoming queue */
ret = ioctl(fd, VIDIOC_QBUF, &v4l2buffer);
if(ret < 0)
{
perror("mmap vidioc_qbuf");
return -1;
}
}
7.开始采集VIDIOC_STREAMON
#define VIDIOC_STREAMON _IOW('V', 18, int)
#define VIDIOC_STREAMOFF _IOW('V', 19, int)
这个命令对应数据可以使用前面用到的结构体对应type
int type = V4L2_BUF_TYPE_VIDEO_CAPTURE
例:
// Start streaming; the ioctl argument is a pointer to the buffer type
int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = ioctl(fd, VIDIOC_STREAMON, &type);
if(ret < 0)
{
perror("start fail");
return -1;
}
8.读数据——出队取一帧 VIDIOC_DQBUF(把已填满的缓冲区从驱动队列中取出,供应用访问)
// Dequeue one filled buffer (blocks until a frame is available);
// on return v4l2buffer.index identifies which mapped buffer holds the frame
memset(&v4l2buffer, 0, sizeof(v4l2buffer));
v4l2buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = ioctl(fd, VIDIOC_DQBUF, &v4l2buffer);
if(ret < 0)
{
perror("拿一帧数据失败");
return -1;
}
读取数据:
//mmps[v4l2buffer.index];
FILE *file = fopen("my.jpg", "w+");
fwrite(mmps[v4l2buffer.index], v4l2buffer.length, 1, file);
fclose(file);
9.读数据——重新入队 VIDIOC_QBUF(把用完的缓冲区放回驱动队列,等待再次填充)
// Re-queue the buffer so the driver can fill it with the next frame
ret = ioctl(fd, VIDIOC_QBUF, &v4l2buffer);
if(ret < 0)
{
perror("放一帧数据失败");
return -1;
}
10.停止采集VIDIOC_STREAMOFF
/* Stop streaming; the argument is again a pointer to the buffer type */
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = ioctl(fd, VIDIOC_STREAMOFF, &type);
if(ret < 0)
{
perror("stop fail"); /* FIX: was "start fail" — copy-paste error */
return -1;
}
11.释放映射空间
// Unmap every successfully mapped buffer; skip slots whose mmap failed
for(int i=0;i<4;i++)
{
if(mmps[i] == (void*)-1) continue;
munmap(mmps[i],mmpsize[i]);
}
12.关闭设备
close(fd);
二、YUV格式
数据格式:MJPEG , RGB, YUV(格式集合)
Y–亮度, UV–色彩
分段存储方式(H264):Y1Y2Y3Y4Y5Y6Y7,U1U2U3U4U5U6U7,V1V2V3V4V5V6V7
交错式存储:Y1U1V1 Y2U2V2 Y3U3V3
1.存储方式
(1)YUV 4:4:4
YUV三个信道的抽样率相同,因此在生成的图像里,每个象素的三个分量信息完整(每个分量通常8比特),经过8比特量化之后,未经压缩的每个像素占用3个字节。
下面的四个像素为: [Y0 U0 V0] [Y1 U1 V1] [Y2 U2 V2] [Y3 U3 V3]
存放的码流为: Y0 U0 V0 Y1 U1 V1 Y2 U2 V2 Y3 U3 V3
(2)YUV 4:2:2(YUYV)
每个色差信道的抽样率是亮度信道的一半,所以水平方向的色度抽样率只是4:4:4的一半。对非压缩的8比特量化的图像来说,每个由两个水平方向相邻的像素组成的宏像素需要占用4字节内存。
下面的四个像素为:[Y0 U0 V0] [Y1 U1 V1] [Y2 U2 V2] [Y3 U3 V3]
存放的码流为:Y0 U0 Y1 V1 Y2 U2 Y3 V3
映射出像素点为:[Y0 U0 V1] [Y1 U0 V1] [Y2 U2 V3] [Y3 U2 V3]
(3)YUV 4:1:1
4:1:1的色度抽样,是在水平方向上对色度进行4:1抽样。对于低端用户和消费类产品这仍然是可以接受的。对非压缩的8比特量化的视频来说,每个由4个水平方向相邻的像素组成的宏像素需要占用6字节内存。
下面的四个像素为: [Y0 U0 V0] [Y1 U1 V1] [Y2 U2 V2] [Y3 U3 V3]
存放的码流为: Y0 U0 Y1 Y2 V2 Y3
映射出像素点为:[Y0 U0 V2] [Y1 U0 V2] [Y2 U0 V2] [Y3 U0 V2]
(4)YUV4:2:0
4:2:0并不意味着只有Y,Cb而没有Cr分量。它指得是对每行扫描线来说,只有一种色度分量以2:1的抽样率存储。相邻的扫描行存储不同的色度分量,也就是说,如果一行是4:2:0的话,下一行就是4:0:2,再下一行是4:2:0…以此类推。对每个色度分量来说,水平方向和竖直方向的抽样率都是2:1,所以可以说色度的抽样率是4:1。对非压缩的8比特量化的视频来说,每个由2x2个2行2列相邻的像素组成的宏像素需要占用6字节内存。
下面八个像素为:[Y0 U0 V0] [Y1 U1 V1] [Y2 U2 V2] [Y3 U3 V3] [Y5 U5 V5] [Y6 U6 V6] [Y7 U7 V7] [Y8 U8 V8]
存放的码流为:Y0 U0 Y1 Y2 U2 Y3 Y5 V5 Y6 Y7 V7 Y8
映射出的像素点为:[Y0 U0 V5] [Y1 U0 V5] [Y2 U2 V7] [Y3 U2 V7]
[Y5 U0 V5] [Y6 U0 V5] [Y7 U2 V7] [Y8 U2 V7]
2.YUV格式转化为RGB格式
(1)小数形式,未量化
[YUV] -> [RGB]
R = Y + 1.4075 * (V-128);
G = Y - 0.3455 * (U-128) - 0.7169*(V-128);
B = Y + 1.779 * (U-128);
Y = 0.299*R + 0.587*G + 0.114*B;
U = (B-Y)/1.772;
V = (R-Y)/1.402; (U~(-128-127))
或写为:
Y = 0.299*R + 0.587*G + 0.114*B;
U = -0.169*R - 0.331*G + 0.5 *B ;
V = 0.5 *R - 0.419*G - 0.081*B;
(2)整数形式(减少计算量)未量化
R= Y + ((360 * (V - 128))>>8) ;
G= Y - (( ( 88 * (U - 128) + 184 * (V - 128)) )>>8) ;
B= Y +((455 * (U - 128))>>8) ;
Y = (77*R + 150*G + 29*B)>>8;
U = ((-44*R - 87*G + 131*B)>>8) + 128;
V = ((131*R - 110*G - 21*B)>>8) + 128 ;
(3)量化后的公式( Y~(16,235) U/V ~(16,240) ) 量化
[YUV] -> [RGB]
R = 1.164*Y + 1.596 * V - 222.9
G = 1.164*Y - 0.392 * U - 0.823 * V+ 135.6
B = 1.164*Y + 2.017 * U- 276.8
[RGB] -> [YUV]
Y = 0.257*R' + 0.504*G' + 0.098*B' + 16
U = -0.148*R' - 0.291*G' + 0.439*B' + 128
V = 0.439*R' - 0.368*G' - 0.071*B' + 128
例:
把YUV4:2:2 --> RGB
buffer 的码流:Y0U0Y1V1 Y2U2Y3V3
/* Convert one 640x480 YUYV (YUV 4:2:2) frame to packed RGB24.
 * Stream layout: Y0 U0 Y1 V1 | Y2 U2 Y3 V3 ... — every 4-byte macro-pixel
 * expands to two RGB pixels that share one U/V pair.
 * FIX: the buffers hold raw 0..255 samples, so they must be unsigned char;
 * plain `char` may be signed (implementation-defined) and would corrupt
 * components >= 128 when read back.
 * NOTE(review): ~1.8 MB of buffers — if this code lives inside a function,
 * make them static or heap-allocated to avoid stack overflow. */
unsigned char buffer[640*480*2]; //YUYV source frame
unsigned char rgbdata[640*480*3]; //RGB24 output
int r1, g1, b1;
int r2, g2, b2;
for(int i=0; i<640*480/2; i++)
{
unsigned char data[4];
memcpy(data, buffer+i*4, 4);
//Y0U0Y1V1 --> [Y0 U0 V1] [Y1 U0 V1]
unsigned char Y0=data[0];
unsigned char U0=data[1];
unsigned char Y1=data[2];
unsigned char V1=data[3];
//floating-point YUV->RGB conversion, clamped to [0,255]
r1 = Y0+1.4075*(V1-128); if(r1>255)r1=255; if(r1<0)r1=0;
g1 = Y0-0.3455*(U0-128)-0.7169*(V1-128); if(g1>255)g1=255; if(g1<0)g1=0;
b1 = Y0+1.779*(U0-128); if(b1>255)b1=255; if(b1<0)b1=0;
r2 = Y1+1.4075*(V1-128); if(r2>255)r2=255; if(r2<0)r2=0;
g2 = Y1-0.3455*(U0-128)-0.7169*(V1-128); if(g2>255)g2=255; if(g2<0)g2=0;
b2 = Y1+1.779*(U0-128); if(b2>255)b2=255; if(b2<0)b2=0;
rgbdata[i*6+0]=r1;
rgbdata[i*6+1]=g1;
rgbdata[i*6+2]=b1;
rgbdata[i*6+3]=r2;
rgbdata[i*6+4]=g2;
rgbdata[i*6+5]=b2;
}
三、jpeg解码—libjpeg
1.ubuntu版本
在ubuntu要安装libjpeg8-dev
sudo apt install libjpeg8-dev
gcc -o video_linux video_linux.c -ljpeg
2.开发板版本
装备arm版本的libjpeg库 把libjpeg目录拷贝到工程当前目录下
arm-linux-gcc -o video_linux video_linux.c -L./libjpeg -I./libjpeg -ljpeg
3.解码流程
/*
 * Decode an in-memory JPEG image into caller-provided packed RGB24 storage.
 *
 * jpegData : pointer to the compressed JPEG byte stream
 * rgbdata  : output buffer; must hold output_width * output_height *
 *            output_components bytes (presumably 640x480x3 here — confirm
 *            against the caller's allocation)
 * size     : length of jpegData in bytes
 * Returns 1 on success, -1 on allocation failure.
 */
int read_JPEG_file (const char *jpegData, char *rgbdata, int size)
{
struct jpeg_error_mgr jerr;
struct jpeg_decompress_struct cinfo;
cinfo.err = jpeg_std_error(&jerr);
//1. create and initialise the decompression object
jpeg_create_decompress(&cinfo);
//2. hand the compressed data to libjpeg
//   FIX: original passed `jpegsize`, an undeclared identifier — the
//   buffer length is the `size` parameter
jpeg_mem_src(&cinfo, (const unsigned char *)jpegData, (unsigned long)size);
//3. read the JPEG header (fills output_width/height/components)
(void) jpeg_read_header(&cinfo, TRUE);
/* Step 4: set parameters for decompression (library defaults used) */
//5. start decompression
(void) jpeg_start_decompress(&cinfo);
//6. allocate space for one output scanline
int row_stride = cinfo.output_width * cinfo.output_components;
unsigned char *buffer = malloc(row_stride);
if (buffer == NULL)
{
jpeg_destroy_decompress(&cinfo);
return -1;
}
while (cinfo.output_scanline < cinfo.output_height)
{
(void) jpeg_read_scanlines(&cinfo, &buffer, 1);
//FIX: copy by row_stride, not hard-coded 640*3, so any image width works;
//output_scanline has already advanced past the row just read
memcpy(rgbdata + (cinfo.output_scanline - 1) * row_stride, buffer, row_stride);
}
//7. finish decoding
(void) jpeg_finish_decompress(&cinfo);
//8. release resources — FIX: original leaked `buffer`
free(buffer);
jpeg_destroy_decompress(&cinfo);
return 1;
}
问题:insmod: ERROR: could not insert module mmp_drv.ko: Invalid module format 是什么原因?
原因:模块是用与当前运行内核不同版本的内核源码编译的(系统中存在多个版本的内核)。
解决:针对当前运行的内核重新编译驱动即可:make clean && make,然后再 insmod。