Linux 通过摄像头获取画面 C语言

 

前提是已经存在 /dev/video0 设备,大概流程可以在其他博客看到,本文给出了可以直接运行的 C 语言完整代码。作者使用 Jetson Nano 加罗技 C922 摄像头测试。

之后会有文章尝试控制各种曝光参数以及编码为视频流

 

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>              /* low-level i/o */
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <sys/mman.h>

#include <bits/types/struct_timespec.h>
#include <bits/types/struct_timeval.h>

#include <linux/videodev2.h>


/*
 * Capture a few raw frames from /dev/video0 via the V4L2 mmap streaming API
 * and append them to /tftpboot/yuv.yuv.
 *
 * Flow: open device -> (optional debug dumps) -> VIDIOC_S_FMT ->
 * VIDIOC_REQBUFS -> QUERYBUF + mmap + QBUF per buffer -> STREAMON ->
 * DQBUF/write/QBUF loop -> STREAMOFF -> munmap/free/close.
 *
 * Returns 0 in all cases (errors are reported on stdout).
 */
int main(int argc, char* argv[])
{
	(void)argc;
	(void)argv;

	int fd = open("/dev/video0", O_RDWR);
	if (fd < 0)
	{
		/* Original code dereferenced fd unchecked; every later ioctl
		 * would just fail with EBADF. Bail out early instead. */
		printf("open /dev/video0 failed, errno %d\n", errno);
		return 0;
	}

	if (0)
	{
		/* Debug toggle: enumerate every pixel format the driver supports. */
		struct v4l2_fmtdesc fmtdesc;
		memset(&fmtdesc, 0, sizeof(fmtdesc));	/* ioctl argument must start zeroed */
		fmtdesc.index = 0;
		fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		printf("Support format:\n");
		while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc) != -1)
		{
			printf("\t%d.%s\n", fmtdesc.index + 1, fmtdesc.description);
			fmtdesc.index++;
		}
		printf("enum done\n");
	}
	if (0)
	{
		/* Debug toggle: print the currently negotiated capture format. */
		struct v4l2_format cur;
		memset(&cur, 0, sizeof(cur));
		cur.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		ioctl(fd, VIDIOC_G_FMT, &cur);

		printf("Current data format information : \n\twidth: %d\n\theight: %d\n",
		       cur.fmt.pix.width, cur.fmt.pix.height);

		struct v4l2_fmtdesc fmtdesc2;
		memset(&fmtdesc2, 0, sizeof(fmtdesc2));
		fmtdesc2.index = 0;
		fmtdesc2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc2) != -1)
		{
			/* fourcc codes are opaque 32-bit tags, not bit flags:
			 * compare with ==, not the original bitwise &. */
			if (fmtdesc2.pixelformat == cur.fmt.pix.pixelformat)
			{
				printf("\tformat: %s\n", fmtdesc2.description);
				break;
			}
			fmtdesc2.index++;
		}
	}

	/* Negotiate the capture format. Ask for 1280x720 YUYV; if the driver
	 * cannot satisfy a field it silently adjusts it, so production code
	 * should re-check fmt.fmt.pix.* after VIDIOC_S_FMT succeeds. */
	struct v4l2_format fmt;
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 1280;
	fmt.fmt.pix.height = 720;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;	/* 0 is not a valid fourcc */
	fmt.fmt.pix.field = V4L2_FIELD_ANY;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
	{
		printf("set format failed\n");
		close(fd);
		return 0;
	}

	/* Ask the driver for CAP_BUF_NUM kernel-allocated buffers to mmap. */
	enum { CAP_BUF_NUM = 4 };
	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof(req));
	req.count = CAP_BUF_NUM;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
	{
		if (errno == EINVAL)
			printf("does not support memory mapping\n");
		else
			printf("does not support memory mapping, unknown error\n");
		close(fd);
		return 0;
	}
	printf("alloc success\n");
	if (req.count < CAP_BUF_NUM)
	{
		/* The driver may grant fewer buffers than requested. */
		printf("Insufficient buffer memory\n");
		close(fd);
		return 0;
	}
	printf("get %u bufs\n", req.count);	/* req.count is __u32: %u, not %d */

	/* One bookkeeping entry per driver buffer we map into user space. */
	typedef struct VideoBuffer {
		void* start;	/* address returned by mmap() */
		size_t length;	/* size of the mapping in bytes */
	} VideoBuffer;
	VideoBuffer* buffers = calloc(req.count, sizeof(*buffers));
	if (buffers == NULL)
	{
		printf("out of memory\n");
		close(fd);
		return 0;
	}

	unsigned int mapped = 0;	/* how many mmaps succeeded -> how many to munmap */
	int failed = 0;
	for (unsigned int i = 0; i < req.count; i++)
	{
		struct v4l2_buffer buf;
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		/* QUERYBUF yields the length and mmap offset of buffer i. */
		if (ioctl(fd, VIDIOC_QUERYBUF, &buf) == -1)
		{
			printf("unexpect error %u\n", i);
			failed = 1;
			break;
		}

		buffers[i].length = buf.length;
		buffers[i].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		                        MAP_SHARED, fd, buf.m.offset);
		if (buffers[i].start == MAP_FAILED)
		{
			printf("%u map failed errno %d\n", i, errno);
			buffers[i].start = NULL;
			failed = 1;
			break;
		}
		mapped++;

		/* Hand the empty buffer to the driver's incoming queue. */
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
		{
			printf("add buf to queue failed %u\n", i);
			failed = 1;
			break;
		}
	}
	if (failed)
		goto cleanup;	/* original code leaked earlier mmaps here */

	/* Start the capture stream. */
	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
	{
		printf("stream open failed\n");
		goto cleanup;
	}

	int frameCount = 9;
	while (frameCount--)
	{
		struct v4l2_buffer capture_buf;
		memset(&capture_buf, 0, sizeof(capture_buf));
		capture_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		capture_buf.memory = V4L2_MEMORY_MMAP;
		/* Blocking dequeue: waits until the driver has filled a buffer. */
		if (ioctl(fd, VIDIOC_DQBUF, &capture_buf) < 0)
		{
			printf("get frame failed %d\n", frameCount);
			break;
		}

		/* Append the raw frame to the output file; keep capturing even
		 * if the file cannot be opened (original crashed on NULL). */
		FILE* f = fopen("/tftpboot/yuv.yuv", "ab");
		if (f != NULL)
		{
			size_t wt = fwrite(buffers[capture_buf.index].start, 1,
			                   buffers[capture_buf.index].length, f);
			printf("wt %zu\n", wt);	/* fwrite returns size_t: %zu */
			fclose(f);
		}
		else
		{
			printf("open output file failed, errno %d\n", errno);
		}
		printf("get %d frame success\n", frameCount);

		/* Re-queue the buffer so the driver can reuse it. */
		if (ioctl(fd, VIDIOC_QBUF, &capture_buf) == -1)
		{
			printf("insert buf failed %d\n", frameCount);
			break;
		}
	}

	/* Stop streaming before tearing down the mappings. */
	ioctl(fd, VIDIOC_STREAMOFF, &type);

cleanup:
	/* Unmap exactly the buffers that were mapped (original iterated over
	 * CAP_BUF_NUM, which is wrong if mapping stopped early). */
	for (unsigned int i = 0; i < mapped; i++)
		munmap(buffers[i].start, buffers[i].length);
	free(buffers);
	close(fd);
	return 0;
}

 

  • 5
    点赞
  • 36
    收藏
    觉得还不错? 一键收藏
  • 6
    评论
下面是一个完整的C语言代码示例,使用FFmpeg库在Linux上将摄像头流与视频水印合并: ```c #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <sys/ioctl.h> #include <linux/videodev2.h> #define WIDTH 640 #define HEIGHT 480 #define FPS 30 #define WATERMARK_FILE "watermark.png" #define OUTPUT_FILE "output.mp4" int main(void) { int fd; struct v4l2_format fmt; struct v4l2_requestbuffers req; struct v4l2_buffer buf; enum v4l2_buf_type type; FILE *pipein; FILE *pipeout; char command[256]; int frame_size = WIDTH * HEIGHT * 3; // 打开摄像头设备 fd = open("/dev/video0", O_RDWR); if (fd == -1) { perror("Error opening video device"); return -1; } // 配置摄像头格式 memset(&fmt, 0, sizeof(fmt)); fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fmt.fmt.pix.width = WIDTH; fmt.fmt.pix.height = HEIGHT; fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24; fmt.fmt.pix.field = V4L2_FIELD_NONE; if (ioctl(fd, VIDIOC_S_FMT, &fmt) == -1) { perror("Error setting video format"); close(fd); return -1; } // 请求摄像头缓冲区 memset(&req, 0, sizeof(req)); req.count = 1; req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; req.memory = V4L2_MEMORY_MMAP; if (ioctl(fd, VIDIOC_REQBUFS, &req) == -1) { perror("Error requesting buffers"); close(fd); return -1; } // 映射摄像头缓冲区到用户空间 memset(&buf, 0, sizeof(buf)); buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; buf.index = 0; if (ioctl(fd, VIDIOC_QUERYBUF, &buf) == -1) { perror("Error querying buffer"); close(fd); return -1; } void *buffer_start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, buf.m.offset); if (buffer_start == MAP_FAILED) { perror("Error mapping buffer"); close(fd); return -1; } // 开始摄像头流捕获 type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (ioctl(fd, VIDIOC_STREAMON, &type) == -1) { perror("Error starting streaming"); munmap(buffer_start, buf.length); close(fd); return -1; } // 构建FFmpeg命令 sprintf(command, "ffmpeg -f rawvideo -pixel_format rgb24 -video_size %dx%d -framerate %d -i - -i %s -filter_complex overlay=W-w-10:H-h-10 -c:v libx264 %s", 
WIDTH, HEIGHT, FPS, WATERMARK_FILE, OUTPUT_FILE); // 打开管道 pipein = popen(command, "w"); if (pipein == NULL) { perror("Error opening pipe for input"); munmap(buffer_start, buf.length); close(fd); return -1; } // 循环读取摄像头帧并写入pipein以合并水印 while (1) { // 从摄像头获取帧 memset(&buf, 0, sizeof(buf)); buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; if (ioctl(fd, VIDIOC_QBUF, &buf) == -1) { perror("Error queuing buffer"); break; } // 开始采集帧 if (ioctl(fd, VIDIOC_DQBUF, &buf) == -1) { perror("Error dequeuing buffer"); break; } // 将帧数据写入pipein以合并水印 if (fwrite(buffer_start, 1, frame_size, pipein) != frame_size) { perror("Error writing to pipe"); break; } // 重新将帧放回摄像头缓冲区队列 if (ioctl(fd, VIDIOC_QBUF, &buf) == -1) { perror("Error requeuing buffer"); break; } } // 停止摄像头流捕获 type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (ioctl(fd, VIDIOC_STREAMOFF, &type) == -1) { perror("Error stopping streaming"); } // 释放资源 pclose(pipein); munmap(buffer_start, buf.length); close(fd); return 0; } ``` 在上述代码中,我们通过FFmpeg的`overlay`滤镜将摄像头流与指定的视频水印合并,并输出到指定的文件中。你需要将`/dev/video0`替换为你的摄像头设备文件路径,`WATERMARK_FILE`替换为你的水印文件路径,以及将`OUTPUT_FILE`替换为你想要输出的文件路径。请确保你已经安装了FFmpeg库和相关的编译工具链。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 6
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值