[root@localhost input_uvc]# pwd
/opt/FriendlyArm/mini2440/WebCam/mjpg-streamer/mjpg-streamer/plugins/input_uvc
[root@localhost input_uvc]# tree
.
|-- Makefile
|-- dynctrl.c
|-- dynctrl.h
|-- dynctrl.lo
|-- huffman.h
|-- input_uvc.c
|-- input_uvc.so
|-- jpeg_utils.c
|-- jpeg_utils.h
|-- jpeg_utils.lo
|-- uvc_compat.h
|-- uvcvideo.h
|-- v4l2uvc.c
|-- v4l2uvc.h
`-- v4l2uvc.lo
0 directories, 15 files
和WebCam/mjpg-streamer/mjpg-streamer/plugins/input_cmoscamera里类似,主要文件input_uvc.c--- 调用v4l2uvc.c里函数的部分
v4l2uvc.c------与驱动亲密接触的部分
在 v4l2uvc.h里的重要结构:
/* Number of memory-mapped kernel capture buffers requested from the V4L2 driver. */
#define NB_BUFFER 4
/*
 * Per-device capture state for one V4L2 (UVC) video device.
 * One instance is allocated by input_init() and threaded through
 * init_videoIn()/init_v4l2()/uvcGrab().
 */
struct vdIn {
int fd;                              /* file descriptor of the opened video device */
char *videodevice;                   /* device node path, e.g. "/dev/video0" */
char *status;                        /* human-readable status string */
char *pictName;                      /* file name used for snapshot captures */
struct v4l2_capability cap;          /* result of VIDIOC_QUERYCAP */
struct v4l2_format fmt;              /* negotiated capture format (VIDIOC_S_FMT) */
struct v4l2_buffer buf;              /* scratch buffer descriptor for QBUF/DQBUF */
struct v4l2_requestbuffers rb;       /* VIDIOC_REQBUFS request (NB_BUFFER mmap buffers) */
void *mem[NB_BUFFER];                /* user-space mmap()ings of the kernel buffers */
unsigned char *tmpbuffer;            /* staging copy of the latest MJPEG frame */
unsigned char *framebuffer;          /* staging copy of the latest YUYV frame */
int isstreaming;                     /* non-zero after VIDIOC_STREAMON */
int grabmethod;                      /* non-zero = streaming/mmap i/o, 0 = read() i/o */
int width;                           /* requested/negotiated frame width in pixels */
int height;                          /* requested/negotiated frame height in pixels */
int fps;                             /* requested frame rate (frames per second) */
int formatIn;                        /* capture pixel format (V4L2_PIX_FMT_MJPEG or _YUYV) */
int formatOut;                       /* output pixel format */
int framesizeIn;                     /* size in bytes of one uncompressed input frame */
int signalquit;                      /* cleared to signal the grab loop to stop */
int toggleAvi;                       /* flag: record frames to an AVI file */
int getPict;                         /* flag: take a single snapshot */
int rawFrameCapture;                 /* flag: capture raw frames to disk */
/* raw frame capture */
unsigned int fileCounter;            /* sequence number for raw-frame file names */
/* raw frame stream capture */
unsigned int rfsFramesWritten;       /* frames written in raw-frame-stream mode */
unsigned int rfsBytesWritten;        /* bytes written in raw-frame-stream mode */
/* raw stream capture */
FILE *captureFile;                   /* open handle of the raw stream capture file */
unsigned int framesWritten;          /* frames written to captureFile */
unsigned int bytesWritten;           /* bytes written to captureFile */
int framecount;                      /* NOTE(review): appears unused in the quoted code — verify */
int recordstart;                     /* recording start timestamp */
int recordtime;                      /* recording duration */
};
mjpg-streamer/plugins/input_uvc/input_uvc.c
/*
 * Plugin entry point: parse the parameter string handed over by
 * mjpg-streamer, initialize the controls mutex, allocate and open the
 * V4L2 capture device, and apply the LED setting.
 *
 * param->parameter_string is split on spaces into an argv[] vector and
 * parsed with getopt_long_only(); recognized options select device,
 * resolution, fps, pixel format, JPEG quality, minimum frame size,
 * dynamic-controls usage and LED mode.
 *
 * Returns 0 on success, 1 on a parameter error (after printing help);
 * exits the process on mutex/allocation/device-open failure.
 */
int input_init(input_parameter *param) {
    char *argv[MAX_ARGUMENTS] = {NULL}, *dev = "/dev/video0", *s;
    int argc = 1, width = 640, height = 480, fps = 5, format = V4L2_PIX_FMT_MJPEG, i;
    in_cmd_type led = IN_CMD_LED_AUTO;

    /* initialize the mutex variable (typo "mutes" fixed) */
    if (pthread_mutex_init(&controls_mutex, NULL) != 0) {
        IPRINT("could not initialize mutex variable\n");
        exit(EXIT_FAILURE);
    }

    /* convert the single parameter-string to an array of strings */
    argv[0] = INPUT_PLUGIN_NAME;
    if (param->parameter_string != NULL && strlen(param->parameter_string) != 0) {
        char *arg = NULL, *saveptr = NULL, *token = NULL;

        arg = (char *)strdup(param->parameter_string);

        /* NOTE(review): a parameter string without any space is silently
         * ignored here (as in the original) — confirm this is intended. */
        if (strchr(arg, ' ') != NULL) {
            token = strtok_r(arg, " ", &saveptr);
            if (token != NULL) {
                argv[argc] = strdup(token);
                argc++;
                while ((token = strtok_r(NULL, " ", &saveptr)) != NULL) {
                    argv[argc] = strdup(token);
                    argc++;
                    if (argc >= MAX_ARGUMENTS) {
                        IPRINT("ERROR: too many arguments to input plugin\n");
                        free(arg); /* FIX: scratch buffer was leaked on this path */
                        return 1;
                    }
                }
            }
        }
        /* FIX: argv[] holds strdup()ed copies of each token, so the
         * tokenizer's scratch buffer can be released (was leaked). */
        free(arg);
    }

    /* show all parameters for DBG purposes */
    for (i = 0; i < argc; i++) {
        DBG("argv[%d]=%s\n", i, argv[i]);
    }

    /* parse the parameters */
    reset_getopt();
    while (1) {
        int option_index = 0, c = 0;
        static struct option long_options[] = \
        {
            {"h", no_argument, 0, 0},
            {"help", no_argument, 0, 0},
            {"d", required_argument, 0, 0},
            {"device", required_argument, 0, 0},
            {"r", required_argument, 0, 0},
            {"resolution", required_argument, 0, 0},
            {"f", required_argument, 0, 0},
            {"fps", required_argument, 0, 0},
            {"y", no_argument, 0, 0},
            {"yuv", no_argument, 0, 0},
            {"q", required_argument, 0, 0},
            {"quality", required_argument, 0, 0},
            {"m", required_argument, 0, 0},
            {"minimum_size", required_argument, 0, 0},
            {"n", no_argument, 0, 0},
            {"no_dynctrl", no_argument, 0, 0},
            {"l", required_argument, 0, 0},
            {"led", required_argument, 0, 0},
            {0, 0, 0, 0}
        };

        /* parsing all parameters according to the list above is sufficent */
        c = getopt_long_only(argc, argv, "", long_options, &option_index);

        /* no more options to parse */
        if (c == -1) break;

        /* unrecognized option */
        if (c == '?') {
            help();
            return 1;
        }

        /* dispatch the given options */
        switch (option_index) {
            /* h, help */
            case 0:
            case 1:
                DBG("case 0,1\n");
                help();
                return 1;
                break;

            /* d, device */
            case 2:
            case 3:
                DBG("case 2,3\n");
                dev = strdup(optarg);
                break;

            /* r, resolution */
            case 4:
            case 5:
                DBG("case 4,5\n");
                width = -1;
                height = -1;

                /* try to find the resolution in lookup table "resolutions" */
                for (i = 0; i < LENGTH_OF(resolutions); i++) {
                    if (strcmp(resolutions[i].string, optarg) == 0) {
                        width = resolutions[i].width;
                        height = resolutions[i].height;
                    }
                }
                /* done if width and height were set */
                if (width != -1 && height != -1)
                    break;
                /* parse value as decimal value, e.g. "640x480" */
                width = strtol(optarg, &s, 10);
                height = strtol(s + 1, NULL, 10);
                break;

            /* f, fps */
            case 6:
            case 7:
                DBG("case 6,7\n");
                fps = atoi(optarg);
                break;

            /* y, yuv */
            case 8:
            case 9:
                DBG("case 8,9\n");
                format = V4L2_PIX_FMT_YUYV;
                break;

            /* q, quality — implies YUYV capture, JPEG is then produced
             * in software at the requested quality (clamped to 0..100) */
            case 10:
            case 11:
                DBG("case 10,11\n");
                format = V4L2_PIX_FMT_YUYV;
                gquality = MIN(MAX(atoi(optarg), 0), 100);
                break;

            /* m, minimum_size */
            case 12:
            case 13:
                DBG("case 12,13\n");
                minimum_size = MAX(atoi(optarg), 0);
                break;

            /* n, no_dynctrl */
            case 14:
            case 15:
                DBG("case 14,15\n");
                dynctrls = 0;
                break;

            /* l, led */
            case 16:
            case 17:
                DBG("case 16,17\n");
                if (strcmp("on", optarg) == 0) {
                    led = IN_CMD_LED_ON;
                } else if (strcmp("off", optarg) == 0) {
                    led = IN_CMD_LED_OFF;
                } else if (strcmp("auto", optarg) == 0) {
                    led = IN_CMD_LED_AUTO;
                } else if (strcmp("blink", optarg) == 0) {
                    led = IN_CMD_LED_BLINK;
                }
                break;

            default:
                DBG("default case\n");
                help();
                return 1;
        }
    }

    /* keep a pointer to the global variables */
    pglobal = param->global;

    /* allocate webcam datastructure */
    videoIn = malloc(sizeof(struct vdIn));
    if (videoIn == NULL) {
        IPRINT("not enough memory for videoIn\n");
        exit(EXIT_FAILURE);
    }
    memset(videoIn, 0, sizeof(struct vdIn));

    /* display the parsed values */
    IPRINT("Using V4L2 device.: %s\n", dev);
    IPRINT("Desired Resolution: %i x %i\n", width, height);
    IPRINT("Frames Per Second.: %i\n", fps);
    IPRINT("Format............: %s\n", (format == V4L2_PIX_FMT_YUYV) ? "YUV" : "MJPEG");
    if (format == V4L2_PIX_FMT_YUYV)
        IPRINT("JPEG Quality......: %d\n", gquality);

    /* open video device and prepare data structure */
    if (init_videoIn(videoIn, dev, width, height, fps, format, 1) < 0) {
        IPRINT("init_VideoIn failed\n");
        closelog();
        exit(EXIT_FAILURE);
    }

    /*
     * recent linux-uvc driver (revision > ~#125) requires to use dynctrls
     * for pan/tilt/focus/...
     * dynctrls must get initialized
     */
    if (dynctrls)
        initDynCtrls(videoIn->fd);

    /*
     * switch the LED according to the command line parameters (if any)
     */
    input_cmd(led, 0);

    return 0;
}
从char *argv[MAX_ARGUMENTS]={NULL}, *dev = "/dev/video0", *s;
int argc=1, width=640, height=480, fps=5, format=V4L2_PIX_FMT_MJPEG,i;
可知默认设备 /dev/video0,默认格式V4L2_PIX_FMT_MJPEG
从
/* y, yuv */
case 8:
case 9:
DBG("case 8,9\n");
format = V4L2_PIX_FMT_YUYV;
break;
可知指定 -y后格式是V4L2_PIX_FMT_YUYV
线程函数cam_thread中有调用如下图像格式转换函数
/*
* If capturing in YUV mode convert to JPEG now.
* This compression requires many CPU cycles, so try to avoid YUV format.
* Getting JPEGs straight from the webcam, is one of the major advantages of
* Linux-UVC compatible devices.
*/
if (videoIn->formatIn == V4L2_PIX_FMT_YUYV) {
DBG("compressing frame\n");
pglobal->size = compress_yuyv_to_jpeg(videoIn, pglobal->buf, videoIn->framesizeIn, gquality);//格式化图像数据塞进pglobal->buf
}
else {
DBG("copying frame\n");
pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer, videoIn->buf.bytesused);//图像数据塞进pglobal->buf
}
可以直接从WebCam中获取MJPEG格式的数据就不需要转换了------zc0301pl支持。
在初始化函数中,有几行代码,使用ioctl设置zc0301pl的输出格式
/*
* set format in
*/
memset(&vd->fmt, 0, sizeof(struct v4l2_format));
vd->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vd->fmt.fmt.pix.width = vd->width;
vd->fmt.fmt.pix.height = vd->height;
vd->fmt.fmt.pix.pixelformat = vd->formatIn;//指定输出格式,已指定是V4L2_PIX_FMT_MJPEG
vd->fmt.fmt.pix.field = V4L2_FIELD_ANY;
ret = ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt);//
if (ret < 0) {
perror("Unable to set format");
goto fatal;
}
----所以此处实际上调用memcpy_picture()
/*************************************************************************************************************/
此处使用v4l2接口来操作usb摄像头 ,当然是因为驱动是使用v4l2标准实现的。
v4l2的知识参考
http://blog.csdn.net/Sasoritattoo/article/details/6225486
http://blog.csdn.net/hongtao_liu/article/details/5894089
http://v4l2spec.bytesex.org/spec-single/v4l2.html
下面根据v4l2操作摄像头的流程理清一下数据获取的思路
init_v4l2()函数里面顺序使用到的ioctl:
ioctl(vd->fd, VIDIOC_QUERYCAP, &vd->cap);//查询视频设备的功能
ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt);//设置视频设备的视频数据格式
ioctl(vd->fd, VIDIOC_S_PARM, setfps);//number of frames per second
ioctl(vd->fd, VIDIOC_REQBUFS, &vd->rb);//请求V4L2驱动分配视频缓冲区(若干个,组成队列),位于内核空间
ioctl(vd->fd, VIDIOC_QUERYBUF, &vd->buf);//查询上一步分配的V4L2的视频缓冲区的相关信息,为下一步的mmap()。
mmap();
ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);//指示驱动使用哪个缓冲区进入队列来获取数据(投放一个空的视频缓冲区到视频缓冲区输入队列中 ;)
while(!stop){
ioctl(vd->fd, VIDIOC_STREAMON, &type);//启动摄像头抓取图像数据
ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf);//将刚刚用VIDIOC_QBUF投进队列的缓冲区从队列中踢出(因为已经从这个缓冲区中读到了数据)
memcpy(vd->tmpbuffer, vd->mem[vd->buf.index], vd->buf.bytesused);//拷贝图像到目的地址(在用户空间,因为已经mmap过)
ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);//指示驱动使用哪个缓冲区进入队列来获取数据(投放一个空的视频缓冲区到视频缓冲区输入队列中 ;)
}
mjpg-streamer/plugins/input_uvc/v4l2uvc.c
/*
 * Open and configure the V4L2 device described by vd:
 *   1. open() the device node and query its capabilities (VIDIOC_QUERYCAP)
 *   2. negotiate the capture format (VIDIOC_S_FMT)
 *   3. request the frame rate (VIDIOC_S_PARM, best effort)
 *   4. request NB_BUFFER mmap buffers (VIDIOC_REQBUFS), query each one
 *      (VIDIOC_QUERYBUF), mmap() it into user space and queue it
 *      (VIDIOC_QBUF)
 *
 * Returns 0 on success, -1 on failure (the device fd is closed again).
 */
static int init_v4l2(struct vdIn *vd)
{
    int i;
    int ret = 0;
    /* FIX: was calloc()'ed and never freed — a stack object behaves
     * identically for the ioctl and cannot leak */
    struct v4l2_streamparm setfps;

    if ((vd->fd = open(vd->videodevice, O_RDWR)) == -1) {
        perror("ERROR opening V4L interface");
        return -1;
    }

    /* query the device's capabilities */
    memset(&vd->cap, 0, sizeof(struct v4l2_capability));
    ret = ioctl(vd->fd, VIDIOC_QUERYCAP, &vd->cap);
    if (ret < 0) {
        fprintf(stderr, "Error opening device %s: unable to query device.\n", vd->videodevice);
        goto fatal;
    }

    if ((vd->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
        fprintf(stderr, "Error opening device %s: video capture not supported.\n",
                vd->videodevice);
        goto fatal; /* FIX: stray double semicolon removed */
    }

    if (vd->grabmethod) {
        if (!(vd->cap.capabilities & V4L2_CAP_STREAMING)) {
            fprintf(stderr, "%s does not support streaming i/o\n", vd->videodevice);
            goto fatal;
        }
    } else {
        if (!(vd->cap.capabilities & V4L2_CAP_READWRITE)) {
            fprintf(stderr, "%s does not support read i/o\n", vd->videodevice);
            goto fatal;
        }
    }

    /*
     * set format in
     */
    memset(&vd->fmt, 0, sizeof(struct v4l2_format));
    vd->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->fmt.fmt.pix.width = vd->width;
    vd->fmt.fmt.pix.height = vd->height;
    vd->fmt.fmt.pix.pixelformat = vd->formatIn;
    vd->fmt.fmt.pix.field = V4L2_FIELD_ANY;
    ret = ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt);
    if (ret < 0) {
        perror("Unable to set format");
        goto fatal;
    }
    /* the driver may have adjusted the resolution — adopt its choice */
    if ((vd->fmt.fmt.pix.width != vd->width) ||
        (vd->fmt.fmt.pix.height != vd->height)) {
        fprintf(stderr, " format asked unavailable get width %d height %d \n",
                vd->fmt.fmt.pix.width, vd->fmt.fmt.pix.height);
        vd->width = vd->fmt.fmt.pix.width;
        vd->height = vd->fmt.fmt.pix.height;
        /*
         * NOTE(review): the driver may also have changed the pixel format;
         * the original deliberately does NOT adopt it:
         * vd->formatIn = vd->fmt.fmt.pix.pixelformat;
         */
    }

    /*
     * set framerate (best effort: many drivers do not support S_PARM,
     * so a failure here is reported but not fatal)
     */
    memset(&setfps, 0, sizeof(struct v4l2_streamparm));
    setfps.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    setfps.parm.capture.timeperframe.numerator = 1;
    setfps.parm.capture.timeperframe.denominator = vd->fps;
    ret = ioctl(vd->fd, VIDIOC_S_PARM, &setfps);
    if (ret < 0)
        perror("Unable to set frame rate (continuing)"); /* FIX: was silently ignored */

    /*
     * request buffers: ask the driver to allocate NB_BUFFER capture
     * buffers in kernel space
     */
    memset(&vd->rb, 0, sizeof(struct v4l2_requestbuffers));
    vd->rb.count = NB_BUFFER;
    vd->rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->rb.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(vd->fd, VIDIOC_REQBUFS, &vd->rb);
    if (ret < 0) {
        perror("Unable to allocate buffers");
        goto fatal;
    }

    /*
     * map the buffers: QUERYBUF yields each buffer's length and offset,
     * which mmap() then maps into this process's address space
     */
    for (i = 0; i < NB_BUFFER; i++) {
        memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
        vd->buf.index = i;
        vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        vd->buf.memory = V4L2_MEMORY_MMAP;
        ret = ioctl(vd->fd, VIDIOC_QUERYBUF, &vd->buf);
        if (ret < 0) {
            perror("Unable to query buffer");
            goto fatal;
        }
        if (debug)
            fprintf(stderr, "length: %u offset: %u\n", vd->buf.length, vd->buf.m.offset);
        /* FIX: the quoted source had a stray "mmap" token spliced into this
         * argument list, which does not compile */
        vd->mem[i] = mmap(0 /* start anywhere */ ,
                          vd->buf.length, PROT_READ, MAP_SHARED, vd->fd,
                          vd->buf.m.offset);
        if (vd->mem[i] == MAP_FAILED) {
            perror("Unable to map buffer");
            goto fatal;
        }
        if (debug)
            fprintf(stderr, "Buffer mapped at address %p.\n", vd->mem[i]);
    }

    /*
     * Queue the buffers: hand every (empty) buffer to the driver's input
     * queue so capture can fill them once streaming starts.
     */
    for (i = 0; i < NB_BUFFER; ++i) {
        memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
        vd->buf.index = i;
        vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        vd->buf.memory = V4L2_MEMORY_MMAP;
        ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
        if (ret < 0) {
            perror("Unable to queue buffer");
            goto fatal; /* FIX: stray double semicolon removed */
        }
    }
    return 0;

fatal:
    /* FIX: the device fd was leaked on every error path after open() */
    close(vd->fd);
    vd->fd = -1;
    return -1;
}
uvcGrab()函数里面顺序使用到的ioctl
ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf);
ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
mjpg-streamer/plugins/input_uvc/v4l2uvc.c
/*
 * Grab one frame from the device:
 *   - start streaming on first call (video_enable)
 *   - dequeue a filled buffer (VIDIOC_DQBUF)
 *   - copy its payload into tmpbuffer (MJPEG) or framebuffer (YUYV)
 *   - requeue the buffer (VIDIOC_QBUF)
 *
 * Returns 0 on success (including skipped empty frames), -1 on error
 * (vd->signalquit is cleared so the capture loop terminates).
 */
int uvcGrab(struct vdIn *vd)
{
/* frames at or below this many bytes are treated as broken/empty */
#define HEADERFRAME1 0xaf

    int ret;

    if (!vd->isstreaming)
        if (video_enable(vd))
            goto err;

    memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
    vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->buf.memory = V4L2_MEMORY_MMAP;

    ret = ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf);
    if (ret < 0) {
        perror("Unable to dequeue buffer");
        goto err;
    }

    switch (vd->formatIn) {
    case V4L2_PIX_FMT_MJPEG:
        if (vd->buf.bytesused <= HEADERFRAME1) {
            /* Prevent crash on empty image. */
            fprintf(stderr, "Ignoring empty buffer ...\n");
            /* FIX: the buffer must be returned to the driver's queue
             * before bailing out; the original leaked one of the
             * NB_BUFFER kernel buffers per empty frame, stalling
             * capture after NB_BUFFER occurrences. */
            ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
            if (ret < 0) {
                perror("Unable to requeue buffer");
                goto err;
            }
            return 0;
        }
        memcpy(vd->tmpbuffer, vd->mem[vd->buf.index], vd->buf.bytesused);
        if (debug)
            fprintf(stderr, "bytes in used %d \n", vd->buf.bytesused);
        break;

    case V4L2_PIX_FMT_YUYV:
        /* clamp the copy so a driver reporting an oversized payload
         * cannot overflow framebuffer */
        if (vd->buf.bytesused > vd->framesizeIn)
            memcpy(vd->framebuffer, vd->mem[vd->buf.index], (size_t) vd->framesizeIn);
        else
            memcpy(vd->framebuffer, vd->mem[vd->buf.index], (size_t) vd->buf.bytesused);
        break;

    default:
        goto err;
        break;
    }

    /* hand the (now consumed) buffer back to the driver */
    ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
    if (ret < 0) {
        perror("Unable to requeue buffer");
        goto err;
    }

    return 0;

err:
    vd->signalquit = 0;
    return -1;
}