以下是icvRetrieveFrameCAM_V4L(CvCaptureCAM_V4L* capture, int)的源码,位于OpenCV 3.1.0源码cap_v4l.cpp第1438行。其调用链如下:该函数被cap_v4l.cpp第1794行的retrieveFrame(int)调用;retrieveFrame(int)被cap.cpp第100行的cvRetrieveFrame(CvCapture* capture, int idx)调用;cvRetrieveFrame又被cap.cpp第620行的bool VideoCapture::retrieve(OutputArray image, int channel)调用;而VideoCapture::retrieve即为bool VideoCapture::read(OutputArray image)中实际读取图像的函数。
static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) {
    /* Convert the most recently dequeued V4L2 buffer (capture->bufferIndex)
     * into the IplImage handed back to cvRetrieveFrame() callers.
     *
     * When convert_rgb is false the returned IplImage aliases the driver's
     * mmap'ed buffer (zero copy); otherwise the raw frame is decoded into a
     * privately allocated 24-bit image owned by capture->frame.
     * Returns NULL only on (M)JPEG decode failure. */

    // We need our own pixel storage iff we convert to RGB.
    bool recreate_frame = capture->frame_allocated != capture->convert_rgb;
    if (!capture->convert_rgb) {
        // For mjpeg streams the compressed size can change from frame to
        // frame, so the header must track the current buffer length.
        recreate_frame |= capture->frame.imageSize != (int)capture->buffers[capture->bufferIndex].length;
    }

    if (recreate_frame) {
        if (capture->frame_allocated)
            cvFree(&capture->frame.imageData);
        v4l2_create_frame(capture);
    }

    if (!capture->convert_rgb) {
        // Zero-copy path: point the IplImage straight at the mmap'ed buffer.
        capture->frame.imageData = (char*)capture->buffers[capture->bufferIndex].start;
        return &capture->frame;
    }

    switch (capture->palette)
    {
    case V4L2_PIX_FMT_BGR24:
        // Already in the output format: plain copy.
        memcpy((char *)capture->frame.imageData,
               (char *)capture->buffers[capture->bufferIndex].start,
               capture->frame.imageSize);
        break;

    case V4L2_PIX_FMT_YVU420:
        yuv420p_to_rgb24(capture->form.fmt.pix.width,
                         capture->form.fmt.pix.height,
                         (unsigned char*)(capture->buffers[capture->bufferIndex].start),
                         (unsigned char*)capture->frame.imageData);
        break;

    case V4L2_PIX_FMT_YUV411P:
        yuv411p_to_rgb24(capture->form.fmt.pix.width,
                         capture->form.fmt.pix.height,
                         (unsigned char*)(capture->buffers[capture->bufferIndex].start),
                         (unsigned char*)capture->frame.imageData);
        break;

#ifdef HAVE_JPEG
    case V4L2_PIX_FMT_MJPEG:
    case V4L2_PIX_FMT_JPEG:
        if (!mjpeg_to_rgb24(capture->form.fmt.pix.width,
                            capture->form.fmt.pix.height,
                            (unsigned char*)(capture->buffers[capture->bufferIndex].start),
                            capture->buffers[capture->bufferIndex].length,
                            &capture->frame))
            return 0;   // decode failure -> NULL frame for the caller
        break;
#endif

    case V4L2_PIX_FMT_YUYV:
        yuyv_to_rgb24(capture->form.fmt.pix.width,
                      capture->form.fmt.pix.height,
                      (unsigned char*)(capture->buffers[capture->bufferIndex].start),
                      (unsigned char*)capture->frame.imageData);
        break;

    case V4L2_PIX_FMT_UYVY:
        uyvy_to_rgb24(capture->form.fmt.pix.width,
                      capture->form.fmt.pix.height,
                      (unsigned char*)(capture->buffers[capture->bufferIndex].start),
                      (unsigned char*)capture->frame.imageData);
        break;

    case V4L2_PIX_FMT_SBGGR8:
        bayer2rgb24(capture->form.fmt.pix.width,
                    capture->form.fmt.pix.height,
                    (unsigned char*)capture->buffers[capture->bufferIndex].start,
                    (unsigned char*)capture->frame.imageData);
        break;

    case V4L2_PIX_FMT_SN9C10X:
        // NOTE(review): decompression uses the *next* ring buffer as scratch
        // space, which may clobber a queued frame; later OpenCV versions use
        // a dedicated temporary buffer instead — confirm before relying on it.
        sonix_decompress_init();
        sonix_decompress(capture->form.fmt.pix.width,
                         capture->form.fmt.pix.height,
                         (unsigned char*)capture->buffers[capture->bufferIndex].start,
                         (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start);
        bayer2rgb24(capture->form.fmt.pix.width,
                    capture->form.fmt.pix.height,
                    (unsigned char*)capture->buffers[(capture->bufferIndex+1) % capture->req.count].start,
                    (unsigned char*)capture->frame.imageData);
        break;

    case V4L2_PIX_FMT_SGBRG8:
        // BUGFIX: demosaic the buffer that was actually dequeued
        // (bufferIndex), not the next one in the ring, which holds a
        // stale or still-queued frame.
        sgbrg2rgb24(capture->form.fmt.pix.width,
                    capture->form.fmt.pix.height,
                    (unsigned char*)capture->buffers[capture->bufferIndex].start,
                    (unsigned char*)capture->frame.imageData);
        break;

    case V4L2_PIX_FMT_RGB24:
        // BUGFIX: same off-by-one buffer index as the SGBRG8 case above.
        rgb24_to_rgb24(capture->form.fmt.pix.width,
                       capture->form.fmt.pix.height,
                       (unsigned char*)capture->buffers[capture->bufferIndex].start,
                       (unsigned char*)capture->frame.imageData);
        break;

    default:
        // Unknown palette: leave the frame contents untouched.
        break;
    }

    return &capture->frame;
}
VideoCapture类的声明如下:
class CV_EXPORTS_W VideoCapture
{
public:
/** @brief Default constructor.
@note In C API, when you finished working with video, release CvCapture structure with
cvReleaseCapture(), or use Ptr\<CvCapture\> that calls cvReleaseCapture() automatically in the
destructor.
*/
CV_WRAP VideoCapture();
/** @overload
@param filename name of the opened video file (eg. video.avi) or image sequence (eg.
img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
*/
CV_WRAP VideoCapture(const String& filename);
/** @overload
@param filename name of the opened video file (eg. video.avi) or image sequence (eg.
img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
@param apiPreference preferred Capture API to use. Can be used to enforce a specific reader
implementation if multiple are available: e.g. CAP_FFMPEG or CAP_IMAGES
*/
CV_WRAP VideoCapture(const String& filename, int apiPreference);
/** @overload
@param index = camera_id + domain_offset (CAP_*). id of the video capturing device to open. If there is a single
camera connected, just pass 0. Advanced Usage: to open Camera 1 using the MS Media Foundation API: index = 1 + CAP_MSMF
*/
CV_WRAP VideoCapture(int index);
virtual ~VideoCapture();
/** @brief Open video file or a capturing device for video capturing
@param filename name of the opened video file (eg. video.avi) or image sequence (eg.
img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
The methods first call VideoCapture::release to close the already opened file or camera.
*/
CV_WRAP virtual bool open(const String& filename);
/** @overload
@param index = camera_id + domain_offset (CAP_*). id of the video capturing device to open. If there is a single
camera connected, just pass 0. Advanced Usage: to open Camera 1 using the MS Media Foundation API: index = 1 + CAP_MSMF
*/
CV_WRAP virtual bool open(int index);
/** @brief Returns true if video capturing has been initialized already.
If the previous call to VideoCapture constructor or VideoCapture::open succeeded, the method returns
true.
*/
CV_WRAP virtual bool isOpened() const;
/** @brief Closes video file or capturing device.
The methods are automatically called by subsequent VideoCapture::open and by VideoCapture
destructor.
The C function also deallocates memory and clears \*capture pointer.
*/
CV_WRAP virtual void release();
/** @brief Grabs the next frame from video file or capturing device.
The methods/functions grab the next frame from video file or camera and return true (non-zero) in
the case of success.
The primary use of the function is in multi-camera environments, especially when the cameras do not
have hardware synchronization. That is, you call VideoCapture::grab() for each camera and after that
call the slower method VideoCapture::retrieve() to decode and get frame from each camera. This way
the overhead on demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames
from different cameras will be closer in time.
Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the
correct way of retrieving data from it is to call VideoCapture::grab first and then call
VideoCapture::retrieve one or more times with different values of the channel parameter. See
<https://github.com/Itseez/opencv/tree/master/samples/cpp/openni_capture.cpp>
*/
CV_WRAP virtual bool grab();
/** @brief Decodes and returns the grabbed video frame.
The methods/functions decode and return the just grabbed frame. If no frames have been grabbed
(camera has been disconnected, or there are no more frames in video file), the methods return false
and the functions return NULL pointer.
@note OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame return image stored inside the video
capturing structure. It is not allowed to modify or release the image! You can copy the frame using
cvCloneImage() and then do whatever you want with the copy.
*/
CV_WRAP virtual bool retrieve(OutputArray image, int flag = 0);
virtual VideoCapture& operator >> (CV_OUT Mat& image);
virtual VideoCapture& operator >> (CV_OUT UMat& image);
/** @brief Grabs, decodes and returns the next video frame.
The methods/functions combine VideoCapture::grab and VideoCapture::retrieve in one call. This is the
most convenient method for reading video files or capturing data from a camera: it decodes and
returns the just grabbed frame. If no frames have been grabbed (camera has been disconnected, or
there are no more frames in video file), the methods return false and the functions return NULL pointer.
@note OpenCV 1.x functions cvRetrieveFrame and cv.RetrieveFrame return image stored inside the video
capturing structure. It is not allowed to modify or release the image! You can copy the frame using
cvCloneImage() and then do whatever you want with the copy.
*/
CV_WRAP virtual bool read(OutputArray image);
/** @brief Sets a property in the VideoCapture.
@param propId Property identifier. It can be one of the following:
-   **CAP_PROP_POS_MSEC** Current position of the video file in milliseconds.
-   **CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
-   **CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the
film, 1 - end of the film.
-   **CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
-   **CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
-   **CAP_PROP_FPS** Frame rate.
-   **CAP_PROP_FOURCC** 4-character code of codec.
-   **CAP_PROP_FRAME_COUNT** Number of frames in the video file.
-   **CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .
-   **CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
-   **CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
-   **CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
-   **CAP_PROP_SATURATION** Saturation of the image (only for cameras).
-   **CAP_PROP_HUE** Hue of the image (only for cameras).
-   **CAP_PROP_GAIN** Gain of the image (only for cameras).
-   **CAP_PROP_EXPOSURE** Exposure (only for cameras).
-   **CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted
to RGB.
-   **CAP_PROP_WHITE_BALANCE** Currently unsupported
-   **CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
by DC1394 v 2.x backend currently)
@param value Value of the property.
*/
CV_WRAP virtual bool set(int propId, double value);
/** @brief Returns the specified VideoCapture property
@param propId Property identifier. It can be one of the following:
-   **CAP_PROP_POS_MSEC** Current position of the video file in milliseconds or video
capture timestamp.
-   **CAP_PROP_POS_FRAMES** 0-based index of the frame to be decoded/captured next.
-   **CAP_PROP_POS_AVI_RATIO** Relative position of the video file: 0 - start of the
film, 1 - end of the film.
-   **CAP_PROP_FRAME_WIDTH** Width of the frames in the video stream.
-   **CAP_PROP_FRAME_HEIGHT** Height of the frames in the video stream.
-   **CAP_PROP_FPS** Frame rate.
-   **CAP_PROP_FOURCC** 4-character code of codec.
-   **CAP_PROP_FRAME_COUNT** Number of frames in the video file.
-   **CAP_PROP_FORMAT** Format of the Mat objects returned by retrieve() .
-   **CAP_PROP_MODE** Backend-specific value indicating the current capture mode.
-   **CAP_PROP_BRIGHTNESS** Brightness of the image (only for cameras).
-   **CAP_PROP_CONTRAST** Contrast of the image (only for cameras).
-   **CAP_PROP_SATURATION** Saturation of the image (only for cameras).
-   **CAP_PROP_HUE** Hue of the image (only for cameras).
-   **CAP_PROP_GAIN** Gain of the image (only for cameras).
-   **CAP_PROP_EXPOSURE** Exposure (only for cameras).
-   **CAP_PROP_CONVERT_RGB** Boolean flags indicating whether images should be converted
to RGB.
-   **CAP_PROP_WHITE_BALANCE** Currently not supported
-   **CAP_PROP_RECTIFICATION** Rectification flag for stereo cameras (note: only supported
by DC1394 v 2.x backend currently)
@note When querying a property that is not supported by the backend used by the VideoCapture
class, value 0 is returned.
*/
CV_WRAP virtual double get(int propId) const;
/** @overload
@param filename name of the opened video file (eg. video.avi) or image sequence (eg.
img_%02d.jpg, which will read samples like img_00.jpg, img_01.jpg, img_02.jpg, ...)
@param apiPreference preferred Capture API to use. Can be used to enforce a specific reader
implementation if multiple are available: e.g. CAP_FFMPEG or CAP_IMAGES
The methods first call VideoCapture::release to close the already opened file or camera.
*/
CV_WRAP virtual bool open(const String& filename, int apiPreference);
protected:
Ptr<CvCapture> cap;   // legacy C-API backend (used by the V4L capture path)
Ptr<IVideoCapture> icap;   // new-style C++ backend; empty when cap is used
};
成员cap的类型为Ptr&lt;CvCapture&gt;,持有指向底层CvCapture结构的智能指针。
结构体CvCaptureCAM_V4L的声明如下:
/* V4L2 camera capture backend state; one instance per opened device.
 * Inherits the CvCapture virtual interface used by cv::VideoCapture. */
struct CvCaptureCAM_V4L : public CvCapture
{
int deviceHandle;   // file descriptor of the opened V4L2 device (used with ioctl)
int bufferIndex;    // index into buffers[] of the most recently dequeued frame
int FirstCapture;   // non-zero until the first frame is grabbed (POS_MSEC returns 0 while set)
char *memoryMap;    // presumably the base of the mmap'ed buffer region — TODO confirm
IplImage frame;     // image header (and, if frame_allocated, owned pixel data) returned by retrieveFrame()
__u32 palette;      // current V4L2 pixel format (V4L2_PIX_FMT_*); selects the conversion in retrieveFrame()
int index;          // camera index — presumably the /dev/videoN number; verify against open()
int width, height;  // requested capture frame size
__u32 fps;          // requested frame rate
bool convert_rgb;   // if true, retrieveFrame() decodes the raw buffer into an RGB IplImage
bool frame_allocated; // true when frame.imageData is heap-allocated and owned (freed with cvFree)
/* V4L2 variables */
buffer buffers[MAX_V4L_BUFFERS + 1];  // mmap'ed capture buffers (+1 extra slot)
v4l2_capability cap;
v4l2_input inp;
v4l2_format form;   // current format; form.fmt.pix.{width,height} drive the converters
v4l2_crop crop;
v4l2_cropcap cropcap;
v4l2_requestbuffers req;  // req.count = number of buffers actually allocated by the driver
v4l2_buf_type type;
v4l2_queryctrl queryctrl;
timeval timestamp;  // timestamp of the last captured frame (basis of CAP_PROP_POS_MSEC)
/* V4L2 control variables */
// Cached min/max ranges of the device controls, used to normalize
// control values to [0,1] in the property getters/setters.
Range focus, brightness, contrast, saturation, hue, gain, exposure;
bool open(int _index);
virtual double getProperty(int) const;
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
// Maps a CV_CAP_PROP_* id to the cached control range; defaults to
// Range(0, 255) for unknown ids and Range(0, 1) for autofocus.
Range getRange(int property_id) const {
    switch (property_id) {
    case CV_CAP_PROP_BRIGHTNESS:
        return brightness;
    case CV_CAP_PROP_CONTRAST:
        return contrast;
    case CV_CAP_PROP_SATURATION:
        return saturation;
    case CV_CAP_PROP_HUE:
        return hue;
    case CV_CAP_PROP_GAIN:
        return gain;
    case CV_CAP_PROP_EXPOSURE:
        return exposure;
    case CV_CAP_PROP_FOCUS:
        return focus;
    case CV_CAP_PROP_AUTOFOCUS:
        return Range(0, 1);
    default:
        return Range(0, 255);
    }
}
virtual ~CvCaptureCAM_V4L();
};
CvCaptureCAM_V4L继承自CvCapture,可见V4L驱动的摄像头对应的VideoCapture使用成员cap而非icap(此时icap.empty()返回true)。cap_v4l.cpp第1566行的static double icvGetPropertyCAM_V4L(const CvCaptureCAM_V4L* capture, int property_id)如下:
static double icvGetPropertyCAM_V4L (const CvCaptureCAM_V4L* capture,
                                     int property_id ) {
    /* Query the current value of a capture property.
     *
     * Returns the raw value for format/mode properties, the FPS for
     * CV_CAP_PROP_FPS, milliseconds for CV_CAP_PROP_POS_MSEC, a value
     * normalized to [0, 1] for V4L2 user controls, and -1 on failure. */

    // BUGFIX: zero-initialize before handing the struct to the kernel
    // (consistent with the v4l2_streamparm() initialization below);
    // the driver only fills the fields it knows about.
    v4l2_format form = v4l2_format();
    form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_FMT, &form)) {
        /* display an error message, and return an error code */
        perror ("VIDIOC_G_FMT");
        return -1;
    }

    switch (property_id) {
    case CV_CAP_PROP_FRAME_WIDTH:
        return form.fmt.pix.width;
    case CV_CAP_PROP_FRAME_HEIGHT:
        return form.fmt.pix.height;
    case CV_CAP_PROP_FOURCC:
    case CV_CAP_PROP_MODE:
        return capture->palette;
    case CV_CAP_PROP_FORMAT:
        return CV_MAKETYPE(CV_8U, capture->frame.nChannels);
    case CV_CAP_PROP_CONVERT_RGB:
        return capture->convert_rgb;
    }

    if (property_id == CV_CAP_PROP_FPS) {
        v4l2_streamparm sp = v4l2_streamparm();
        sp.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        if (ioctl(capture->deviceHandle, VIDIOC_G_PARM, &sp) < 0) {
            fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to get camera FPS\n");
            return -1;
        }
        // NOTE(review): assumes the driver reports a non-zero numerator —
        // a zero numerator would divide by zero here; confirm against drivers.
        return sp.parm.capture.timeperframe.denominator / (double)sp.parm.capture.timeperframe.numerator;
    }

    if (property_id == CV_CAP_PROP_POS_MSEC) {
        // No timestamp exists before the first grab.
        if (capture->FirstCapture) {
            return 0;
        } else {
            return 1000 * capture->timestamp.tv_sec + ((double) capture->timestamp.tv_usec) / 1000;
        }
    }

    /* Everything else maps onto a V4L2 user control. */
    __u32 v4l2id = capPropertyToV4L2(property_id);
    if (v4l2id == __u32(-1)) {
        fprintf(stderr,
                "VIDEOIO ERROR: V4L2: getting property #%d is not supported\n",
                property_id);
        return -1;
    }

    v4l2_control control = {v4l2id, 0};
    if (-1 == ioctl (capture->deviceHandle, VIDIOC_G_CTRL,
                     &control)) {
        fprintf( stderr, "VIDEOIO ERROR: V4L2: ");
        switch (property_id) {
        case CV_CAP_PROP_BRIGHTNESS:
            fprintf (stderr, "Brightness");
            break;
        case CV_CAP_PROP_CONTRAST:
            fprintf (stderr, "Contrast");
            break;
        case CV_CAP_PROP_SATURATION:
            fprintf (stderr, "Saturation");
            break;
        case CV_CAP_PROP_HUE:
            fprintf (stderr, "Hue");
            break;
        case CV_CAP_PROP_GAIN:
            fprintf (stderr, "Gain");
            break;
        case CV_CAP_PROP_EXPOSURE:
            fprintf (stderr, "Exposure");
            break;
        case CV_CAP_PROP_AUTOFOCUS:
            fprintf (stderr, "Autofocus");
            break;
        case CV_CAP_PROP_FOCUS:
            fprintf (stderr, "Focus");
            break;
        }
        fprintf (stderr, " is not supported by your device\n");
        return -1;
    }

    /* get the min/max values */
    Range range = capture->getRange(property_id);
    /* all was OK, so convert to 0.0 - 1.0 range, and return the value */
    // NOTE(review): assumes range.size() != 0 for every supported control.
    return ((double)control.value - range.start) / range.size();
}