The only difference from the previous post is that the video stream now comes from a camera instead of a file.
First, define two helper functions: one lists the available capture devices, the other lists the options a given device supports.
void show_dshow_device(){
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    AVDictionary* options = NULL;
    av_dict_set(&options, "list_devices", "true", 0);
    AVInputFormat *iformat = av_find_input_format("dshow");
    printf("Device Info=============\n");
    avformat_open_input(&pFormatCtx, "video=dummy", iformat, &options);
    printf("========================\n");
}

void show_dshow_device_option(){
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    AVDictionary* options = NULL;
    av_dict_set(&options, "list_options", "true", 0);
    AVInputFormat *iformat = av_find_input_format("dshow");
    printf("========Device Option Info======\n");
    avformat_open_input(&pFormatCtx, "video=1.3M WebCam", iformat, &options);
    printf("================================\n");
}
Explanation:
"dshow" is the DirectShow input device, which is what my laptop uses. My camera's device name is "1.3M WebCam", so that name is passed to avformat_open_input, which fills in the AVFormatContext structure; the video stream can then be obtained from pFormatCtx->streams[i].
As before, here is the complete project.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <windows.h>
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/avfilter.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/avutil.h"
}
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "avutil.lib")
void show_dshow_device(){
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    AVDictionary* options = NULL;
    av_dict_set(&options, "list_devices", "true", 0);
    AVInputFormat *iformat = av_find_input_format("dshow");
    printf("Device Info=============\n");
    avformat_open_input(&pFormatCtx, "video=dummy", iformat, &options);
    printf("========================\n");
}

void show_dshow_device_option(){
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    AVDictionary* options = NULL;
    av_dict_set(&options, "list_options", "true", 0);
    AVInputFormat *iformat = av_find_input_format("dshow");
    printf("========Device Option Info======\n");
    avformat_open_input(&pFormatCtx, "video=1.3M WebCam", iformat, &options);
    printf("================================\n");
}
// BMP file header structures (only used if windows.h's wingdi.h has not already defined them;
// if these fallbacks were actually used they would also need #pragma pack(2) so that
// sizeof(BITMAPFILEHEADER) == 14)
#ifndef _WINGDI_
#define _WINGDI_
typedef struct tagBITMAPFILEHEADER {
    WORD  bfType;
    DWORD bfSize;
    WORD  bfReserved1;
    WORD  bfReserved2;
    DWORD bfOffBits;
} BITMAPFILEHEADER, FAR *LPBITMAPFILEHEADER, *PBITMAPFILEHEADER;

typedef struct tagBITMAPINFOHEADER{
    DWORD biSize;
    LONG  biWidth;
    LONG  biHeight;
    WORD  biPlanes;
    WORD  biBitCount;
    DWORD biCompression;
    DWORD biSizeImage;
    LONG  biXPelsPerMeter;
    LONG  biYPelsPerMeter;
    DWORD biClrUsed;
    DWORD biClrImportant;
} BITMAPINFOHEADER, FAR *LPBITMAPINFOHEADER, *PBITMAPINFOHEADER;
#endif
// Save one BGR24 frame as a BMP file
void SaveAsBMP(AVFrame *pFrameRGB, int width, int height, int index, int bpp)
{
    BITMAPFILEHEADER bmpheader;
    BITMAPINFOHEADER bmpinfo;
    FILE *fp;
    char filename[255];
    // Output path prefix; change it to suit your own machine
    sprintf_s(filename, 255, "%s_%d.bmp", "C:/test", index);
    if ((fp = fopen(filename, "wb+")) == NULL) {
        printf("open file failed!\n");
        return;
    }
    bmpheader.bfType = 0x4d42;
    bmpheader.bfReserved1 = 0;
    bmpheader.bfReserved2 = 0;
    bmpheader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
    bmpheader.bfSize = bmpheader.bfOffBits + width*height*bpp/8;
    bmpinfo.biSize = sizeof(BITMAPINFOHEADER);
    bmpinfo.biWidth = width;
    bmpinfo.biHeight = height;
    bmpinfo.biPlanes = 1;
    bmpinfo.biBitCount = bpp;
    bmpinfo.biCompression = BI_RGB;
    bmpinfo.biSizeImage = (width*bpp+31)/32*4*height;
    bmpinfo.biXPelsPerMeter = 100;
    bmpinfo.biYPelsPerMeter = 100;
    bmpinfo.biClrUsed = 0;
    bmpinfo.biClrImportant = 0;
    fwrite(&bmpheader, sizeof(bmpheader), 1, fp);
    fwrite(&bmpinfo, sizeof(bmpinfo), 1, fp);
    // Note: the pixel buffer is written as one tightly packed block, which assumes
    // pFrameRGB->linesize[0] == width*bpp/8 and that each row is already a multiple
    // of 4 bytes (BMP rows must be 4-byte aligned).
    fwrite(pFrameRGB->data[0], width*height*bpp/8, 1, fp);
    fclose(fp);
}
int main ()
{
    AVFormatContext *pFormatCtx;
    unsigned int i = 0;
    int videoStream = -1;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame, *pFrameRGB;
    struct SwsContext *pSwsCtx;
    int frameFinished;
    int PictureSize;
    AVPacket packet;
    uint8_t *buf;

    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    //Register Device
    avdevice_register_all();
    //Show Dshow Device
    show_dshow_device();
    //Show Device Options
    show_dshow_device_option();

    AVInputFormat *ifmt = av_find_input_format("dshow");
    //Set your own video device's name here
    if(avformat_open_input(&pFormatCtx, "video=1.3M WebCam", ifmt, NULL) != 0){
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    // Find the video stream
    for(i = 0; i < pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
            videoStream = i;
        }
    if(videoStream == -1){
        printf("%s\n", "find video stream failed");
        exit(1);
    }
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec == NULL){
        printf("%s\n", "avcodec find decoder failed!");
        exit(1);
    }
    // Open the decoder
    if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
        printf("avcodec open failed!\n");
        exit(1);
    }
    // Allocate the frames
    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();
    if(pFrame == NULL || pFrameRGB == NULL){
        printf("av frame alloc failed!\n");
        exit(1);
    }
    // Size of one BGR24 picture
    PictureSize = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
    buf = (uint8_t*)av_malloc(PictureSize);
    if(buf == NULL){
        printf("av malloc failed!\n");
        exit(1);
    }
    avpicture_fill((AVPicture *)pFrameRGB, buf, AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
    // Set up the image conversion (swscale) context
    pSwsCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                             pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGR24,
                             SWS_BICUBIC, NULL, NULL, NULL);
    i = 0;
    while(av_read_frame(pFormatCtx, &packet) >= 0){
        if(packet.stream_index == videoStream){
            // The actual decoding
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if(frameFinished){
                // Flip the picture vertically by pointing each plane at its last row
                // and negating the stride; otherwise the saved BMP is upside down
                pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height - 1);
                pFrame->linesize[0] *= -1;
                pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height/2 - 1);
                pFrame->linesize[1] *= -1;
                pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height/2 - 1);
                pFrame->linesize[2] *= -1;
                // Convert the decoded YUV420P picture to BGR24
                sws_scale(pSwsCtx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                // Save it as a BMP
                SaveAsBMP(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i, 24);
                i++;
            }
        }
        av_free_packet(&packet);
    }
    sws_freeContext(pSwsCtx);
    av_free(buf);
    av_frame_free(&pFrame);
    av_frame_free(&pFrameRGB);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    return 0;
}
The value of this article is that you can read frames from the camera and process each one.
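As a small illustration of what "processing a frame" could look like, here is a minimal sketch (my own addition, not part of the original project) of a helper that could be called right after avcodec_decode_video2 reports a finished frame, before the vertical flip. It computes the average luma of the decoded YUV420P picture; the function name average_luma is hypothetical.

// Illustrative helper (hypothetical, not in the original source): average luma of a
// decoded YUV420P frame. Call it before the flip, while linesize[0] is still positive.
static double average_luma(const AVFrame *frame, int width, int height)
{
    long long sum = 0;
    for (int y = 0; y < height; y++) {
        const uint8_t *row = frame->data[0] + y * frame->linesize[0];
        for (int x = 0; x < width; x++)
            sum += row[x];
    }
    return (double)sum / ((double)width * height);
}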
I let it run for a few seconds and it saved a lot of images. Counting them, 30 images were saved per second, which means my camera's frame rate is 30 fps.
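If the default capture rate or resolution is not what you want, the dshow input device also accepts options such as "framerate" and "video_size" (see FFmpeg's dshow input device documentation). A minimal sketch of how they could be passed when opening the camera follows; whether a given combination is accepted depends on what the camera itself supports:

AVDictionary *opts = NULL;
av_dict_set(&opts, "video_size", "640x480", 0);   // requested capture resolution
av_dict_set(&opts, "framerate", "30", 0);         // requested capture frame rate
if (avformat_open_input(&pFormatCtx, "video=1.3M WebCam", ifmt, &opts) != 0) {
    printf("Couldn't open input stream with the requested options.\n");
    return -1;
}
av_dict_free(&opts);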
Source file download: http://download.csdn.net/download/bless2015/10129278