I. FFmpeg installation
Step 1: add the PPA source.
sudo add-apt-repository ppa:djcj/hybrid
Step 2: update the package index.
sudo apt-get update
Step 3: install.
sudo apt-get install ffmpeg
Step 4: verify the installation.
ffmpeg -version
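As an optional extra check that the install can also talk to a network source (assuming ffprobe was installed alongside ffmpeg, as it normally is), you can probe an RTSP stream directly; the URL here is a placeholder:
ffprobe -v error -show_streams rtsp://<your-stream-url>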
II. Using the FFmpeg libraries from C/C++ to decode an RTSP video stream and save frames as PPM images
1. Reference:
https://stackoverflow.com/questions/10715170/receiving-rtsp-stream-using-ffmpeg-library
2. Code: my_streamer.cpp
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
}
int main(int argc, char** argv) {
// Open the initial context variables that are needed
SwsContext *img_convert_ctx;
AVFormatContext* format_ctx = avformat_alloc_context();
AVCodecContext* codec_ctx = NULL;
int video_stream_index = -1;
// Register everything
av_register_all();
avformat_network_init();
//open RTSP
if (avformat_open_input(&format_ctx, "rtsp://134.169.178.187:8554/h264.3gp",
NULL, NULL) != 0) {
return EXIT_FAILURE;
}
if (avformat_find_stream_info(format_ctx, NULL) < 0) {
return EXIT_FAILURE;
}
//search for the video stream
for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
video_stream_index = i;
}
if (video_stream_index < 0) //no video stream found
return EXIT_FAILURE;
AVPacket packet;
av_init_packet(&packet);
//open output file
AVFormatContext* output_ctx = avformat_alloc_context();
AVStream* stream = NULL;
int cnt = 0;
//start reading packets from stream and write them to file
av_read_play(format_ctx); //play RTSP
// Get the codec
AVCodec *codec = NULL;
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec) {
exit(1);
}
// Add this to allocate the context by codec
codec_ctx = avcodec_alloc_context3(codec);
avcodec_get_context_defaults3(codec_ctx, codec);
avcodec_copy_context(codec_ctx, format_ctx->streams[video_stream_index]->codec);
std::ofstream output_file;
if (avcodec_open2(codec_ctx, codec, NULL) < 0)
exit(1);
img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height,
codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
SWS_BICUBIC, NULL, NULL, NULL);
int size = avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width,
codec_ctx->height);
uint8_t* picture_buffer = (uint8_t*) (av_malloc(size));
AVFrame* picture = av_frame_alloc();
AVFrame* picture_rgb = av_frame_alloc();
int size2 = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width,
codec_ctx->height);
uint8_t* picture_buffer_2 = (uint8_t*) (av_malloc(size2));
avpicture_fill((AVPicture *) picture, picture_buffer, AV_PIX_FMT_YUV420P,
codec_ctx->width, codec_ctx->height);
avpicture_fill((AVPicture *) picture_rgb, picture_buffer_2, AV_PIX_FMT_RGB24,
codec_ctx->width, codec_ctx->height);
while (av_read_frame(format_ctx, &packet) >= 0 && cnt < 1000) { //read ~ 1000 frames
std::cout << "1 Frame: " << cnt << std::endl;
if (packet.stream_index == video_stream_index) { //packet is video
std::cout << "2 Is Video" << std::endl;
if (stream == NULL) { //create stream in file
std::cout << "3 create stream" << std::endl;
stream = avformat_new_stream(output_ctx,
format_ctx->streams[video_stream_index]->codec->codec);
avcodec_copy_context(stream->codec,
format_ctx->streams[video_stream_index]->codec);
stream->sample_aspect_ratio =
format_ctx->streams[video_stream_index]->codec->sample_aspect_ratio;
}
int check = 0;
packet.stream_index = stream->id;
std::cout << "4 decoding" << std::endl;
int result = avcodec_decode_video2(codec_ctx, picture, &check, &packet);
std::cout << "Bytes decoded " << result << " check " << check
<< std::endl;
if (cnt > 100) //skip the first 100 frames
{
sws_scale(img_convert_ctx, picture->data, picture->linesize, 0,
codec_ctx->height, picture_rgb->data, picture_rgb->linesize);
std::stringstream file_name;
file_name << "test" << cnt << ".ppm";
output_file.open(file_name.str().c_str());
output_file << "P3 " << codec_ctx->width << " " << codec_ctx->height
<< " 255\n";
for (int y = 0; y < codec_ctx->height; y++) {
for (int x = 0; x < codec_ctx->width * 3; x++)
output_file
<< (int) (picture_rgb->data[0]
+ y * picture_rgb->linesize[0])[x] << " ";
}
output_file.close();
}
cnt++;
}
av_free_packet(&packet);
av_init_packet(&packet);
}
av_frame_free(&picture);
av_frame_free(&picture_rgb);
av_free(picture_buffer);
av_free(picture_buffer_2);
av_read_pause(format_ctx);
avio_close(output_ctx->pb);
avformat_free_context(output_ctx);
return (EXIT_SUCCESS);
}
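Note: av_register_all, avcodec_decode_video2 and the avpicture_* helpers used above are deprecated in FFmpeg 4.x and removed in 5.x. A minimal sketch of the replacement send/receive decode loop, assuming the same codec_ctx, packet and picture variables as in the program above:
// Hedged sketch: FFmpeg 3.1+ send/receive decode API replacing avcodec_decode_video2.
// Returns the number of frames decoded from this packet.
static int decode_packet(AVCodecContext *codec_ctx, AVPacket *packet, AVFrame *picture)
{
int got = 0;
if (avcodec_send_packet(codec_ctx, packet) < 0)
return 0;
while (avcodec_receive_frame(codec_ctx, picture) >= 0) {
// picture now holds one decoded frame; run sws_scale on it and save as above
got++;
}
return got;
}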
3. Dependency installation (the libsdl* packages below are not linked by this example's compile line; they are needed for the ffmpeg-tutorial playback demo in section IV)
apt install libavformat-dev
apt install libavcodec-dev
apt install libswresample-dev
apt install libswscale-dev
apt install libavutil-dev
sudo apt-get install libsdl1.2-dev
sudo apt-get install libsdl-image1.2-dev
sudo apt-get install libsdl-mixer1.2-dev
sudo apt-get install libsdl-ttf2.0-dev
sudo apt-get install libsdl-gfx1.2-dev
4. Compile
g++ -w my_streamer.cpp -o my_streamer $(pkg-config --cflags --libs libavformat libswscale libavcodec libavutil)
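If pkg-config reports one of these modules as missing, you can check what is installed with, for example:
pkg-config --modversion libavformat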
5. Run
./my_streamer
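Frames after the 100th are written to the working directory as test101.ppm, test102.ppm, and so on; a PPM file can be converted for easier viewing with, e.g.:
ffmpeg -i test101.ppm test101.png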
III. Using the FFmpeg libraries from C/C++ to decode an RTSP stream, save frames as PPM or JPG images, and display them with OpenCV
1. Code: my_streamer.cpp
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <jpeglib.h>
}
static void CopyDate(AVFrame *pictureFrame,int width,int height,int time);
//static void SaveFrame_mmp(AVFrame *pictureFrame, int width, int height, int iFrame);
static void SaveFrame_jpg_uselibjpeg(AVFrame *pictureFrame, int width, int height, int iFrame);
int main(int argc, char** argv) {
// Open the initial context variables that are needed
SwsContext *img_convert_ctx;
AVFormatContext* format_ctx = avformat_alloc_context();
AVCodecContext* codec_ctx = NULL;
int video_stream_index = -1;
// Register everything
av_register_all();
avformat_network_init();
//open RTSP
if (avformat_open_input(&format_ctx, "rtsp://192.168.31.100:8656/main",
NULL, NULL) != 0) {
return EXIT_FAILURE;
}
if (avformat_find_stream_info(format_ctx, NULL) < 0) {
return EXIT_FAILURE;
}
//search for the video stream
for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
video_stream_index = i;
}
if (video_stream_index < 0) //no video stream found
return EXIT_FAILURE;
AVPacket packet;
av_init_packet(&packet);
//open output file
AVFormatContext* output_ctx = avformat_alloc_context();
AVStream* stream = NULL;
int cnt = 0;
//start reading packets from stream and write them to file
av_read_play(format_ctx); //play RTSP
// Get the codec
AVCodec *codec = NULL;
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (!codec) {
exit(1);
}
// Add this to allocate the context by codec
codec_ctx = avcodec_alloc_context3(codec);
avcodec_get_context_defaults3(codec_ctx, codec);
avcodec_copy_context(codec_ctx, format_ctx->streams[video_stream_index]->codec);
std::ofstream output_file;
if (avcodec_open2(codec_ctx, codec, NULL) < 0)
exit(1);
img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height,
codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
SWS_BICUBIC, NULL, NULL, NULL);
int size = avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width,
codec_ctx->height);
uint8_t* picture_buffer = (uint8_t*) (av_malloc(size));
AVFrame* picture = av_frame_alloc();
AVFrame* picture_rgb = av_frame_alloc();
int size2 = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width,
codec_ctx->height);
uint8_t* picture_buffer_2 = (uint8_t*) (av_malloc(size2));
avpicture_fill((AVPicture *) picture, picture_buffer, AV_PIX_FMT_YUV420P,
codec_ctx->width, codec_ctx->height);
avpicture_fill((AVPicture *) picture_rgb, picture_buffer_2, AV_PIX_FMT_RGB24,
codec_ctx->width, codec_ctx->height);
//allocate a second RGB frame and buffer for the converted output
AVFrame *pictureRGB;
int numBytes;
uint8_t *buffer;
int i=0;
long prepts = 0;
pictureRGB = av_frame_alloc();
if(pictureRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width,
codec_ctx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
avpicture_fill((AVPicture *)pictureRGB, buffer, AV_PIX_FMT_RGB24,
codec_ctx->width, codec_ctx->height);
while (av_read_frame(format_ctx, &packet) >= 0 && cnt < 1000) { //read ~ 1000 frames
std::cout << "1 Frame: " << cnt << std::endl;
if (packet.stream_index == video_stream_index) { //packet is video
std::cout << "2 Is Video" << std::endl;
if (stream == NULL) { //create stream in file
std::cout << "3 create stream" << std::endl;
stream = avformat_new_stream(output_ctx,
format_ctx->streams[video_stream_index]->codec->codec);
avcodec_copy_context(stream->codec,
format_ctx->streams[video_stream_index]->codec);
stream->sample_aspect_ratio =
format_ctx->streams[video_stream_index]->codec->sample_aspect_ratio;
}
int check = 0;
packet.stream_index = stream->id;
std::cout << "4 decoding" << std::endl;
int result = avcodec_decode_video2(codec_ctx, picture, &check, &packet);
std::cout << "Bytes decoded " << result << " check " << check
<< std::endl;
if(check!=0 && cnt > 100)
{
// Convert the image to RGB24; reuse the img_convert_ctx created above,
// creating it here only if that earlier call failed
if(img_convert_ctx == NULL) {
int w = codec_ctx->width;
int h = codec_ctx->height;
img_convert_ctx = sws_getContext(w, h,
codec_ctx->pix_fmt,
w, h, AV_PIX_FMT_RGB24, SWS_BICUBIC,
NULL, NULL, NULL);
if(img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context!\n");
exit(1);
}
}
int ret = sws_scale(img_convert_ctx, picture->data, picture->linesize, 0,
codec_ctx->height, pictureRGB->data, pictureRGB->linesize);
// Save the frame to disk
SaveFrame_jpg_uselibjpeg(pictureRGB, codec_ctx->width, codec_ctx->height, cnt);
CopyDate(pictureRGB, codec_ctx->width, codec_ctx->height,packet.pts-prepts);
prepts = packet.pts;
}
/*
if (cnt > 100) //cnt < 0)
{
sws_scale(img_convert_ctx, picture->data, picture->linesize, 0,
codec_ctx->height, picture_rgb->data, picture_rgb->linesize);
std::stringstream file_name;
file_name << "test" << cnt << ".ppm";
output_file.open(file_name.str().c_str());
output_file << "P3 " << codec_ctx->width << " " << codec_ctx->height
<< " 255\n";
for (int y = 0; y < codec_ctx->height; y++) {
for (int x = 0; x < codec_ctx->width * 3; x++)
output_file
<< (int) (picture_rgb->data[0]
+ y * picture_rgb->linesize[0])[x] << " ";
}
output_file.close();
}
*/
cnt++;
}
av_free_packet(&packet);
av_init_packet(&packet);
}
av_frame_free(&picture);
av_frame_free(&picture_rgb);
av_frame_free(&pictureRGB);
av_free(picture_buffer);
av_free(picture_buffer_2);
av_free(buffer);
av_read_pause(format_ctx);
avio_close(output_ctx->pb);
avformat_free_context(output_ctx);
return (EXIT_SUCCESS);
}
static void CopyDate(AVFrame *picture,int width,int height,int time)
{
if(time <=0 ) time = 1;
int nChannels;
int stepWidth;
uchar* pData;
cv::Mat frameImage(cv::Size(width, height), CV_8UC3, cv::Scalar(0));
stepWidth = frameImage.step;
nChannels = frameImage.channels();
pData = frameImage.data;
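// copy pixel by pixel, swapping channels 0 and 2: sws_scale produced RGB, cv::Mat expects BGR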
for(int i = 0; i < height; i++)
{
for(int j = 0; j < width; j++)
{
pData[i*stepWidth+j*nChannels+0] = picture->data[0][i*picture->linesize[0]+j*nChannels+2];
pData[i*stepWidth+j*nChannels+1] = picture->data[0][i*picture->linesize[0]+j*nChannels+1];
pData[i*stepWidth+j*nChannels+2] = picture->data[0][i*picture->linesize[0]+j*nChannels+0];
}
}
cv::namedWindow("Video", cv::WINDOW_NORMAL);
cv::imshow("Video", frameImage);
cv::waitKey(1);
}
static void SaveFrame_mmp(AVFrame *picture, int width, int height, int iFrame)
{
FILE *pFile;
char szFilename[32];
int y;
// Open file
sprintf(szFilename, "frame%d.ppm", iFrame);
pFile=fopen(szFilename, "wb");
if(pFile==NULL)
{
printf("%s\n","create file fail!");
return;
}
// Write header
fprintf(pFile, "P6\n%d %d\n255\n", width, height);
// Write pixel data
for(y=0; y<height; y++)
fwrite(picture->data[0]+y*picture->linesize[0], 1, width*3, pFile);
// Close file
fclose(pFile);
}
static void SaveFrame_jpg_uselibjpeg(AVFrame* pFrame, int width, int height, int iFrame)
{
char fname[128] = { 0 };
struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPROW row_pointer[1];
int row_stride;
uint8_t *buffer;
FILE *fp;
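// pFrame->data[0] holds the packed RGB24 pixels produced by sws_scale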
buffer = pFrame->data[0];
sprintf(fname, "%s%d.jpg", "frame", iFrame);
fp = fopen(fname, "wb");
if (fp == NULL)
return;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_compress(&cinfo);
jpeg_stdio_dest(&cinfo, fp);
cinfo.image_width = width;
cinfo.image_height = height;
cinfo.input_components = 3;
cinfo.in_color_space = JCS_RGB;
jpeg_set_defaults(&cinfo);
jpeg_set_quality(&cinfo, 80, TRUE);
jpeg_start_compress(&cinfo, TRUE);
row_stride = pFrame->linesize[0]; //use the frame's own stride in case rows are padded
while (cinfo.next_scanline < height)
{
row_pointer[0] = &buffer[cinfo.next_scanline * row_stride];
(void)jpeg_write_scanlines(&cinfo, row_pointer, 1);
}
jpeg_finish_compress(&cinfo);
fclose(fp);
jpeg_destroy_compress(&cinfo);
return;
}
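The per-pixel channel swap in CopyDate can be avoided entirely by letting sws_scale write BGR24 straight into a cv::Mat's own buffer. A hedged alternative sketch, assuming the same codec_ctx and decoded picture as above (ShowFrameDirect is a hypothetical helper name, not part of the original program):
// Hedged sketch: convert directly to BGR24 into a cv::Mat, avoiding the manual R/B swap.
static void ShowFrameDirect(SwsContext **bgr_ctx, AVCodecContext *codec_ctx, AVFrame *picture)
{
int w = codec_ctx->width, h = codec_ctx->height;
if (*bgr_ctx == NULL) //create the YUV->BGR24 converter once
*bgr_ctx = sws_getContext(w, h, codec_ctx->pix_fmt,
w, h, AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
cv::Mat img(h, w, CV_8UC3);
uint8_t* dst_data[4] = { img.data, NULL, NULL, NULL };
int dst_linesize[4] = { (int) img.step, 0, 0, 0 };
sws_scale(*bgr_ctx, picture->data, picture->linesize, 0, h, dst_data, dst_linesize);
cv::imshow("Video", img);
cv::waitKey(1);
}
Called from the decode loop in place of CopyDate, with a SwsContext* initialised to NULL outside the loop.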
2. Dependency installation
1.
Install OpenCV (used for displaying the video).
2.
apt install libavformat-dev
apt install libavcodec-dev
apt install libswresample-dev
apt install libswscale-dev
apt install libavutil-dev
apt install libjpeg-dev
3.
sudo apt-get install libsdl1.2-dev
sudo apt-get install libsdl-image1.2-dev
sudo apt-get install libsdl-mixer1.2-dev
sudo apt-get install libsdl-ttf2.0-dev
sudo apt-get install libsdl-gfx1.2-dev
4.
Installing zmq on Ubuntu
(1) Download zmq: wget http://download.zeromq.org/zeromq-4.1.4.tar.gz (you can replace "4.1.4" with the latest version number)
(2) Extract: tar -zxvf zeromq-4.1.4.tar.gz
(3) Build and install
A. Run the configure script: ./configure
This may fail with:
configure: error: Package requirements (libsodium) were not met:
No package 'libsodium' found
Workaround: build without that library:
./configure --prefix=/home/ygy/zmq --without-libsodium (the path passed to --prefix is the directory zmq will be installed into)
B. Compile: make
C. Install: make install
D. Configure environment variables: vi /etc/profile (after saving, run source /etc/profile to apply; if you installed zmq under the custom --prefix above, substitute that directory for /usr/local below)
export C_INCLUDE_PATH="$C_INCLUDE_PATH:/usr/local/include"
export CPLUS_INCLUDE_PATH="$CPLUS_INCLUDE_PATH:/usr/local/include"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib"
export LIBRARY_PATH="$LIBRARY_PATH:/usr/local/lib"
Using zmq from C
Include the header: #include <zmq.h>
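A minimal hedged sketch of sending one frame buffer over zmq (the PUSH socket type and the endpoint tcp://127.0.0.1:5555 are illustrative assumptions, not part of the original program):
// Hedged sketch: push one RGB frame buffer over a zmq PUSH socket.
// In real code, create the context and socket once rather than per frame.
static void send_frame_zmq(const uint8_t *buf, int len)
{
void *ctx = zmq_ctx_new();
void *sock = zmq_socket(ctx, ZMQ_PUSH);
zmq_connect(sock, "tcp://127.0.0.1:5555");
zmq_send(sock, buf, len, 0); //e.g. the RGB buffer from the program above
zmq_close(sock);
zmq_ctx_destroy(ctx);
}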
3. Compile (on newer systems the OpenCV pkg-config module may be named opencv4 rather than opencv)
g++ -w my_streamer.cpp -o my_streamer $(pkg-config --cflags --libs libavformat libswscale libavcodec libavutil opencv libjpeg libzmq)
4. Run
./my_streamer
IV. Using the FFmpeg libraries from C/C++ to play a local video file
1. Reference: https://github.com/mpenkov/ffmpeg-tutorial
2. Dependency installation: see section II.3 above
3. Compile:
git clone https://github.com/mpenkov/ffmpeg-tutorial.git
cd ffmpeg-tutorial
make
4. Run:
./tutorial01.out your_video.avi (replace your_video.avi with a local video file)