问题一:
api-example.c的功能是先自己生成一个MPEG1的文件,然后再按帧保存成PGM文件。我的问题是PGM格式是个什么格式,为什么我通过PHOTOSHOP打开后,这些图像都不是原先的视频图像显示,而是原视频图像的黑白显示。
问题二:
我把api-example.c中原来的codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);改成codec = avcodec_find_decoder(CODEC_ID_MPEG2VIDEO);目的是测试一下他将一个已有的正常MPEG2文件按帧保存成PGM文件的效果。结果是这些图像都只能显示原先的视频图像的上面的三分之一部分(彩色显示),而无法完全显示。
问题三:
我把api-example.c中原有的保存PGM功能删掉加上了一段自己写的转码功能,该功能参照FFMPEG中的output_example.c。
代码如下:
len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
if (got_picture)
{
fflush(stdout);
if (img_convert_ctx == NULL)
{
img_convert_ctx = sws_getContext(c->width, c->height,PIX_FMT_YUV420P,c->width, c->height,video_st->codec- >pix_fmt, sws_flags, NULL, NULL, NULL);
}
tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
sws_scale(img_convert_ctx, picture->data, picture->linesize,0, c->height,tmp_picture->data,tmp_picture->linesize);
int out_size = avcodec_encode_video(video_st->codec,video_outbuf, video_outbuf_size, tmp_picture);
if (out_size > 0)
{
AVPacket pkt;
av_init_packet(&pkt);
if(video_st->codec->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= video_outbuf;
pkt.size= out_size;
int ret = av_interleaved_write_frame(oc, &pkt);
frame++;
}
avpkt.size -= len;
avpkt.data += len;
}
}
结果是这些图像还是只能显示原先的视频图像的上面的三分之一部分(彩色显示),而且马赛克严重。
1. 将代码作了一下修改:把 avcodec_decode_video 解码出来的数据经过 sws_scale 转换成 RGB 格式后再保存,得到的图像就正常了:
贴上我改过的代码:
/*
* Video decoding example
*/
/*
 * Write one frame of packed 24-bit RGB pixel data to a binary PPM ("P6") file.
 * (Despite the historical name, this writes PPM, not PGM: the caller converts
 * decoded frames to PIX_FMT_RGB24 before saving.)
 *
 * buf      - pointer to the first pixel row (packed R,G,B bytes)
 * wrap     - stride in bytes between successive rows (linesize); may be
 *            larger than xsize*3 because of alignment padding
 * xsize    - image width in pixels
 * ysize    - image height in pixels
 * filename - output path; overwritten if it already exists
 */
void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, char *filename)
{
    FILE *f;
    int i;

    /* "wb" matters on Windows: the pixel payload is binary. */
    f = fopen(filename, "wb");
    if (f == NULL) {
        /* was: printed a message but kept going and wrote through NULL */
        fprintf(stderr, "could not open %s for writing\n", filename);
        return;
    }

    /* PPM header: magic, width and height, maximum sample value.
       was: "P6/n..." — garbled escapes produced an invalid header. */
    fprintf(f, "P6\n%d %d\n255\n", xsize, ysize);

    /* Write one row at a time so any stride padding is skipped. */
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize * 3, f);

    fclose(f);
}
void video_decode_example(const char *outfilename, const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int frame, size, got_picture, len;
FILE *f;
AVFrame *picture;
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE], *inbuf_ptr;
char buf[1024];
AVFrame *pFrameRGB;
int numBytes;
uint8_t *buffer;
struct SwsContext *ctx;
int ctx_flag=1;
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
printf("Video decoding/n");
/* find the mpeg1 video decoder */
codec = avcodec_find_decoder(CODEC_ID_H264);
if (!codec) {
fprintf(stderr, "codec not found/n");
exit(1);
}
c= avcodec_alloc_context();
picture= avcodec_alloc_frame();
if(codec->capabilities&CODEC_CAP_TRUNCATED)
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
/* For some codecs, such as msmpeg4 and mpeg4, width and height
MUST be initialized there because this information is not
available in the bitstream. */
/* open it */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec/n");
exit(1);
}
/* the codec gives us the frame size, in samples */
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "could not open %s/n", filename);
exit(1);
}
/*
ctx = sws_getContext(c->width, c->height,
c->pix_fmt, c->width, c->height,
PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
if(ctx == NULL)
{
printf("cts initialized failed/n");
return 1;
}
*/
frame = 0;
for(;;) {
size = fread(inbuf, 1, INBUF_SIZE, f);
if (size == 0)
break;
/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
and this is the only method to use them because you cannot
know the compressed data size before analysing it.
BUT some other codecs (msmpeg4, mpeg4) are inherently frame
based, so you must call them with all the data for one
frame exactly. You must also initialize 'width' and
'height' before initializing them. */
/* NOTE2: some codecs allow the raw parameters (frame size,
sample rate) to be changed at any frame. We handle this, so
you should also take care of it */
/* here, we use a stream based decoder (mpeg1video), so we
feed decoder and see if it could decode a frame */
inbuf_ptr = inbuf;
while (size > 0) {
len = avcodec_decode_video(c, picture, &got_picture,
inbuf_ptr, size);
if (len < 0) {
fprintf(stderr, "Error while decoding frame %d/n", frame);
exit(1);
}
if (got_picture && frame<6) {
if(ctx_flag)
{
ctx = sws_getContext(c->width, c->height,
c->pix_fmt, c->width, c->height,
PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
if(ctx == NULL)
{
printf("cts initialized failed/n");
return 1;
}
// Allocate an AVFrame structure
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, c->width,
c->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
c->width, c->height);
ctx_flag = 0;
}
/* the picture is allocated by the decoder. no need to
free it */
sws_scale(ctx, picture->data, picture->linesize,
0, c->height,(AVPicture *)pFrameRGB->data,
(AVPicture *)pFrameRGB->linesize);
printf("saving frame %3d/n", frame);
fflush(stdout);
snprintf(buf, sizeof(buf), outfilename, frame);
// sprintf(buf, "frame%d.pgm", frame);
pgm_save(picture->data[0], picture->linesize[0],
c->width, c->height, buf);
frame++;
}
size -= len;
inbuf_ptr += len;
}
}
/* some codecs, such as MPEG, transmit the I and P frame with a
latency of one frame. You must do the following to have a
chance to get the last frame of the video */
len = avcodec_decode_video(c, picture, &got_picture,
NULL, 0);
if (got_picture) {
printf("saving last frame %3d/n", frame);
fflush(stdout);
/* the picture is allocated by the decoder. no need to
free it */
snprintf(buf, sizeof(buf), outfilename, frame);
pgm_save(picture->data[0], picture->linesize[0],
c->width, c->height, buf);
frame++;
}
fclose(f);
avcodec_close(c);
av_free(c);
av_free(buffer);
av_free(pFrameRGB);
av_free(picture);
printf("/n");
}