三,开始编码
1,在采集视频的回调函数里面获取到图片的buffer
CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
记得使用这个buffer的时候,要先加锁:在开始的时候
CVPixelBufferLockBaseAddress(pixelBuffer, 0); 结束了解锁CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
2.接下来就是ffmpeg的编码了,网上有蛮多资料我就照搬了
/*
 * Encode one batch of frames from the capture callback to H.264 with FFmpeg
 * (legacy API: avcodec_alloc_frame / avpicture_fill / avcodec_encode_video2).
 * Reads the BGRA pixels of `sampleBuffer`, converts to YUV420P with swscale,
 * and appends the encoded packets to the already-open FILE* `fp`.
 */
CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
/* The base address is only valid between Lock/Unlock. */
CVPixelBufferLockBaseAddress(pixelBuffer, 0);

int width  = (int)CVPixelBufferGetWidth(pixelBuffer);
int height = (int)CVPixelBufferGetHeight(pixelBuffer);
unsigned char *rawPixelBase = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);

/* Register codecs BEFORE looking anything up (was done after frame setup). */
avcodec_register_all();
av_register_all();

AVFrame *pFrame = avcodec_alloc_frame();  /* wraps the source BGRA buffer   */
AVFrame *outpic = avcodec_alloc_frame();  /* destination YUV420P frame      */
pFrame->quality = 0;
/* BGR32 is a packed format: only data[0]/linesize[0] are populated. */
avpicture_fill((AVPicture *)pFrame, rawPixelBase, PIX_FMT_BGR32, width, height);

printf("Video encoding\n");

/* Find the H.264 encoder. */
AVCodec *codec = avcodec_find_encoder(CODEC_ID_H264);
if (!codec) {
    fprintf(stderr, "codec not found\n");
    exit(1);
}

AVCodecContext *c = avcodec_alloc_context3(codec);
/* Sample parameters. Resolution must be a multiple of two. */
c->bit_rate     = 400000;
c->width        = 192;               /* NOTE(review): hard-coded, not `width` */
c->height       = 144;               /* NOTE(review): hard-coded, not `height` */
c->time_base    = (AVRational){1, 25};  /* 25 fps */
c->gop_size     = 10;                /* one intra frame every ten frames */
c->max_b_frames = 1;
c->pix_fmt      = PIX_FMT_YUV420P;
c->thread_count = 1;

if (avcodec_open2(c, codec, NULL) < 0) {
    fprintf(stderr, "could not open codec\n");
    exit(1);
}

/* Output buffer for encoded packets. */
int outbuf_size = 100000;
uint8_t *outbuf = malloc(outbuf_size);
if (!outbuf) {
    fprintf(stderr, "out of memory\n");
    exit(1);
}

/* Buffer backing the converted YUV420P image. */
int nbytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
uint8_t *outbuffer = (uint8_t *)av_malloc(nbytes);
avpicture_fill((AVPicture *)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height);

/* Create the scaler ONCE, not per iteration (was leaked 15 times). */
struct SwsContext *fooContext = sws_getContext(c->width, c->height, PIX_FMT_BGR32,
                                               c->width, c->height, PIX_FMT_YUV420P,
                                               SWS_POINT, NULL, NULL, NULL);

/*
 * Flip the source vertically ONCE by pointing at the last row with a
 * negative stride. Doing this inside the loop (as before) re-applied the
 * flip every iteration and walked the pointer out of the buffer — UB.
 * Only plane 0 exists for the packed BGR32 source.
 */
pFrame->data[0] += pFrame->linesize[0] * (height - 1);
pFrame->linesize[0] *= -1;

fflush(stdout);
for (int i = 0; i < 15; ++i) {
    /* BGRA -> YUV420P. */
    int scaled = sws_scale(fooContext, (const uint8_t **)pFrame->data,
                           pFrame->linesize, 0, c->height,
                           outpic->data, outpic->linesize);
    NSLog(@"sws_scale output rows=%d", scaled);

    /* Encode the converted frame. */
    AVPacket avpkt;
    int got_packet_ptr = 0;
    av_init_packet(&avpkt);
    avpkt.size = outbuf_size;
    avpkt.data = outbuf;
    /* Return value is a status (0 on success), NOT the packet size. */
    int ret = avcodec_encode_video2(c, &avpkt, outpic, &got_packet_ptr);
    if (ret < 0) {
        fprintf(stderr, "error encoding frame\n");
        break;
    }
    /* Only write when the encoder actually emitted a packet (B-frames
     * introduce latency, so early calls may produce nothing). */
    if (got_packet_ptr) {
        printf("encoding frame (size=%5d)\n", avpkt.size);
        fwrite(avpkt.data, 1, avpkt.size, fp);
    }
    av_free_packet(&avpkt);
}

/* Cleanup — release everything that was acquired above. */
sws_freeContext(fooContext);
free(outbuf);
av_free(outbuffer);           /* was leaked before */
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
avcodec_close(c);
av_free(c);
av_free(pFrame);
av_free(outpic);
H264编码
最新推荐文章于 2023-12-09 10:14:23 发布