Using FFmpeg on Windows to crop raw YUV data

1. Cropping from the command line

ffmpeg -pixel_format nv12 -f rawvideo -video_size 240x320 -i nv12_240x320.yuv -vf crop=120:160 -pix_fmt nv12 nv12_120x160.yuv
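
Note that when crop is given only the output size, as above, it centers the crop window in the source frame. To crop from the top-left corner like the code below does (offsetX = offsetY = 0), pass x and y explicitly; the result can then be previewed with ffplay (reusing the file names above):

ffmpeg -pixel_format nv12 -f rawvideo -video_size 240x320 -i nv12_240x320.yuv -vf crop=120:160:0:0 -pix_fmt nv12 nv12_120x160.yuv
ffplay -f rawvideo -pixel_format nv12 -video_size 120x160 nv12_120x160.yuv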

2. Cropping in code: crop 240x320 data down to 120x160

#include <iostream>
#include <fstream>

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/opt.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/imgutils.h"
};
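
// On Windows with MSVC, the FFmpeg import libraries can be pulled in here via
// #pragma comment (this assumes the shared "dev" package's default .lib names;
// adjust the names/paths to match your local FFmpeg build).
#ifdef _MSC_VER
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avdevice.lib")
#endif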
/*
srcBuffer: source YUV data
srcWidth:  source width
srcHeight: source height
dstBuffer: destination buffer
dstWidth:  destination width
dstHeight: destination height
offsetX:   crop start x coordinate
offsetY:   crop start y coordinate
format:    YUV pixel format, e.g. AV_PIX_FMT_NV12, AV_PIX_FMT_NV21, AV_PIX_FMT_YUV420P
*/
int32_t Crop(uint8_t* srcBuffer, uint32_t srcWidth, uint32_t srcHeight,
    uint8_t* dstBuffer, uint32_t dstWidth, uint32_t dstHeight,
    uint32_t offsetX, uint32_t offsetY, AVPixelFormat format) {
    // Create the source frame and describe the raw input buffer
    AVFrame* srcFrame = av_frame_alloc();
    srcFrame->width = srcWidth;
    srcFrame->height = srcHeight;
    srcFrame->format = format;
    // Point the frame's data/linesize at srcBuffer (no copy is made here)
    av_image_fill_arrays(srcFrame->data, srcFrame->linesize, srcBuffer, format, srcWidth, srcHeight, 1);
    // Build the crop filter graph
    AVFilterGraph* filterGraph = avfilter_graph_alloc();
    char args[512] = "";
    snprintf(args, sizeof(args),
        "buffer=video_size=%dx%d:pix_fmt=%d:time_base=1/1:pixel_aspect=1/1[in];" // Parsed_buffer_0
        "[in]crop=x=%d:y=%d:out_w=%d:out_h=%d[out];"                             // Parsed_crop_1
        "[out]buffersink",                                                       // Parsed_buffersink_2
        srcWidth, srcHeight, format,
        offsetX, offsetY, dstWidth, dstHeight);

    AVFilterInOut* inputs = NULL;
    AVFilterInOut* outputs = NULL;
    avfilter_graph_parse2(filterGraph, args, &inputs, &outputs);
    avfilter_graph_config(filterGraph, NULL);
    AVFilterContext* srcFilterCtx = avfilter_graph_get_filter(filterGraph, "Parsed_buffer_0");
    AVFilterContext* sinkFilterCtx = avfilter_graph_get_filter(filterGraph, "Parsed_buffersink_2");

    // av_frame_clone deep-copies here because srcFrame does not own its buffers;
    // av_buffersrc_add_frame then takes ownership of the copy's references,
    // leaving dstFrame empty so the buffersink can fill it with the cropped frame.
    AVFrame* dstFrame = av_frame_clone(srcFrame);
    av_buffersrc_add_frame(srcFilterCtx, dstFrame);
    av_buffersink_get_frame(sinkFilterCtx, dstFrame);
    avfilter_graph_free(&filterGraph);

    // Copy the cropped planes out of dstFrame into the caller's buffer
    // (avpicture_layout/avpicture_get_size were removed in newer FFmpeg releases)
    av_image_copy_to_buffer(dstBuffer, av_image_get_buffer_size(format, dstWidth, dstHeight, 1),
        dstFrame->data, dstFrame->linesize, format, dstWidth, dstHeight, 1);

    av_frame_free(&srcFrame);
    av_frame_free(&dstFrame);
    return 0;
}

int main(void) {
    std::ifstream fin("nv21_240x320.yuv", std::ios::binary | std::ios::in);
    std::ofstream fout("nv21_120x160.yuv", std::ios::binary | std::ios::out);
    uint32_t width = 240;
    uint32_t height = 320;
    uint32_t size = width * height * 3 / 2;
    uint8_t* src_data = new uint8_t[size];
    uint8_t* dest_data = new uint8_t[size / 4];
    // fin.read() returns the stream, so the loop stops on a short or failed read
    // instead of processing a stale frame after EOF
    while (fin.read((char*)src_data, size)) {
        Crop(src_data, width, height, dest_data, width / 2, height / 2, 0, 0, AV_PIX_FMT_NV21);
        fout.write((char*)dest_data, size / 4);
    }

    delete[] src_data;
    delete[] dest_data;

    fout.close();
    fin.close();
    getchar();
    return 0;
}
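
For the simple NV12/NV21 case, the same crop can also be done without libavfilter by copying rows out of the Y plane and the interleaved chroma plane directly. The sketch below is a minimal illustration of that idea (it assumes even offsets and crop sizes and tightly packed buffers, i.e. stride == width, which matches the buffers used above); the CropNV12 name is just for this example:

#include <cstdint>
#include <cstring>

// Crop an NV12/NV21 frame by copying rows; src/dst are tightly packed (stride == width).
void CropNV12(const uint8_t* src, uint32_t srcWidth, uint32_t srcHeight,
              uint8_t* dst, uint32_t dstWidth, uint32_t dstHeight,
              uint32_t offsetX, uint32_t offsetY) {
    (void)srcHeight; // only needed for bounds checks, which are omitted in this sketch
    // Y plane: dstHeight rows of dstWidth bytes, starting at (offsetX, offsetY)
    const uint8_t* srcY = src;
    uint8_t* dstY = dst;
    for (uint32_t row = 0; row < dstHeight; ++row) {
        std::memcpy(dstY + row * dstWidth,
                    srcY + (offsetY + row) * srcWidth + offsetX,
                    dstWidth);
    }
    // Interleaved UV (or VU) plane: half as many rows; each cropped row is still
    // dstWidth bytes because one U/V byte pair covers two luma columns
    const uint8_t* srcUV = src + srcWidth * srcHeight;
    uint8_t* dstUV = dst + dstWidth * dstHeight;
    for (uint32_t row = 0; row < dstHeight / 2; ++row) {
        std::memcpy(dstUV + row * dstWidth,
                    srcUV + (offsetY / 2 + row) * srcWidth + offsetX,
                    dstWidth);
    }
}

This avoids the filter-graph setup entirely for a fixed format, at the cost of handling each pixel layout by hand, which is exactly what the crop filter does for you.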

 
