ffmpeg4.4 学习笔记 -(2)读取视频文件并用SDL 显示_商少-CSDN博客
修改 SaveFrame，改用 MFC bitmap 显示视频：
void CMFCAudioResampleDlg::OnBnClickedButtonChooseFile()
{
CRGB24PlayerDlg dlg;
std::thread([&dlg]()
{
dlg.DoModal();
}).detach();
// TODO: 在此添加控件通知处理程序代码
auto target = GetOneFile();
m_targetFilePath.SetWindowTextW(target.c_str());
UpdateData(TRUE);
CString strTargetWindowText;
m_targetFilePath.GetWindowTextW(strTargetWindowText);
AVFormatContext* avFormatContext = avformat_alloc_context();
USES_CONVERSION;
if (!PathFileExists(strTargetWindowText.GetBuffer())) {
return;
}
auto result = avformat_open_input(&avFormatContext, W2A((strTargetWindowText.GetBuffer())), nullptr, nullptr);
if (result < 0) {
assert(false);
}
result = avformat_find_stream_info(avFormatContext, NULL);
if (result < 0)
{
assert(false);
}
av_dump_format(avFormatContext, 0, NULL, 0);
// Find the first video stream
int videoStream = -1;
for (int i = 0; i < avFormatContext->nb_streams; i++)
if (avFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
break;
}
if (videoStream == -1)
return ; // Didn't find a video stream
AVCodecContext* avCodecContext = avcodec_alloc_context3(NULL);
result = avcodec_parameters_to_context(avCodecContext, avFormatContext->streams[videoStream]->codecpar);
if (result < 0)
{
assert(false);
}
avCodecContext->pkt_timebase = avFormatContext->streams[videoStream]->time_base;
AVCodec* codec = avcodec_find_decoder(avCodecContext->codec_id);
avCodecContext->codec_id = codec->id;
result = avcodec_open2(avCodecContext, codec, nullptr);
if (result < 0)
{
msgBoxFFmpegError(result);
assert(false);
}
AVFrame* pFrame = nullptr;
pFrame = av_frame_alloc();
// 我们最终的目的是将原始视频帧存储为24-bit RGB 视频。
AVFrame* pFrameRGB24 = av_frame_alloc();
uint8_t* bufferRawRGB24 = nullptr;
const int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, avCodecContext->width, avCodecContext->height, 1);
bufferRawRGB24 = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
av_image_fill_arrays(pFrameRGB24->data, pFrameRGB24->linesize, bufferRawRGB24, AV_PIX_FMT_RGB24, avCodecContext->width, avCodecContext->height, 1);
// 读取数据
// sws_ctx 定义了从pix_fmt 到 AV_PIX_FMT_RGB24的转换
struct SwsContext* sws_ctx = NULL;
int frameFinished;
// initialize SWS context for software scaling
sws_ctx = sws_getContext(avCodecContext->width,
avCodecContext->height,
avCodecContext->pix_fmt,
avCodecContext->width,
avCodecContext->height,
AV_PIX_FMT_RGB24,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
int i = 0;
AVPacket packet;
bool bReadEof = false;
while (true)
{
Sleep(33);
int readResult = -1;
if (!bReadEof)
{
readResult = av_read_frame(avFormatContext, &packet);
if (readResult < 0) {
::MessageBoxA(0, 0, GetFFmpegErorString(readResult).c_str(), 0);
bReadEof = true;
}
else if (readResult == 0) {
static int iCnt = 0;
if (packet.stream_index == videoStream) {
++iCnt;
}
CString str;
str.Format(L"cunt[%d]\r\n", iCnt);
OutputDebugStringW(str.GetBuffer());
}
}
if (bReadEof)
{
// 需要给刷空
avcodec_send_packet(avCodecContext, NULL);
}
else
{
// Is this a packet from the video stream?
if (packet.stream_index == videoStream) {
// Decode video frame
avcodec_send_packet(avCodecContext, &packet);
}
}
int receiveResult = avcodec_receive_frame(avCodecContext, pFrame);
// Did we get a video frame?
if (receiveResult == 0) {
// Convert the image from its native format to RGB
sws_scale(sws_ctx, (uint8_t const* const*)pFrame->data,
pFrame->linesize, 0, avCodecContext->height,
pFrameRGB24->data, pFrameRGB24->linesize);
++i;
// SaveFrame(pFrameRGB24, avCodecContext->width,
// avCodecContext->height, i);
dlg.DisplayNewRGB24(pFrameRGB24->data[0], avCodecContext->width, avCodecContext->height);
}
else if (receiveResult == AVERROR_EOF)
{
::MessageBoxA(0, 0, "read eof", 0);
break;
}
else if(receiveResult == AVERROR(EAGAIN)){
if (bReadEof) {
break;
}
else {
}
}
else {
msgBoxFFmpegError(receiveResult);
}
// Free the packet that was allocated by av_read_frame
if(readResult == 0)
av_packet_unref(&packet);
}
av_frame_free(&pFrame);
av_frame_free(&pFrameRGB24);
av_free(bufferRawRGB24);
avcodec_close(avCodecContext);
avformat_close_input(&avFormatContext);
}
其中CRGB24PlayerDlg 是一个用于显示rgb24图片的对话框:
void CRGB24PlayerDlg::OnPaint()
{
CPaintDC dc(this); // device context for painting
// TODO: 在此处添加消息处理程序代码
// 不为绘图消息调用 CDialogEx::OnPaint()
if (m_update && m_memDC)
{
::BitBlt(dc.m_hDC, 0, 0, m_currentSize.Width(), m_currentSize.Height(), m_memDC, 0, 0, SRCCOPY);
m_update = false;
}
}
void CRGB24PlayerDlg::DisplayNewRGB24(void* data, int width, int height)
{
std::lock_guard<std::mutex> lock(m_lock);
if (m_currentSize.Width() != width || m_currentSize.Height() != height) {
m_currentSize.left = 0;
m_currentSize.top = 0;
m_currentSize.right = width;
m_currentSize.bottom = height;
RECT rect = {};
rect.left = 0;
rect.top = 0;
rect.right = width;
rect.bottom = height;
MoveWindow(&rect, true);
}
if (m_memDC != NULL)
{
if(m_hPreBitmap)
SelectObject(m_memDC, m_hPreBitmap);
DeleteDC(m_memDC);
m_memDC = NULL;
}
//https://docs.microsoft.com/en-us/windows/win32/api/wingdi/ns-wingdi-bitmapinfoheader
BITMAPINFO bi = { 0 };
BITMAPINFOHEADER* bih = &bi.bmiHeader;
bih->biSize = sizeof(BITMAPINFOHEADER);
bih->biBitCount = 24;
bih->biWidth = width;
bih->biHeight = -height;
bih->biPlanes = 1;
bih->biCompression = BI_RGB;
m_memDC = CreateCompatibleDC(this->GetDC()->m_hDC);
void* dibData = nullptr;
HBITMAP hBitmap = CreateDIBSection(m_memDC, &bi, DIB_RGB_COLORS, (void**)&dibData, NULL, 0);
for (int i = 0; i < height; i++) {
const int lineAlign = ((((width * 24) + 31) & ~31) >> 3);
for (int j = 0; j < width; j++) {
//RGB to BGR
*((uint8_t*)dibData + i * lineAlign + j * 3 + 0) = *((uint8_t*)data + i * width * 3 + j * 3 + 2);
*((uint8_t*)dibData + i * lineAlign + j * 3 + 1) = *((uint8_t*)data + i * width * 3 + j * 3 + 1);
*((uint8_t*)dibData + i * lineAlign + j * 3 + 2) = *((uint8_t*)data + i * width * 3 + j * 3 + 0);
}
}
m_hPreBitmap = (HBITMAP)SelectObject(m_memDC, hBitmap);
m_update = true;
this->Invalidate(true);
}
其中核心逻辑是rgb24 到bitmap 24 的格式转换以及memdc 的处理