Linux下的x11录屏、V4l2摄像头获取、alsa录音、及视频拼接_xcb_get_image

img
img

网上学习资料一大堆,但如果学到的知识不成体系,遇到问题时只是浅尝辄止,不再深入研究,那么很难做到真正的技术提升。

需要这份系统化的资料的朋友,可以添加戳这里获取

一个人可以走的很快,但一群人才能走的更远!不论你是正从事IT行业的老鸟或是对IT行业感兴趣的新人,都欢迎加入我们的的圈子(技术交流、学习资源、职场吐槽、大厂内推、面试辅导),让我们一起学习成长!

            qDebug() << "VIDIOC_DQBUF";
            return 0;
        }
    }
    for (i = 0; i < n_buffers; ++i)
        if (buf.m.userptr == (unsigned long)buffers[i].start &&
            buf.length == buffers[i].length)
            break;
    if (fd <= 0 || !bStart)
        break;
    assert(i < n_buffers);
    printf("----------hhjjj=======\r\n");
    //videoFrameCB((void*)buf.m.userptr, 0, NULL, NULL);
    // process_image((void*)buf.m.userptr, 0);
    if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
    {
        qDebug() << "VIDIOC_QBUF";
        return 0;
    }
    break;
}
return 1;

}
int CMBCamera::open_device(void)
{
    // Scan /dev/video0..9 for a camera whose VID/PID matches m_strCameraVPid,
    // verify the node is a character device, then open it non-blocking.
    // Returns 1 on success, 0 on failure.
    qDebug() << "CMBCamera::open_device Start!";

    bool bFind = false;
    int nCameraId = 0;
    for (int i = 0; i < 10; i++)
    {
        if (isVaildCamera(i, m_strCameraVPid) == 0)
        {
            bFind = true;
            nCameraId = i;
            break;
        }
    }
    if (!bFind)
    {
        qDebug() << "CMBCamera::open_device isVaildCamera Fail!";
        return 0;
    }

    char szDevName[256] = {0};
    // snprintf instead of sprintf: bounded write into szDevName.
    snprintf(szDevName, sizeof(szDevName), "/dev/video%d", nCameraId);

    qDebug() << "CMBCamera::open_device" << szDevName;

    struct stat st;
    // NOTE(review): szDevName is a local array; if dev_name is a raw char*
    // (not a copying string type) it dangles once this function returns —
    // confirm dev_name's type and copy if needed.
    dev_name = szDevName;
    if (-1 == stat(szDevName, &st))
    {
        fprintf(stderr, "Cannot identify %s: %d, %s\n", szDevName, errno, strerror(errno));
        qDebug() << "open_device() stat Fail!";
        return 0;
    }
    if (!S_ISCHR(st.st_mode))
    {
        fprintf(stderr, "%s is no device\n", szDevName);
        qDebug() << "open_device() S_ISCHR Fail!";
        return 0;
    }
    // O_NONBLOCK so later buffer dequeues cannot stall the capture thread.
    fd = open(szDevName, O_RDWR | O_NONBLOCK, 0);
    if (-1 == fd)
    {
        // Bug fix: original format string contained garbled \E2\80? bytes
        // (mangled curly quotes) around %s.
        fprintf(stderr, "Cannot open '%s': %d, %s\n", szDevName, errno,
            strerror(errno));
        qDebug() << "open_device() open Fail!";
        return 0;
    }
    qDebug() << "open_device() ok";
    return 1;
}

void CMBCamera::printSolution(int fd)
{
    // Enumerate every pixel format / frame size the camera supports,
    // log each one, and record each combination in g_vcV4l2Pix for
    // later matching by GetCurrentCamera().
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    struct v4l2_fmtdesc fmt_1;
    struct v4l2_frmsizeenum frmsize;
    structFromatRecord formatRecord;

    qDebug() << "printSolution Camera Support param";
    fmt_1.index = 0;
    fmt_1.type = type;
    while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt_1) >= 0)
    {
        frmsize.pixel_format = fmt_1.pixelformat;
        frmsize.index = 0;
        while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &frmsize) >= 0)
        {
            // Record width/height/fourcc. Common fourcc values:
            // V4L2_PIX_FMT_YUYV = 1448695129, V4L2_PIX_FMT_MJPEG = 1196444237.
            // NOTE(review): .discrete is only valid when
            // frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE; stepwise entries
            // would record garbage here — confirm target cameras report
            // discrete sizes.
            formatRecord.width = frmsize.discrete.width;
            formatRecord.height = frmsize.discrete.height;
            formatRecord.pixfromat = fmt_1.pixelformat;
            g_vcV4l2Pix.push_back(formatRecord);

            if (frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE)
            {
                qDebug() << "1.V4L2_FRMSIZE_TYPE_DISCRETE line:" << __LINE__ << " : " << formatRecord.pixfromat << "-  " << frmsize.discrete.width << "X" << frmsize.discrete.height;
            }
            else if (frmsize.type == V4L2_FRMSIZE_TYPE_STEPWISE)
            {
                // Bug fix: stepwise sizes live in frmsize.stepwise, not
                // frmsize.discrete (the union member is invalid here).
                qDebug() << "2.V4L2_FRMSIZE_TYPE_STEPWISE line:" << __LINE__ << " : " << formatRecord.pixfromat << "-  " << frmsize.stepwise.max_width << "X" << frmsize.stepwise.max_height;
            }

            frmsize.index++;
        }

        fmt_1.index++;
    }
}

int CMBCamera::xioctl(int fd, int request, void* arg)
{
    // Thin wrapper around ioctl() that transparently retries the call
    // while it is interrupted by a signal (EINTR), returning the final
    // ioctl result to the caller.
    int ret;
    do {
        ret = ioctl(fd, request, arg);
    } while (ret == -1 && errno == EINTR);
    return ret;
}

int CMBCamera::init_device()
{
struct v4l2_capability cap;
struct v4l2_cropcap cropcap;
struct v4l2_crop crop;
struct v4l2_format fmt;
unsigned int min;
int i_pixret = -1;
if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap))
{
if (EINVAL == errno)
{
fprintf(stderr, “%s is no V4L2 device\n”, dev_name);
// exit(EXIT_FAILURE);
qDebug() << “no V4L2 device!”;
return 0;
}
else
{
qDebug() << “VIDIOC_QUERYCAP Success”;
}
}

if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
{
    fprintf(stderr, "%s is no video capture device\n", dev_name);
    qDebug() << "no video capture device";
    //exit(EXIT_FAILURE);
    return 0;
}

switch (io)
{
case IO_METHOD_READ:
    if (!(cap.capabilities & V4L2_CAP_READWRITE))
    {
        fprintf(stderr, "%s does not support read i/o\n", dev_name);
        qDebug() << "not support read i/o";
        return 0;
    }
    break;
case IO_METHOD_MMAP:
case IO_METHOD_USERPTR:
    if (!(cap.capabilities & V4L2_CAP_STREAMING))
    {
        fprintf(stderr, "%s does not support streaming i/o\n", dev_name);
        qDebug() << "not support streaming i/o";
        return 0;
    }
    break;
}

CLEAR(cropcap);
cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap))
{
    crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    crop.c = cropcap.defrect;
    if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop))
    {
        switch (errno)
        {
        case EINVAL:

            break;
        default:

            break;
        }
    }
}
else
{
}
CLEAR(fmt);
fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

// int w = WWIDTH;
// int h = HHEIGHT;
// getSolution(params, w, h);
// setCurrentVideo(m_bDisplayBG, w, h, m_bRestartEncode);

// fmt.fmt.pix.width = w;
// fmt.fmt.pix.height = h;
// //fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
// fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
// m_iCameraFormat = fmt.fmt.pix.pixelformat;

//设置宽高采集样式

// fmt.fmt.pix.width = g_vcV4l2Pix[0].width;
// fmt.fmt.pix.height = g_vcV4l2Pix[0].height;
//返回合适的宽高
i_pixret = GetCurrentCamera();
if(i_pixret == 1)
{
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
}
else if(i_pixret == 0)
{
fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
}
else {
qDebug() << “GetCurrentCamera error!”;
return 0;
}
fmt.fmt.pix.width = m_CurrentWidth;
fmt.fmt.pix.height = m_CurrentHeight;
//fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
//fmt.fmt.pix.pixelformat = g_vcV4l2Pix[0].pixfromat;
//fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG;
m_iCameraFormat = fmt.fmt.pix.pixelformat;
fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
qDebug() << “VIDIOC_S_FMT”;

//设置帧率
setfps = (struct v4l2_streamparm*)calloc(1, sizeof(struct v4l2_streamparm));
if(set_camera_streamparm(fd, setfps,  30) != 0)
{
    qDebug() << "set_camera_streamparm  30FPS fail!";
}

min = fmt.fmt.pix.width * 2;
if (fmt.fmt.pix.bytesperline < min)
    fmt.fmt.pix.bytesperline = min;
min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
if (fmt.fmt.pix.sizeimage < min)
    fmt.fmt.pix.sizeimage = min;

struct v4l2_streamparm  getfps;
getfps.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
int ret = ioctl(fd, VIDIOC_G_PARM, &getfps);
if(ret)
{
    qDebug() << "VIDIOC_G_PARM to get fps failed : " << ret;
}
else
{
    qDebug() << "VIDIOC_G_PARM to get fps succeed : FPS = " << getfps.parm.capture.timeperframe.denominator;
}

switch (io)
{
case IO_METHOD_READ:
    init_read(fmt.fmt.pix.sizeimage);
    break;
case IO_METHOD_MMAP:
    init_mmap();
    break;
case IO_METHOD_USERPTR:
    init_userp(fmt.fmt.pix.sizeimage);
    break;
}

qDebug() << "init_device() ok\n";
return 1;

}
//根据传入的摄像头宽高 找到匹配的摄像头支持的宽高
//根据传入的摄像头宽高 找到匹配的摄像头支持的宽高
// Pick the supported resolution closest (L1 distance) to the requested
// m_CurrentWidth x m_CurrentHeight, preferring MJPEG over YUYV, and
// overwrite m_CurrentWidth/m_CurrentHeight with the matched size.
// Returns 1 for MJPEG, 0 for YUYV, -1 when no usable entry exists.
int CMBCamera::GetCurrentCamera()
{
    // Bug fix: original indexed g_vcV4l2Pix[0] unconditionally and would
    // crash (UB) when printSolution() recorded no formats.
    if (g_vcV4l2Pix.empty())
        return -1;

    // Bug fix: original seeded both minimums from element 0 regardless of
    // its format, so a close-but-wrong-format element 0 could win with
    // index 0 never updated. Seed with a sentinel larger than any real
    // distance instead.
    int iminMJPEG = 0x7FFFFFFF;
    int iminYUY2 = 0x7FFFFFFF;
    int iIndexMJPEG = 0;
    int iIndexYUY2 = 0;
    bool bIsMJPEG = false;
    bool bIsYUY2 = false;
    for (size_t i = 0; i < g_vcV4l2Pix.size(); i++)
    {
        int dist = std::abs((int)g_vcV4l2Pix[i].width - m_CurrentWidth)
                 + std::abs((int)g_vcV4l2Pix[i].height - m_CurrentHeight);
        if (g_vcV4l2Pix[i].pixfromat == V4L2_PIX_FMT_MJPEG)
        {
            bIsMJPEG = true;
            if (iminMJPEG > dist)
            {
                iminMJPEG = dist;
                iIndexMJPEG = (int)i;
            }
        }
        else if (g_vcV4l2Pix[i].pixfromat == V4L2_PIX_FMT_YUYV)
        {
            bIsYUY2 = true;
            if (iminYUY2 > dist)
            {
                iminYUY2 = dist;
                iIndexYUY2 = (int)i;
            }
        }
    }
    if (bIsMJPEG)
    {
        m_CurrentWidth = g_vcV4l2Pix[iIndexMJPEG].width;
        m_CurrentHeight = g_vcV4l2Pix[iIndexMJPEG].height;
        return 1;
    }
    if (bIsYUY2)
    {
        m_CurrentWidth = g_vcV4l2Pix[iIndexYUY2].width;
        m_CurrentHeight = g_vcV4l2Pix[iIndexYUY2].height;
        return 0;
    }
    return -1;
}

void CMBCamera::start_capturing(void)
{
unsigned int i;
enum v4l2_buf_type type;
switch (io)
{
case IO_METHOD_READ:

    break;
case IO_METHOD_MMAP:
    for (i = 0; i < n_buffers; ++i)
    {
        struct v4l2_buffer buf;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
            qDebug() << "VIDIOC_QBUF";
    }
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
        qDebug() << "VIDIOC_STREAMON";
    break;
case IO_METHOD_USERPTR:
    for (i = 0; i < n_buffers; ++i)
    {
        struct v4l2_buffer buf;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_USERPTR;
        buf.m.userptr = (unsigned long)buffers[i].start;
        buf.length = buffers[i].length;
        if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
            qDebug() << "VIDIOC_QBUF";
    }

    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
        qDebug() << "VIDIOC_STREAMON";
    break;
}

qDebug() << "start_capturing() ok\n";

}

void CMBCamera::init_read(unsigned int buffer_size)
{
    // Allocate the single heap buffer (buffer_size bytes) used by
    // read()-based I/O. Aborts the process on allocation failure,
    // matching the other init_* helpers.
    buffers = (buffer*)calloc(1, sizeof(*buffers));
    if (!buffers)
    {
        fprintf(stderr, "Out of memory\n");
        qDebug() << "Out of memory Fail!";
        exit(EXIT_FAILURE);
    }
    buffers[0].length = buffer_size;
    buffers[0].start = malloc(buffer_size);
    if (!buffers[0].start)
    {
        fprintf(stderr, "Out of memory\n");
        qDebug() << "Out of memory2 Fail!";
        exit(EXIT_FAILURE);
    }
}

void CMBCamera::init_mmap(void)
{
    // Request 4 mmap buffers from the driver and map each into our address
    // space, filling buffers[] and n_buffers. Aborts if mmap I/O is
    // unsupported (EINVAL from REQBUFS).
    struct v4l2_requestbuffers req;
    CLEAR(req);
    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req))
    {
        if (EINVAL == errno)
        {
            fprintf(stderr,
                "%s does not support "
                "memory mapping\n",
                dev_name);
            qDebug() << "init_mmap xioctl Fail!";
            exit(EXIT_FAILURE);
        }
        else
        {
            // NOTE(review): execution continues after a non-EINVAL REQBUFS
            // failure; the QUERYBUF/mmap calls below will then also fail
            // and only be logged — confirm this best-effort behavior is
            // intended.
            qDebug() << "VIDIOC_REQBUFS";
        }
    }
    // The driver may grant fewer buffers than requested.
    if (req.count < 2)
    {
        fprintf(stderr, "Insufficient buffer memory on %s\n", dev_name);
    }
    buffers = (buffer*)calloc(req.count, sizeof(*buffers));
    if (!buffers)
    {
        fprintf(stderr, "Out of memory\n");
        qDebug() << "init_mmap Out of memory3 Fail!";
        exit(EXIT_FAILURE);
    }
    for (n_buffers = 0; n_buffers < req.count; ++n_buffers)
    {
        struct v4l2_buffer buf;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = n_buffers;
        if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
            qDebug() << "VIDIOC_QUERYBUF";
        buffers[n_buffers].length = buf.length;
        buffers[n_buffers].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
            MAP_SHARED, fd, buf.m.offset);
        if (MAP_FAILED == buffers[n_buffers].start)
            qDebug() << "mmap";
    }
}

void CMBCamera::init_userp(unsigned int buffer_size)
{
    // Tell the driver we will supply 4 user-pointer buffers, then malloc
    // each one (buffer_size bytes). Aborts if user-pointer I/O is
    // unsupported (EINVAL from REQBUFS) or allocation fails.
    struct v4l2_requestbuffers req;
    CLEAR(req);
    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_USERPTR;
    if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req))
    {
        if (EINVAL == errno)
        {
            fprintf(stderr,
                "%s does not support "
                "user pointer i/o\n",
                dev_name);
            qDebug() << "xioctl does not support user pointer i/o";
            exit(EXIT_FAILURE);
        }
        else
        {
            qDebug() << "VIDIOC_REQBUFS";
        }
    }
    // NOTE(review): 4 is hard-coded below while the driver may have
    // adjusted req.count — confirm the fixed count is intentional.
    buffers = (buffer*)calloc(4, sizeof(*buffers));
    if (!buffers)
    {
        fprintf(stderr, "Out of memory\n");
        qDebug() << "Out of memory";
        exit(EXIT_FAILURE);
    }
    for (n_buffers = 0; n_buffers < 4; ++n_buffers)
    {
        buffers[n_buffers].length = buffer_size;
        buffers[n_buffers].start = malloc(buffer_size);
        if (!buffers[n_buffers].start)
        {
            fprintf(stderr, "Out of memory\n");
            qDebug() << "Out of memory2";
            exit(EXIT_FAILURE);
        }
    }
}


### 3、alsa 录音



bool IniitAudioInfo()
{
int err = -1;
snd_pcm_hw_params_t *hwParam = NULL;
snd_pcm_uframes_t frames= 32;

snd_output_t* log;

err = snd_output_stdio_attach(&log, stdout, 0);

const char *name = “plughw:2,0”;
//if ((err = snd_pcm_open(&captureHandle, name, SND_PCM_STREAM_CAPTURE, 0)) < 0)
if ((err = snd_pcm_open(&captureHandle, “default”, SND_PCM_STREAM_CAPTURE, 0)) < 0)
{
qDebug() << “snd_pcm_open faild!” << snd_strerror(err);
return false;
}

if ((err = snd_pcm_hw_params_malloc(&hwParam)) < 0)
{
    qDebug() << "snd_pcm_hw_params_malloc faild!" << snd_strerror(err);
    return false;
}


if ((err = snd_pcm_hw_params_any(captureHandle, hwParam)) < 0)
{
    qDebug() << "snd_pcm_hw_params_any faild!" << snd_strerror(err);
    return false;
}

if ((err = snd_pcm_hw_params_set_access(captureHandle, hwParam, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0)
{
    qDebug() << "snd_pcm_hw_params_set_access faild!" << snd_strerror(err);
    return false;
}

//16位
if ((err = snd_pcm_hw_params_set_format(captureHandle, hwParam, SND_PCM_FORMAT_S16_LE)) < 0)
{
    qDebug() << "snd_pcm_hw_params_set_format faild!" << snd_strerror(err);
    return false;
}


//shuang通道
if ((err = snd_pcm_hw_params_set_channels(captureHandle, hwParam, 2)) < 0)
{
    qDebug() << "snd_pcm_hw_params_set_channels faild!" << snd_strerror(err);
    return false;
}

int dir = 0;;
unsigned int val = 48000;
//设置采样率,如果采样率不支持,会用硬件支持最接近的采样率
snd_pcm_hw_params_set_rate_near(captureHandle, hwParam,&val, &dir);

unsigned int buffer_time,period_time;
//获取最大的缓冲时间,buffer_time单位为us,500000us=0.5s
snd_pcm_hw_params_get_buffer_time_max(hwParam, &buffer_time, 0);
//printf("max_buffer_time:%d\n",buffer_time);
if ( buffer_time >500000)
    buffer_time = 500000;

//设置缓冲时间
snd_pcm_hw_params_set_buffer_time_near(captureHandle, hwParam, &buffer_time, 0);

//设置采样周期时间,计算方法38帧/秒,48000/38=1263点/帧
period_time = 26315;
snd_pcm_hw_params_set_period_time_near(captureHandle, hwParam, &period_time, 0);

//让这些参数设置到PCM设备
snd_pcm_hw_params(captureHandle, hwParam);

//这个frames并不是指帧率,而是1263采样点数/帧
snd_pcm_hw_params_get_period_size(hwParam,&frames, &dir);

// LenTotal = frames * 4 * 2;//两倍申请
int LenTotal = 12000;//两倍申请
if(m_Audiobuffer == NULL)
{
m_Audiobuffer = (unsigned char *) malloc(LenTotal);
}
if(m_Audiobuffer == NULL)
{
qDebug() << “snd_pcm_hw_params_set_channels faild!” << snd_strerror(err);
return false;
}
qDebug() << “snd_pcm_open success!”;
// int size = frames * 4;

// FILE *fp = NULL;
// int file_len = 0;

// fp = fopen(“record_dump.raw”, “w+”);
// if(fp == NULL) {
// qDebug() << “open file fail!\n”;
// exit(1);
// }
// int rc;
// int i = 1000;
// frames = 1263;
// while (i–) {
// memset(m_Audiobuffer, 0, size);
// rc = snd_pcm_readi(captureHandle, m_Audiobuffer, frames);
// // printf(“snd_pcm_readi,frames:%d, m_Audiobuffer:%s\n”,frames, m_Audiobuffer);
// if (rc == -EPIPE)
// {
// /* EPIPE means overrun */
// qDebug() << “overrun occurred\n”;
// snd_pcm_prepare(captureHandle);
// continue ;
// } else if (rc < 0) {
// qDebug() << "error from read: " << snd_strerror(rc);
// continue ;
// } else if (rc != (int)frames) {
// qDebug() << “short read, read %d frames\n”;
// continue ;
// }

// if (fp) {
// //file_len = fwrite(m_Audiobuffer, 1, frames, fp);
// file_len = fwrite(m_Audiobuffer, 1, size, fp);
// qDebug() << “fwrite:”<< file_len << " bytes \n" ; //5024字节

// } else {
// qDebug() << “%s[%d] file open fail\n” << func<< LINE;
// }
// }
// fclose(fp);
// //close(fd_f);
// snd_pcm_drain(captureHandle);
// snd_pcm_close(captureHandle);
// free(m_Audiobuffer);
return true;
}


这里要注意上述参数的设置;并且重采样时输入与输出的采样率要保持一致,否则录出的声音可能会不清楚。



//获取数据 还需要重采样
snd_pcm_uframes_t frames= 1024;
sizeReadi = snd_pcm_readi(captureHandle, m_Audiobuffer, frames);
if(sizeReadi < 0)
{
continue;
}
memcpy(m_pinputFramebuf,m_Audiobuffer,m_inputFrameBuffsize);

    m_AudioTimePts = av_rescale_q(av_gettime()-m_first_aud_time, time_base_q, m_pAudio_st->time_base);
    m_inputFrame->pkt_pts = m_inputFrame->pts = m_AudioTimePts;

    //重采样
    AVFrame *pOutFrame = NULL;
    ret = AudioConvert(m_inputFrame, AV_SAMPLE_FMT_FLTP, 2, 48000, &pOutFrame);
    if(ret != 0)
    {
        qDebug() << "AudioConvert Fail!";
        continue;
    }

/
int32_t CManager::AudioConvert(
const AVFrame* pInFrame, // 输入音频帧
AVSampleFormat eOutSmplFmt, // 输出音频格式
int32_t nOutChannels, // 输出音频通道数
int32_t nOutSmplRate, // 输出音频采样率
AVFrame** ppOutFrame) // 输出视频帧
{
//struct SwrContext* pSwrCtx = nullptr;
AVFrame* pOutFrame = nullptr;

// 创建格式转换器,
int64_t nInChnlLayout = av_get_default_channel_layout(pInFrame->channels);
int64_t nOutChnlLayout = (nOutChannels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;

if(m_pSwrCtx == NULL)
{
    m_pSwrCtx = swr_alloc();
    if (m_pSwrCtx == nullptr)
    {
        qDebug() << "swr_alloc Fail";
        return -1;
    }
    swr_alloc_set_opts(m_pSwrCtx,
        nOutChnlLayout, eOutSmplFmt, nOutSmplRate, nInChnlLayout,
        (enum AVSampleFormat)(pInFrame->format), pInFrame->sample_rate,
        0, nullptr);

    swr_init(m_pSwrCtx);
}
// 计算重采样转换后的样本数量,从而分配缓冲区大小
int64_t nCvtBufSamples = av_rescale_rnd(pInFrame->nb_samples, nOutSmplRate, pInFrame->sample_rate, AV_ROUND_UP);

// 创建输出音频帧
pOutFrame = av_frame_alloc();
pOutFrame->format = eOutSmplFmt;
pOutFrame->nb_samples = (int)nCvtBufSamples;
pOutFrame->channel_layout = (uint64_t)nOutChnlLayout;
int res = av_frame_get_buffer(pOutFrame, 0); // 分配缓冲区

img
img

既有适合小白学习的零基础资料,也有适合3年以上经验的小伙伴深入学习提升的进阶课程,涵盖了95%以上C C++开发知识点,真正体系化!

由于文件比较多,这里只是将部分目录截图出来,全套包含大厂面经、学习笔记、源码讲义、实战项目、大纲路线、讲解视频,并且后续会持续更新

如果你需要这些资料,可以戳这里获取

_init(m_pSwrCtx);
}
// 计算重采样转换后的样本数量,从而分配缓冲区大小
int64_t nCvtBufSamples = av_rescale_rnd(pInFrame->nb_samples, nOutSmplRate, pInFrame->sample_rate, AV_ROUND_UP);

// 创建输出音频帧
pOutFrame = av_frame_alloc();
pOutFrame->format = eOutSmplFmt;
pOutFrame->nb_samples = (int)nCvtBufSamples;
pOutFrame->channel_layout = (uint64_t)nOutChnlLayout;
int res = av_frame_get_buffer(pOutFrame, 0); // 分配缓冲区

[外链图片转存中…(img-LDGC1rrd-1715766814107)]
[外链图片转存中…(img-55J7vYGy-1715766814108)]

既有适合小白学习的零基础资料,也有适合3年以上经验的小伙伴深入学习提升的进阶课程,涵盖了95%以上C C++开发知识点,真正体系化!

由于文件比较多,这里只是将部分目录截图出来,全套包含大厂面经、学习笔记、源码讲义、实战项目、大纲路线、讲解视频,并且后续会持续更新

如果你需要这些资料,可以戳这里获取

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值