Hi3536: multi-channel PCM mixing on the audio output

The project requires mixing several audio streams into a single output, so a multi-channel PCM mixing feature was needed.

Wrapper functions around the HiSilicon audio library:

//Initialization
int AudioInit(AIO_MODE_E enWorkmode)
{
	HI_S32      s32Ret = HI_SUCCESS;
	ADEC_CHN 	AdChn = 0;
	AUDIO_DEV	AoDev = 0;

	ADEC_CHN_ATTR_S stAdecAttr;
	ADEC_ATTR_AAC_S stAdecAac;

	stAdecAttr.enType     = PT_AAC;
	stAdecAttr.u32BufSize = 20;
	stAdecAttr.enMode     = ADEC_MODE_STREAM;	/* AAC must be decoded in stream mode */
	stAdecAttr.pValue     = &stAdecAac;
	stAdecAac.enTransType = AAC_TRANS_TYPE_ADTS;

	
	/* create adec chn*/
	s32Ret = HI_MPI_ADEC_CreateChn(AdChn, &stAdecAttr);
	if (s32Ret)
	{
		printf("%s: HI_MPI_ADEC_CreateChn(%d) failed with %#x!\n", __FUNCTION__,AdChn,s32Ret);
		return s32Ret;
	}

    AIO_ATTR_S stAioAttr;
    stAioAttr.enSamplerate   = AUDIO_SAMPLE_RATE_44100; // output sample rate: 32 kHz, 44.1 kHz or 48 kHz
    stAioAttr.enBitwidth     = AUDIO_BIT_WIDTH_16;
    stAioAttr.enWorkmode     = enWorkmode;
    stAioAttr.enSoundmode 	 = AUDIO_SOUND_MODE_STEREO;

    stAioAttr.u32EXFlag      = 1;
    stAioAttr.u32FrmNum      = MAX_AUDIO_FRAME_NUM;
    stAioAttr.u32PtNumPerFrm = 1024;
    stAioAttr.u32ChnCnt      = 2; // stereo mode requires 2 channels
    stAioAttr.u32ClkChnCnt   = 1;
    stAioAttr.u32ClkSel      = 0;


    if (stAioAttr.u32ClkChnCnt == 0)
    {
        stAioAttr.u32ClkChnCnt = stAioAttr.u32ChnCnt;
    }
    
	//Set the AO device's public attributes
    s32Ret = HI_MPI_AO_SetPubAttr(AoDev, &stAioAttr);
    if (HI_SUCCESS != s32Ret)
    {
        printf("%sAoDev HI_MPI_AO_SetPubAttr(%d) failed with %#x!\n", __FUNCTION__, AoDev, s32Ret);
        return HI_FAILURE;
    }

	//Enable the AO device
    s32Ret = HI_MPI_AO_Enable(AoDev);
    if (HI_SUCCESS != s32Ret)
    {
        printf("%s: HI_MPI_AO_Enable(%d) failed with %#x!\n", __FUNCTION__, AoDev, s32Ret);
        return HI_FAILURE;
    }

	//Enable AO channel 0
    s32Ret = HI_MPI_AO_EnableChn(AoDev, 0);
    if (HI_SUCCESS != s32Ret)
    {
        printf("%s: HI_MPI_AO_EnableChn(0) failed with %#x!\n", __FUNCTION__, s32Ret);
        return HI_FAILURE;
    }

    return HI_SUCCESS;
}
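
AudioInit above only creates ADEC channel 0, while the mixer further down polls up to AUDIO_MAX_CHN decoder channels, so every additional AAC connection needs its own channel. A minimal sketch of such a helper, reusing the same attributes (the name CreateAdecChn is chosen here for illustration and is not part of the original code):

//Create one more AAC decoder channel for a new connection (sketch; attributes copied from AudioInit)
int CreateAdecChn(ADEC_CHN AdChn)
{
    ADEC_CHN_ATTR_S stAdecAttr;
    ADEC_ATTR_AAC_S stAdecAac;

    stAdecAttr.enType     = PT_AAC;
    stAdecAttr.u32BufSize = 20;
    stAdecAttr.enMode     = ADEC_MODE_STREAM;   //AAC must be decoded in stream mode
    stAdecAttr.pValue     = &stAdecAac;
    stAdecAac.enTransType = AAC_TRANS_TYPE_ADTS;

    HI_S32 s32Ret = HI_MPI_ADEC_CreateChn(AdChn, &stAdecAttr);
    if (HI_SUCCESS != s32Ret)
    {
        printf("HI_MPI_ADEC_CreateChn(%d) failed with %#x!\n", AdChn, s32Ret);
    }
    return s32Ret;
}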

//Send an encoded frame to ADEC
int AdecSendFrame(unsigned int dwAdeChn, unsigned long long u64FrameNo, unsigned char * pbyBuffer, unsigned int dwDataLen,HI_BOOL bBlock)
{
	HI_S32      s32Ret = HI_SUCCESS;
	AUDIO_STREAM_S stAudioStream;  
	
	stAudioStream.pStream = pbyBuffer;
	stAudioStream.u32Len = dwDataLen;
	stAudioStream.u64TimeStamp = u64FrameNo;
	
	s32Ret = HI_MPI_ADEC_SendStream(dwAdeChn, &stAudioStream, bBlock);
	if(HI_SUCCESS != s32Ret)
    {
        printf("%s: HI_MPI_ADEC_SendStream(%d) failed with %#x!\n",\
               __FUNCTION__, dwAdeChn, s32Ret);
    }
	
	return s32Ret;
}

//Fetch a decoded PCM frame from ADEC
int GetAdecEncodedFrame(unsigned int dwAdecChn, AUDIO_FRAME_INFO_S *ptFrmInfo, HI_BOOL bBlock)
{
	HI_S32 s32Ret = HI_SUCCESS;

	if(ptFrmInfo == NULL)
	{
		return HI_FAILURE;
	}

	s32Ret = HI_MPI_ADEC_GetFrame(dwAdecChn, ptFrmInfo, bBlock);
	if (s32Ret != HI_SUCCESS)
	{
		//HI_ERR_ADEC_BUF_EMPTY only means no decoded frame is ready yet, so don't log it as an error
		if (s32Ret != HI_ERR_ADEC_BUF_EMPTY)
		{
			printf("Failed to get frame from ADEC, s32Ret: %#x\n", s32Ret);
		}
		return s32Ret;
	}

	return HI_SUCCESS;
}

//Release a frame obtained from ADEC
void ReleaseADECFrame(unsigned int dwAdecChn, AUDIO_FRAME_INFO_S *ptFrmInfo)
{
	HI_MPI_ADEC_ReleaseFrame(dwAdecChn, ptFrmInfo);
}

//Send a mixed PCM frame to AO
int MPP_SendAudioFrameToAo(short *pLeftPcmBuffer,
                              short *pRightPcmBuffer,
                              int length, int sdwTimeoutMs)
{
	AUDIO_DEV	AoDev = 0;

    AUDIO_FRAME_S aoFrame;
    memset(&aoFrame, 0, sizeof(aoFrame));          //clear the fields we do not set explicitly
    aoFrame.enBitwidth = AUDIO_BIT_WIDTH_16;
    aoFrame.enSoundmode = AUDIO_SOUND_MODE_STEREO;
    aoFrame.pVirAddr[0] = pLeftPcmBuffer;
    aoFrame.pVirAddr[1] = pRightPcmBuffer;
    aoFrame.u32Len =  length;
    HI_S32 ret = HI_MPI_AO_SendFrame(AoDev,
                                     0,
                                     &aoFrame,
                                     sdwTimeoutMs);
    if (ret != HI_SUCCESS)
    {
        printf("HI_MPI_AO_SendFrame %#x\n", ret);
        return ret;
    }
    return HI_SUCCESS;
}

Mixing code:
The multi-channel PCM mixing algorithm follows this reference: https://blog.csdn.net/daska110/article/details/80322696
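
The core of the algorithm below is sample-wise addition with an adaptive attenuation ("decay") factor. As a quick worked example of what the code does: if two channels both hold the sample value 20000, the raw sum is 40000, which exceeds the 16-bit maximum of 32767; the output sample is clamped to 32767 and the decay factor becomes 32767 / 40000 ≈ 0.82, so the following sums are scaled by roughly 0.82, and the factor is then relaxed back towards 1 by (1 - decayFactor) / 32 per sample until the next overflow.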

#define AUDIO_DATA_TYPE short           // 16-bit PCM sample type

#define AUDIO_DATA_TYPE_MAX (32767)     // 2^15 - 1 (short)
#define AUDIO_DATA_TYPE_MIN (-32768)

#define WIDEN_TEMP_TYPE int             // 4-byte signed intermediate type, used during mixing to avoid overflow

#define AUDIO_MAX_CHN (32)

typedef int          SDWORD;
typedef unsigned int DWORD;

#include <math.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <vector>
#include <mutex>


using namespace std;

class AudioPcmMix
{
public:

    ~AudioPcmMix();

    static AudioPcmMix* GetInstance(void);

    // Sum all channels, then normalize
    void AddAndNormalization(vector< vector<AUDIO_DATA_TYPE> >&   allMixingSounds,
        DWORD                           dwRawDataCnt,
        vector<AUDIO_DATA_TYPE>&        rawDataBuffer);


    SDWORD PcmMixHandle();

    void DelAudioChnID(SDWORD sdwAudioChnID);
    void AddAudioChnID(SDWORD sdwAudioChnID);
    

private:
    AudioPcmMix();

public:  
    vector<AUDIO_DATA_TYPE>        m_leftRawDataBuffer;
    vector<AUDIO_DATA_TYPE>        m_rightRawDataBuffer;
    SDWORD m_sdwFrameLen;

private:
    vector<SDWORD> m_audioChnIDList;
    std::mutex m_audioMutex;
    SDWORD m_getFrameRet[AUDIO_MAX_CHN];	
    
};
AudioPcmMix::AudioPcmMix() : m_sdwFrameLen(0)
{
    m_audioChnIDList.reserve(AUDIO_MAX_CHN);
}

AudioPcmMix::~AudioPcmMix()
{
   
}

AudioPcmMix* AudioPcmMix::GetInstance(void)
{
    static AudioPcmMix mixStance;

    return &mixStance;
}

void AudioPcmMix::AddAndNormalization(vector< vector<AUDIO_DATA_TYPE> >&   allMixingSounds,
    DWORD                           dwRawDataCnt,
    vector<AUDIO_DATA_TYPE>&        rawDataBuffer)
{
    WIDEN_TEMP_TYPE Sum = 0;                                    // use a wider signed type (int, not unsigned DWORD) for the running sum
    double decayFactor = 1;                                     // attenuation factor (prevents overflow)

    for (DWORD i = 0; i < dwRawDataCnt; ++i)
    {
        Sum = 0;                                                // reset the accumulated value
        for (size_t wavNum = 0; wavNum < allMixingSounds.size(); ++wavNum)
        {
            Sum += allMixingSounds[wavNum][i];
        }
        Sum *= decayFactor;                                     // apply the attenuation factor to the summed sample

        // Update the attenuation factor:
        // 1. if the sum overflows the 16-bit range, derive the factor from the overflow ratio
        if (Sum > AUDIO_DATA_TYPE_MAX)
        {
            decayFactor = static_cast<double>(AUDIO_DATA_TYPE_MAX) / static_cast<double>(Sum);  // too large: attenuate by e.g. ~0.8
            Sum = AUDIO_DATA_TYPE_MAX;
        }
        else if (Sum < AUDIO_DATA_TYPE_MIN)
        {
            decayFactor = static_cast<double>(AUDIO_DATA_TYPE_MIN) / static_cast<double>(Sum);  // negative overflow: same ratio, still < 1
            Sum = AUDIO_DATA_TYPE_MIN;
        }

        // 2. smooth the attenuation factor back towards 1 (so an isolated clipped sample does not attenuate everything that follows)
        if (decayFactor < 1)
        {
            decayFactor += static_cast<double>(1 - decayFactor) / static_cast<double>(32);
        }

        rawDataBuffer.push_back(AUDIO_DATA_TYPE(Sum));          // narrow the int back down to short
    }
}

void AudioPcmMix::DelAudioChnID(SDWORD sdwAudioChnID)
{  
	m_audioMutex.lock();
     
    for(auto it = m_audioChnIDList.begin();it != m_audioChnIDList.end();it++)
    {
        if(*it == sdwAudioChnID)
        {
            m_getFrameRet[sdwAudioChnID] = -1;
            //discard this frame's data
            m_sdwFrameLen = 0;
            m_audioChnIDList.erase(it);
            break;
        }    
    }
    
    m_audioMutex.unlock();
}

void AudioPcmMix::AddAudioChnID(SDWORD sdwAudioChnID)
{   

	m_audioMutex.lock();
    

    for(auto it = m_audioChnIDList.begin();it != m_audioChnIDList.end();it++)
    {
        if(*it == sdwAudioChnID)
        {
            m_audioMutex.unlock();
            return;
        }    
    }
    
    m_audioChnIDList.push_back(sdwAudioChnID);
    
    m_audioMutex.unlock();
}



SDWORD AudioPcmMix::PcmMixHandle()
{
    m_sdwFrameLen = 0;
    
    //lock while the frame data is being processed
	m_audioMutex.lock();
    
    if(m_audioChnIDList.size() == 0)
    {       
        m_audioMutex.unlock();
        return -1;
    }
    
    memset(m_getFrameRet, -1, sizeof(m_getFrameRet));

    AUDIO_FRAME_INFO_S stFrmInfo[AUDIO_MAX_CHN];
    vector< vector<AUDIO_DATA_TYPE> >   allMixingSounds;
 
    m_leftRawDataBuffer.clear();
    m_rightRawDataBuffer.clear();

    for (auto &it : m_audioChnIDList)
    {
        SDWORD sdwChnID = it;
        if( (sdwChnID < 0) || (sdwChnID >= AUDIO_MAX_CHN))
        {
           continue; 
        }

        printf("sdwChnID = %d size() %d\n",sdwChnID,m_audioChnIDList.size());
		
		//需要进行阻塞处理,否则混的声音将不对
        m_getFrameRet[sdwChnID] = GetAdecEncodedFrame(sdwChnID, stFrmInfo + sdwChnID, Block);

        printf("=========== channel[%d] get frame finish: %#x================", sdwChnID, m_getFrameRet[sdwChnID]);
    
        if (m_getFrameRet[sdwChnID] == 0)
        {
            printf("channel[%d] frame len: %#x\n", sdwChnID, stFrmInfo[sdwChnID].pstFrame->u32Len);
            
            if ((m_sdwFrameLen == 0) || (m_sdwFrameLen > stFrmInfo[sdwChnID].pstFrame->u32Len))
            {
                m_sdwFrameLen   = stFrmInfo[sdwChnID].pstFrame->u32Len; 
            } 
            
        }    
    }
    
    for (auto &it : m_audioChnIDList)
    {
        SDWORD sdwChnID = it;

        if( (sdwChnID < 0) || (sdwChnID >= AUDIO_MAX_CHN) )
        {
           continue; 
        }

        if ((m_getFrameRet[sdwChnID] == 0))
        {           
            AUDIO_DATA_TYPE *tmp = (AUDIO_DATA_TYPE *)stFrmInfo[sdwChnID].pstFrame->pVirAddr[0];
            if(tmp != nullptr)
            {
                allMixingSounds.push_back(vector<AUDIO_DATA_TYPE>(tmp,tmp + m_sdwFrameLen/sizeof(AUDIO_DATA_TYPE)));						      
            }
            else
            {
                printf("Get Left Frame Data Error\n");
            }   
        }
    } 

    //mix the left channel
    AddAndNormalization(allMixingSounds,m_sdwFrameLen/2,m_leftRawDataBuffer);

    allMixingSounds.clear();

    for (auto &it : m_audioChnIDList)
    {
        SDWORD sdwChnID = it;

        if( (sdwChnID < 0) || (sdwChnID >= AUDIO_MAX_CHN) )
        {
           continue; 
        }

        if ((m_getFrameRet[sdwChnID] == 0))
        { 
            AUDIO_DATA_TYPE *tmp = (AUDIO_DATA_TYPE *)stFrmInfo[sdwChnID].pstFrame->pVirAddr[1];
            						
            if(tmp != nullptr)
            {
                allMixingSounds.push_back(vector<AUDIO_DATA_TYPE>(tmp,tmp + m_sdwFrameLen/sizeof(AUDIO_DATA_TYPE)));						      
            }
            else
            {
                printf("Get Right Frame Data Error\n");
            }
        }
    }

    //mix the right channel
    AddAndNormalization(allMixingSounds,m_sdwFrameLen/2,m_rightRawDataBuffer);

    for (auto &it : m_audioChnIDList)
    {
        SDWORD sdwChnID = it;

        if( (sdwChnID < 0) || (sdwChnID >= AUDIO_MAX_CHN) )
        {
            continue; 
        }

        if ((m_getFrameRet[sdwChnID] != 0))
        {    
            continue;
        }
        
        //release the frames fetched above
        ReleaseADECFrame(sdwChnID, stFrmInfo + sdwChnID);
    }
    
    
    m_audioMutex.unlock();

	return 0;
}

Mixing thread:


void AudioPcmMixThread(void)
{
    prctl(PR_SET_NAME,"AudioPcmMixThread");
    SDWORD sdwTimeoutMs = 0;
    SDWORD sdwRet = -1;

    while(1)
    {            
     	//produce one mixed PCM frame
        if( AudioPcmMix::GetInstance()->PcmMixHandle() == 0)
        {      
        	//send the mixed PCM data to AO
            if(AudioPcmMix::GetInstance()->m_sdwFrameLen > 0)
            {
                sdwRet = MPP_SendAudioFrameToAo(AudioPcmMix::GetInstance()->m_leftRawDataBuffer.data(),
                                                AudioPcmMix::GetInstance()->m_rightRawDataBuffer.data(),
                                                AudioPcmMix::GetInstance()->m_sdwFrameLen,
                                                sdwTimeoutMs);
            }   

            usleep(1*1000);               
        }
        else
        {
            sleep(1);
        }  
    } 
}

Our project transports audio as AAC, so each stream first has to be decoded to PCM with the ADEC module, the PCM streams are then mixed, and the result is finally sent to AO, which gives us multi-channel mixed output.
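
As a minimal sketch of how the pieces might be wired together (the work mode, the use of std::thread and the name StartAudioMixing are assumptions for illustration, not part of the original code):

#include <thread>

//Assumed start-up wiring: initialize ADEC/AO once, then run the mixing loop in its own thread.
int StartAudioMixing(void)
{
    //AIO_MODE_I2S_MASTER is just an example work mode
    if (AudioInit(AIO_MODE_I2S_MASTER) != HI_SUCCESS)
    {
        return -1;
    }

    std::thread mixThread(AudioPcmMixThread);   //runs the mixing loop shown above
    mixThread.detach();
    return 0;
}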

In the program, one thread is created per AAC connection: the received AAC data is pushed into that connection's decoder channel via AdecSendFrame, and the mixing thread then pulls the decoded PCM from every channel via GetAdecEncodedFrame and performs the multi-channel mix. A sketch of such a per-connection thread follows.
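
The sketch below assumes that ReceiveAacFrame() is a hypothetical placeholder for whatever delivers one complete ADTS frame from the network, and that bRunning stands in for the connection's lifetime; neither is part of the original code.

//Per-connection handler (sketch). sdwChnID is the ADEC channel bound to this AAC connection.
void AacConnectionThread(SDWORD sdwChnID, volatile bool &bRunning)
{
    unsigned char abyAacBuf[2048];
    unsigned long long u64FrameNo = 0;

    //register this decoder channel with the mixer
    AudioPcmMix::GetInstance()->AddAudioChnID(sdwChnID);

    while (bRunning)
    {
        //assumed to return the length of one complete ADTS frame, or <= 0 on error/close
        int sdwLen = ReceiveAacFrame(abyAacBuf, sizeof(abyAacBuf));
        if (sdwLen <= 0)
        {
            break;
        }

        //hand the frame to ADEC; the mixing thread picks up the decoded PCM
        AdecSendFrame(sdwChnID, u64FrameNo++, abyAacBuf, sdwLen, HI_TRUE);
    }

    //unregister the channel so the mixer stops waiting on it
    AudioPcmMix::GetInstance()->DelAudioChnID(sdwChnID);
}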
