FFmpeg 集成阿里云语音识别与 RocketMQ 服务

依赖版本: ffmpeg 3.4.6
阿里 NLS SDK 下载: http://download.taobaocdn.com/freedom/33762/compress/NlsSdkCpp2.zip?spm=a2c4g.11186623.2.15.7eb371a972MFE2&file=NlsSdkCpp2.zip
RocketMQ 环境搭建见另一篇博文: https://blog.csdn.net/qq_37003193/article/details/91984714
#include <unistd.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <ctime>
#include <map>
#include <string>
#include <iostream>
#include <vector>
#include <fstream>
#include <queue>
#include <list>
#include "speechTranscriberSyncRequest.h"
#include "nlsClient.h"
#include "nlsEvent.h"
#include "nlsCommonSdk/Token.h"
#include "mediaAudioDetect.h"
#include "rocketmq/CProducer.h"
#include "rocketmq/CMessage.h"
#include "rocketmq/CSendResult.h"
extern "C" // FFmpeg is a pure-C library: disable C++ name mangling
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
}
using namespace std;

#define FRAME_SIZE 3200
#define SAMPLE_RATE 16000

using std::map;
using std::string;
using std::vector;
using std::cout;
using std::endl;
using std::ifstream;
using std::ios;

using namespace AlibabaNlsCommon;

using AlibabaNls::NlsClient;
using AlibabaNls::NlsEvent;
using AlibabaNls::LogDebug;
using AlibabaNls::LogInfo;
using AlibabaNls::AudioDataStatus;
using AlibabaNls::AUDIO_FIRST;
using AlibabaNls::AUDIO_MIDDLE;
using AlibabaNls::AUDIO_LAST;
using AlibabaNls::SpeechTranscriberSyncRequest;

/**
 * Global service-auth token and its expiry timestamp.
 * Before each service call, check whether the token has expired; if so,
 * regenerate it from the AccessKey ID / AccessKey Secret and update both
 * the global token and its expiry timestamp.
 * NOTE: do not regenerate a token on every call — refresh only when it is
 * about to expire. All concurrent sessions may share a single token.
 */
// SECURITY NOTE(review): hard-coded credentials in source; move to
// configuration or environment variables before shipping.
std::string g_akId = "LTAIM8t6rJhVf0a7";
std::string g_akSecret = "7fSeIEw2y4L7WIt9w1qZ0LP8PmO5Je";
std::string g_token = "";
long g_expireTime = -1;  // -1 means "no valid token yet"

// Custom thread parameters: everything the recognition worker thread needs.
struct ParamStruct {
string fileName;  // path of the media file to transcribe
string token;     // NLS service auth token
string appkey;    // NLS project appkey
};
/**
 * Publish one message synchronously through an already-started RocketMQ
 * producer.
 * @param producer started producer handle (borrowed, not owned here)
 * @param inMsg    message body to publish
 */
void StartSendMessage(CProducer *producer, string inMsg)
{
    CSendResult result;
    // Create the message; topic/tags/keys let consumers filter and trace it.
    CMessage *msg = CreateMessage("Test_Topic");
    SetMessageTags(msg, "Test_Tag");
    SetMessageKeys(msg, "Test_Keys");
    SetMessageBody(msg, inMsg.c_str());
    // Blocking send; result.sendStatus carries the broker's ack status.
    SendMessageSync(producer, msg, &result);
    cout << "send message[" << inMsg << "], result status:"
         << (int)result.sendStatus << ", msgBody:" << inMsg << endl;
    // The message object is caller-owned: release it after the send.
    DestroyMessage(msg);
}

/**
 * Create a short-lived RocketMQ producer, publish one message, then shut
 * the producer down.
 * NOTE(review): building and tearing down a producer per message is
 * expensive; reuse a long-lived producer if call volume grows.
 * @param m_message message body to publish
 */
void sendmsg(string m_message)
{
    cout << "Producer Initializing..." << endl;
    // Create the producer and point it at the name server.
    CProducer *producer = CreateProducer("avtr-producer");
    SetProducerNameServerAddress(producer, "192.168.2.217:9876");
    StartProducer(producer);
    cout << "Producer start..." << endl;
    // Publish the message synchronously.
    StartSendMessage(producer, m_message);
    // Shut down and release the producer.
    ShutdownProducer(producer);
    DestroyProducer(producer);
    cout << "Producer Shutdown!" << endl;
}

/**

  • 根据AccessKey ID和AccessKey Secret重新生成一个token,并获取其有效期时间戳
    /
    int generateToken(string akId, string akSecret, string
    token, long* expireTime) {
    NlsToken nlsTokenRequest;
    nlsTokenRequest.setAccessKeyId(akId);
    nlsTokenRequest.setKeySecret(akSecret);

    if (-1 == nlsTokenRequest.applyNlsToken()) {
    cout << "Failed: " << nlsTokenRequest.getErrorMsg() << endl; /获取失败原因/
    return -1;
    }

    *token = nlsTokenRequest.getToken();
    *expireTime = nlsTokenRequest.getExpireTime();

    return 0;
    }

/**
 * @brief Compute how long to sleep after a sendAudio call so that audio
 *        read from a file is pushed at roughly real-time speed.
 * @param dataSize     number of bytes just sent
 * @param sampleRate   sample rate, 8000 or 16000 Hz
 * @param compressRate compression ratio of the encoding (e.g. 10 for 10:1
 *                     16k opus; 1 for uncompressed PCM)
 * @return sleep time in milliseconds
 * @note For 8k 16-bit PCM, sleep ~100 ms per 1600 bytes sent; for 16k
 *       16-bit PCM, ~100 ms per 3200 bytes; for compressed formats scale
 *       by the compression ratio (e.g. 3200/10 = 320 bytes per 100 ms).
 */
unsigned int getSendAudioSleepTime(const int dataSize,
                                   const int sampleRate,
                                   const int compressRate) {
    // Only 16-bit samples are supported.
    const int bitsPerSample = 16;
    // Only mono audio is supported.
    const int channelCount = 1;
    // Raw audio bytes produced per second, then per millisecond, at this
    // sample rate / bit depth.
    const int bytesPerSecond = (sampleRate * bitsPerSample * channelCount) / 8;
    const int bytesPerMs = bytesPerSecond / 1000;
    // The uncompressed equivalent of the sent data divided by the real-time
    // byte rate gives the number of milliseconds the data represents.
    return (dataSize * compressRate) / bytesPerMs;
}

// 工作线程
void* pthreadFunc(void* arg) {
int sleepMs = 0;
// 0: 从自定义线程参数中获取token, 配置文件等参数.
ParamStruct* tst = (ParamStruct*)arg;
if (tst == NULL) {
cout << “arg is not valid.” << endl;
return NULL;
}
/*
* 创建实时音频流同步识别SpeechTranscriberSyncRequest对象.
* request对象在一个会话周期内可以重复使用.
* 会话周期是一个逻辑概念. 比如Demo中, 指读取, 发送完整个音频文件数据的时间.
* 音频文件数据发送结束时, 可以releaseTranscriberSyncRequest()释放对象.
* createTranscriberSyncRequest(), sendSyncAudio(), getTranscriberResult(), releaseTranscriberSyncRequest()请在
* 同一线程内完成, 跨线程使用可能会引起异常错误.
/
/

* 1: 创建实时音频流识别SpeechTranscriberSyncRequest对象
/
SpeechTranscriberSyncRequest
request = NlsClient::getInstance()->createTranscriberSyncRequest();
if (request == NULL) {
cout << “createTranscriberSyncRequest failed.” << endl;
return NULL;
}
request->setAppKey(tst->appkey.c_str()); // 设置AppKey, 必填参数, 请参照官网申请
request->setFormat(“pcm”); // 设置音频数据编码格式, 默认是pcm
request->setSampleRate(SAMPLE_RATE); // 设置音频数据采样率, 可选参数,目前支持16000, 8000. 默认是16000
request->setIntermediateResult(true); // 设置是否返回中间识别结果, 可选参数. 默认false
request->setPunctuationPrediction(true); // 设置是否在后处理中添加标点, 可选参数. 默认false
request->setInverseTextNormalization(true); // 设置是否在后处理中执行数字转写, 可选参数. 默认false
//语音断句检测阈值,一句话之后静音长度超过该值,即本句结束,合法参数范围200~2000(ms),默认值800ms
// request->setMaxSentenceSilence(800);
// request->setCustomizationId(“TestId_123”); //定制模型id, 可选.
// request->setVocabularyId(“TestId_456”); //定制泛热词id, 可选.
request->setToken(tst->token.c_str()); // 设置账号校验token, 必填参数 d
av_register_all();
avformat_network_init();
// get format from audio file
AVFormatContext* format = avformat_alloc_context();
// open input stream
if (avformat_open_input(&format, tst->fileName.c_str(), NULL, NULL) != 0) {
return NULL;
}
// find input stream
if (avformat_find_stream_info(format, NULL) < 0) {
return NULL;
}
// Find the index of the first audio stream
int stream_index =- 1;
for (int i=0; inb_streams; i++) {
if (format->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
stream_index = i;
break;
}
}
if (stream_index == -1) {
return NULL;
}
AVStream* stream = format->streams[stream_index];
AVCodecContext pCodecCtx;
AVCodec pCodec;
pCodecCtx=format->streams[stream_index]->codec;
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL){
printf(“Codec not found.\n”);
}
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
printf(“Could not open codec.\n”);
}
// prepare resampler
struct SwrContext
swr = swr_alloc();
av_opt_set_int(swr, “in_channel_count”, pCodecCtx->channels, 0);
av_opt_set_int(swr, “out_channel_count”, 1, 0);
av_opt_set_int(swr, “in_channel_layout”, pCodecCtx->channel_layout, 0);
av_opt_set_int(swr, “out_channel_layout”, AV_CH_LAYOUT_MONO, 0);
av_opt_set_int(swr, “in_sample_rate”, pCodecCtx->sample_rate, 0);
av_opt_set_int(swr, “out_sample_rate”, SAMPLE_RATE, 0);
av_opt_set_sample_fmt(swr, “in_sample_fmt”, pCodecCtx->sample_fmt, 0);
av_opt_set_sample_fmt(swr, “out_sample_fmt”, AV_SAMPLE_FMT_S16, 0);
swr_init(swr);
if (!swr_is_initialized(swr)) {
fprintf(stderr, “Resampler has not been properly initialized\n”);
return NULL;
}
cout << "sample: " << pCodecCtx->sample_fmt << "channel_count: " << pCodecCtx->channels
<< "rate: " << pCodecCtx->sample_rate << “channel_layout” << pCodecCtx->channel_layout << endl;
// prepare to read data
AVPacket packet;
av_init_packet(&packet);
AVFrame
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, “Error allocating the frame\n”);
return NULL;
}
// iterate through frames
char data = NULL;
int retSize;
int sentSize =0;
while (av_read_frame(format, &packet) >= 0)
{
// decode one frame
if (packet.stream_index != stream_index)
{
continue;
}
int gotFrame;
if (avcodec_decode_audio4(pCodecCtx, frame, &gotFrame, &packet) < 0) {
break;
}
cout << “hello1” << endl;
if (!gotFrame) {
continue;
}
// resample frames
int out_nb_samples;
out_nb_samples = av_rescale_rnd(swr_get_delay(swr, pCodecCtx->sample_rate) +
AV_SAMPLE_FMT_S16, SAMPLE_RATE, pCodecCtx->sample_rate, AV_ROUND_UP);
cout << "out_nb_samples: " << out_nb_samples << endl;
uint8_t
buffer;
av_samples_alloc((uint8_t**) &buffer, NULL, 1, out_nb_samples, AV_SAMPLE_FMT_S16, 0);
int frame_count = swr_convert(swr, (uint8_t**) &buffer, out_nb_samples,
(const uint8_t**) frame->data, frame->nb_samples);
fprintf(stderr, “%d %d\n”, frame_count, frame->nb_samples);
// append resampled frames to data
// int nlen = frame->nb_samples * sizeof(char);
data = (char*) realloc(data, frame->nb_samples * sizeof(char));
memcpy(data, buffer, frame_count * sizeof(char));
AudioDataStatus status;
if (sentSize == 0) {
status = AUDIO_FIRST; // 发送第一块音频数据
}else
{
status = AUDIO_MIDDLE;
}
/*
* 2: 发送音频数据. sendAudio返回-1表示发送失败, 可在getTranscriberResult函数中获得失败的具体信息.
* 对于第四个参数: format为opu(发送原始音频数据必须为PCM, FRAME_SIZE大小必须为640)时, 需设置为true. 其它格式默认使用false.
/
int size = frame_count;
sentSize = size+sentSize;
retSize = request->sendSyncAudio(data, size, status);
/

语音数据发送控制:
语音数据是实时的, 不用sleep控制速率, 直接发送即可.
语音数据来自文件, 发送时需要控制速率, 使单位时间内发送的数据大小接近单位时间原始语音数据存储的大小.
/
if (retSize > 0) {
cout << “sendSyncAudio:” << retSize << endl;
sleepMs = getSendAudioSleepTime(retSize, SAMPLE_RATE, 1); // 根据 发送数据大小,采样率,数据压缩比 来获取sleep时间
}
// sleepMs = getSendAudioSleepTime(size, SAMPLE_RATE, 1);
/

* 3: 语音数据发送延时控制
/
#ifdef _WIN32
Sleep(sleepMs);
#else
usleep(sleepMs * 1000);
#endif
/

* 4: 获取识别结果
* 接收到EventType为TaskFailed, closed, completed事件类型时,停止发送数据
* 部分错误可收到多次TaskFailed事件,只要发生TaskFailed事件,请停止发送数据
/
bool isFinished = false;
string msg;
std::queue eventQueue;
request->getTranscriberResult(&eventQueue);
while (!eventQueue.empty()) {
NlsEvent _event = eventQueue.front();
eventQueue.pop();
NlsEvent::EventType type = _event.getMsgType();
switch (type)
{
case NlsEvent::TranscriptionStarted:
cout << "
********* Transcriber started " << endl;
break;
case NlsEvent::SentenceBegin:
cout << "
Detected sentence begin " << endl;
cout << "sentence index: " << _event.getSentenceIndex() << endl;
cout << "sentence time: " << _event.getSentenceTime() << endl;
break;
case NlsEvent::TranscriptionResultChanged:
cout << "
Transcriber has sentence middle result " << endl;
cout << "sentence index: " << _event.getSentenceIndex() << endl;
cout << "sentence time: " << _event.getSentenceTime() << endl;
cout << "result: " << _event.getResult() << endl;
msg = _event.getResult();
sendmsg(msg);
break;
case NlsEvent::SentenceEnd:
cout << "
Detected sentence end " << endl;
cout << "sentence index: " << _event.getSentenceIndex() << endl;
cout << "sentence time: " << _event.getSentenceTime() << endl;
cout << "sentence begin time: " << _event.getSentenceBeginTime() << endl;
cout << "sentence confidence: " << _event.getSentenceConfidence() << endl;
cout << "result: " << _event.getResult() << endl;
msg = _event.getResult();
sendmsg(msg);
break;
case NlsEvent::TranscriptionCompleted:
cout << "
Transcriber completed " << endl;
isFinished = true;
break;
case NlsEvent::TaskFailed:
cout << "
TaskFailed " << endl;
isFinished = true;
break;
case NlsEvent::Close:
cout << "
Closed ************" << endl;
isFinished = true;
break;
default:
break;
}
cout << "allMessage: " << _event.getAllResponse() << endl;
}
if (isFinished) {
break;
}
}
// 关闭音频文件
//fclose(file);
av_frame_free(&frame);
swr_free(&swr);
avcodec_close(pCodecCtx);
avformat_free_context(format);
cout << “close file” << endl;
/

* 5: 识别结束, 释放request对象
*/
NlsClient::getInstance()->releaseTranscriberSyncRequest(request);
return NULL;
}

/**

  • 识别单个音频数据
    /
    int speechTranscriberFile(const char
    appkey, string filepath) {
    /**

    • 获取当前系统时间戳,判断token是否过期
      */
      std::time_t curTime = std::time(0);
      if (g_expireTime - curTime < 10) {
      cout << “the token will be expired, please generate new token by AccessKey-ID and AccessKey-Secret.” << endl;
      if (-1 == generateToken(g_akId, g_akSecret, &g_token, &g_expireTime)) {
      return -1;
      }
      }
      ParamStruct pa;
      pa.token = g_token;
      pa.appkey = appkey;
      pa.fileName = filepath;

    pthread_t pthreadId;

    // 启动一个工作线程, 用于识别
    pthread_create(&pthreadId, NULL, &pthreadFunc, (void *)&pa);

    pthread_join(pthreadId, NULL);

    return 0;

}

void mediaAudioDetect::audioDetect()
{
string appkey = “2IOzi7tifFUa5USy”;
// 根据需要设置SDK输出日志, 可选. 此处表示SDK日志输出至log-Transcriber.txt, LogDebug表示输出所有级别日
int ret = NlsClient::getInstance()->setLogConfig(“log-transcriber.txt”, LogInfo);
if (-1 == ret) {
cout << “set log failed.” << endl;
}
// 识别单个音频数据
speechTranscriberFile(appkey.c_str(), m_videoFile);
// 识别多个音频数据
// speechTranscriberMultFile(appkey.c_str());
// 所有工作完成,进程退出前,释放nlsClient. 请注意, releaseInstance()非线程安全.
NlsClient::releaseInstance();
}
