Source code:
https://gitee.com/open-ascend/atlas_mindxsdk_samples/tree/master/contrib/cv/data_process/video_transcoding
I. Set up an RTSP server with live555
(1) Install live555
wget http://www.live555.com/liveMedia/public/live555-latest.tar.gz
tar xzf live555-latest.tar.gz
cd live
./genMakefiles linux-64bit  # this argument corresponds to one of the config.<suffix> files in the current directory
make
(2) Start the RTSP server
cd mediaServer
./live555MediaServer
(3) Verify that the RTSP server works
Upload a media file to the same directory as live555MediaServer:
[root@localhost mediaServer]#
[root@localhost mediaServer]# ls
COPYING DynamicRTSPServer.cpp DynamicRTSPServer.o live555MediaServer.cpp Makefile Makefile.tail video.264
COPYING.LESSER DynamicRTSPServer.hh live555MediaServer live555MediaServer.o Makefile.head version.hh
[root@localhost mediaServer]#
I uploaded a file named video.264. The HTTP access URL is http://<your ip>:8000/<file name>
Example: http://127.0.0.1:8000/video.264
The RTSP access URL is rtsp://<your ip>/<file name>
Example: rtsp://127.0.0.1/video.264
Then prepare a streaming media player; I used VLC.
Enter one of the test addresses above and click play. If the video shows up during playback, the RTSP server is working correctly.
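If no desktop player is available on the machine, a quick preliminary check is to confirm that the server is accepting TCP connections on its RTSP port. The snippet below is only a minimal sketch and is not part of the sample; the IP address and ports are assumptions (live555MediaServer listens on port 554 by default and falls back to 8554 when it cannot bind the privileged port).

// rtsp_port_check.cpp -- hypothetical helper, not part of the sample code.
// Confirms that something is accepting TCP connections on the RTSP port.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstdio>
#include <initializer_list>

static bool PortOpen(const char* ip, unsigned short port)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        return false;
    }
    sockaddr_in addr{};
    addr.sin_family = AF_INET;
    addr.sin_port = htons(port);
    inet_pton(AF_INET, ip, &addr.sin_addr);
    bool ok = (connect(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) == 0);
    close(fd);
    return ok;
}

int main()
{
    const char* ip = "127.0.0.1";  // assumption: the server runs on this host
    for (unsigned short port : {554, 8554}) {
        std::printf("rtsp://%s:%u/ %s\n", ip, static_cast<unsigned>(port),
                    PortOpen(ip, port) ? "reachable" : "not reachable");
    }
    return 0;
}

A successful connect only proves the port is open; the VLC test above remains the definitive check that the stream itself plays.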
II. Install the Ascend driver
Install the Ascend driver first, following the installation guide for your specific product. After installation, npu-smi info confirms a successful install:
[root@localhost ~]#
[root@localhost ~]# npu-smi info
+-------------------------------------------------------------------------------------------------+
| npu-smi 22.0.2 Version: 22.0.2 |
+------------------+--------------+---------------------------------------------------------------+
| NPU Name | Health | Power(W) Temp(C) Hugepages-Usage(page) |
| Chip Device | Bus-Id | AICore(%) Memory-Usage(MB) |
+==================+==============+===============================================================+
| 1 310 | OK | 12.8 45 0 / 0 |
| 0 0 | 0000:05:00.0 | 0 2621 / 8192 |
+==================+==============+===============================================================+
III. Install MindX SDK (mxVision)
(1) Obtain MindX SDK from the official website.
(2) mxVision user guide:
https://www.hiascend.com/document/detail/zh/mind-sdk/30rc3/quickstart/visionquickstart/visionquickstart_0000.html
(3) Install MindX SDK
./Ascend-mindxsdk-mxvision_3.0.RC2_linux-aarch64.run --install --install-path=/usr/local/sdk_home
--install-path specifies the installation path.
(4) On success, the installer prints the following:
Installing collected packages: mindx
Successfully installed mindx-3.0.RC2
(5) After installation, check the install directory; mxVision should be present:
[root@localhost sdk_home]#
[root@localhost sdk_home]# pwd
/usr/local/sdk_home
[root@localhost sdk_home]# ls
mxVision mxVision-3.0.RC2
[root@localhost sdk_home]#
[root@localhost sdk_home]#
(6) The OSD feature of MindX SDK requires an om model. After installation, run the following command to generate the om file:
bash /usr/local/sdk_home/mxVision/operators/opencvosd/generate_osd_om.sh
After it succeeds, the output looks like this:
[root@localhost ~]# bash /usr/local/sdk_home/mxVision/operators/opencvosd/generate_osd_om.sh
ASCEND_HOME is set to /usr/local/Ascend by user
Set ASCEND_VERSION to the default value:ascend-toolkit/latest
ATC start working now,please wait for a moment.
ATC run success, welcome to the next use.
The model has been successfully converted to om,please get it under /usr/local/sdk_home/mxVision/operators/opencvosd.
[root@localhost ~]#
(7) After installing MindX SDK, configure the environment variables.
Add the following to the .bashrc file:
# environment setup for mxVision
. /usr/local/sdk_home/mxVision/set_env.sh
To make the setting permanent via ~/.bashrc, do the following:
a) As the running user, run vi ~/.bashrc from any directory and append the line above after the last line of the file.
b) Save and exit with :wq!.
c) Run source ~/.bashrc so the change takes effect immediately.
IV. Run video_transcoding
1. In run_cpp.sh, set MX_SDK_HOME to the MindX SDK installation directory:
export MX_SDK_HOME=/usr/local/sdk_home/mxVision
2. In data/pipeline/test.pipeline, set rtspUrl to the RTSP stream you want to pull:
"rtspUrl": "rtsp://127.0.0.1/video.264"
3. Make run_cpp.sh executable:
chmod +x run_cpp.sh
4. Run run_cpp.sh:
./run_cpp.sh
5. out.h264 is generated in the working directory:
[root@localhost video_transcoding]#
[root@localhost video_transcoding]# ls
README.md build cpp data main out.h264 run_cpp.sh
[root@localhost video_transcoding]#
V. video_transcoding in detail
1. Processing flow: RTSP pull → video decode → image resize → video encode → file output.
Video decoding: uses the hardware (DVPP) decoder to convert the H.264 stream into YUV image data.
Image resizing: uses the hardware (DVPP) image acceleration to scale the image to the target size.
Video encoding: uses the hardware (DVPP) encoder to convert the YUV image data back into an H.264 video stream.
2. Pipeline walkthrough
{
    "encoder": {
        "stream_config": {            ## which device (chip) runs this stream
            "deviceId": "0"
        },
        "mxpi_rtspsrc0": {            ## pull the RTSP video stream
            "props": {
                "rtspUrl": "rtsp://127.0.0.1/video.264",
                "channelId": "0"
            },
            "factory": "mxpi_rtspsrc",
            "next": "queue0"
        },
        "queue0": {                   ## buffering queue
            "props": {
                "max-size-buffers": "50"
            },
            "factory": "queue",
            "next": "mxpi_videodecoder0"
        },
        "mxpi_videodecoder0": {       ## video decoding (pure hardware)
            "props": {
                "inputVideoFormat": "H264",
                "outputImageFormat": "YUV420SP_NV12",
                "vdecChannelId": "0"
            },
            "factory": "mxpi_videodecoder",
            "next": "queue1"
        },
        "queue1": {                   ## buffering queue
            "props": {
                "max-size-buffers": "50"
            },
            "factory": "queue",
            "next": "mxpi_imageresize0"
        },
        "mxpi_imageresize0": {        ## image resizing (pure hardware)
            "props": {
                "dataSource": "mxpi_videodecoder0",
                "resizeHeight": "288",
                "resizeWidth": "352"
            },
            "factory": "mxpi_imageresize",
            "next": "queue2"
        },
        "queue2": {                   ## buffering queue
            "props": {
                "max-size-buffers": "50"
            },
            "factory": "queue",
            "next": "mxpi_videoencoder0"
        },
        "mxpi_videoencoder0": {       ## video encoding (pure hardware)
            "props": {
                "dataSource": "mxpi_imageresize0",
                "imageHeight": "288",
                "imageWidth": "352",
                "inputFormat": "YUV420SP_NV12",
                "outputFormat": "H264",
                "fps": "1",
                "iFrameInterval": "50"
            },
            "factory": "mxpi_videoencoder",
            "next": "queue3"
        },
        "queue3": {                   ## buffering queue
            "props": {
                "max-size-buffers": "50"
            },
            "factory": "queue",
            "next": "appsink0"
        },
        "appsink0": {                 ## stream output
            "factory": "appsink"
        }
    }
}
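Between the decode, resize, and encode plugins, each buffer carries one YUV420SP_NV12 frame. As a rough sanity check (a back-of-the-envelope sketch, not SDK code), the payload size of the 352x288 frames produced by mxpi_imageresize0 works out as follows; actual DVPP buffers may be somewhat larger because stride alignment can pad each row.

// nv12_size.cpp -- illustrative arithmetic only, not part of the sample.
// YUV420SP_NV12 = full-resolution Y plane + interleaved UV plane at
// quarter resolution, i.e. 1.5 bytes per pixel on average.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t width = 352;                 // resizeWidth / imageWidth in the pipeline
    const uint32_t height = 288;                // resizeHeight / imageHeight in the pipeline
    const uint32_t ySize = width * height;      // luma plane: 101376 bytes
    const uint32_t uvSize = width * height / 2; // interleaved UV plane: 50688 bytes
    std::printf("NV12 frame at %ux%u: %u bytes (Y %u + UV %u)\n",
                width, height, ySize + uvSize, ySize, uvSize);
    return 0;
}

With max-size-buffers set to 50, queue2 (between the resizer and the encoder) can therefore hold roughly 50 x 152064 bytes, or about 7.6 MB of resized frames at this resolution.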
3. Source code walkthrough
namespace {
std::string ReadPipelineConfig(const std::string& pipelineConfigPath) // read the pipeline configuration file
{
    std::ifstream file(pipelineConfigPath.c_str(), std::ifstream::binary);
    if (!file) {
        LogError << pipelineConfigPath << " file does not exist.";
        return "";
    }
    file.seekg(0, std::ifstream::end);
    uint32_t fileSize = file.tellg();
    file.seekg(0);
    std::unique_ptr<char[]> data(new char[fileSize]);
    file.read(data.get(), fileSize);
    file.close();
    std::string pipelineConfig(data.get(), fileSize);
    return pipelineConfig;
}
}
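For comparison only (this is not the sample's code), the same whole-file read can be expressed with the standard stream-buffer API; the seekg/tellg version above is what the repository actually uses.

// Equivalent sketch using std::ostringstream, shown only for comparison.
#include <fstream>
#include <sstream>
#include <string>

std::string ReadPipelineConfigAlt(const std::string& path)
{
    std::ifstream file(path, std::ifstream::binary);
    if (!file) {
        return "";                // caller treats an empty string as failure, as above
    }
    std::ostringstream buffer;
    buffer << file.rdbuf();       // copy the entire file into the string buffer
    return buffer.str();
}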
int main(int argc, char* argv[])
{
    // Read the pipeline configuration file
    std::string pipelineConfigPath = "data/pipeline/test.pipeline";
    std::string pipelineConfig = ReadPipelineConfig(pipelineConfigPath);
    if (pipelineConfig == "") {
        LogError << "Read pipeline failed.";
        return APP_ERR_COMM_INIT_FAIL;
    }
    // Initialize the stream manager resources
    MxStream::MxStreamManager mxStreamManager;
    APP_ERROR ret = mxStreamManager.InitManager();
    if (ret != APP_ERR_OK) {
        LogError << "Failed to init Stream manager, ret = " << ret << ".";
        return ret;
    }
    // Create the stream(s) from the pipeline configuration
    ret = mxStreamManager.CreateMultipleStreams(pipelineConfig);
    if (ret != APP_ERR_OK) {
        LogError << "Failed to create Stream, ret = " << ret << ".";
        return ret;
    }
    // Create an empty H.264 output file
    FILE *fp = fopen("./out.h264", "wb");
    if (fp == nullptr) {
        LogError << "Failed to open file.";
        return APP_ERR_COMM_OPEN_FAIL;
    }
    bool m_bFoundFirstIDR = false;
    bool bIsIDR = false;
    uint32_t frameCount = 0;
    uint32_t MaxframeCount = 1000;
    std::string streamName = "encoder";
    int inPluginId = 0;
    while (1) {
        // Get one encoded video frame from the pipeline
        MxStream::MxstDataOutput* output = mxStreamManager.GetResult(streamName, inPluginId);
        if (output == nullptr) {
            LogError << "Failed to get pipeline output.";
            return ret;
        }
        // For H.264 output, the first written frame must be an IDR frame
        bIsIDR = (output->dataSize > 1);
        if (!m_bFoundFirstIDR) {
            if (!bIsIDR) {
                delete output;   // drop non-IDR frames until the first IDR arrives
                continue;
            } else {
                m_bFoundFirstIDR = true;
            }
        }
        // Append the encoded frame to the H.264 file
        if (fwrite(output->dataPtr, output->dataSize, 1, fp) != 1) {
            LogInfo << "write frame to file fail";
        }
        frameCount++;
        if (frameCount > MaxframeCount) {
            LogInfo << "write frame to file done";
            delete output;
            break;
        }
        delete output;
    }
    // Close the H.264 file
    fclose(fp);
    // Destroy the streams
    mxStreamManager.DestroyAllStreams();
    return 0;
}
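To see why the loop waits for an IDR frame before writing, you can inspect out.h264 directly. The sketch below is a hypothetical standalone checker, not part of the sample: it walks the Annex-B start codes and prints each NAL unit type. In H.264, type 5 is an IDR slice and types 7/8 are the SPS/PPS headers; a file that decoders can play from the beginning should start with SPS/PPS followed by an IDR slice, which is what the m_bFoundFirstIDR guard is meant to ensure.

// nal_scan.cpp -- hypothetical checker for out.h264, not part of the sample.
// Walks Annex-B start codes (00 00 01 / 00 00 00 01) and prints the type
// of every NAL unit found.
#include <cstdint>
#include <cstdio>
#include <fstream>
#include <iterator>
#include <vector>

int main()
{
    std::ifstream file("./out.h264", std::ifstream::binary);
    if (!file) {
        std::fprintf(stderr, "cannot open ./out.h264\n");
        return 1;
    }
    std::vector<uint8_t> data((std::istreambuf_iterator<char>(file)),
                              std::istreambuf_iterator<char>());
    for (size_t i = 0; i + 3 < data.size(); ++i) {
        bool shortCode = (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 1);
        bool longCode = (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 0 &&
                         i + 4 < data.size() && data[i + 3] == 1);
        if (!shortCode && !longCode) {
            continue;
        }
        size_t header = shortCode ? i + 3 : i + 4;  // first byte after the start code
        unsigned type = data[header] & 0x1F;        // nal_unit_type (low 5 bits)
        std::printf("offset %zu: NAL type %u%s\n", i, type,
                    type == 5 ? " (IDR slice)" :
                    type == 7 ? " (SPS)" :
                    type == 8 ? " (PPS)" : "");
        i = header;                                 // resume scanning after the header byte
    }
    return 0;
}

If the first NAL units printed are not SPS/PPS followed by an IDR slice, most players will fail to decode the file from the start.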