AudioRenderer开发音频播放功能(ArkTS)

console.error(`Invoke createAudioRenderer failed, code is ${err.code}, message is ${err.message}`);
return;

} else {
console.info('Invoke createAudioRenderer succeeded.');
let audioRenderer = data;
}
});


2. 调用start()方法进入running状态,开始渲染音频。



import { BusinessError } from ‘@ohos.base’;

audioRenderer.start((err: BusinessError) => {
if (err) {
console.error(Renderer start failed, code is ${err.code}, message is ${err.message});
} else {
console.info(‘Renderer start success.’);
}
});


3. 指定待渲染文件地址,打开文件调用write()方法向缓冲区持续写入音频数据进行渲染播放。如果需要对音频数据进行处理以实现个性化的播放,在写入之前操作即可。



import fs from ‘@ohos.file.fs’;

let context = getContext(this);
async function read() {
const bufferSize: number = await audioRenderer.getBufferSize();
let path = context.filesDir;

const filePath = path + ‘/voice_call_data.wav’;
let file: fs.File = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
let buf = new ArrayBuffer(bufferSize);
let readsize: number = await fs.read(file.fd, buf);
let writeSize: number = await audioRenderer.write(buf);
}


4. 调用stop()方法停止渲染。



import { BusinessError } from ‘@ohos.base’;

audioRenderer.stop((err: BusinessError) => {
if (err) {
console.error(Renderer stop failed, code is ${err.code}, message is ${err.message});
} else {
console.info(‘Renderer stopped.’);
}
});


5. 调用release()方法销毁实例,释放资源。



import { BusinessError } from ‘@ohos.base’;

audioRenderer.release((err: BusinessError) => {
if (err) {
console.error(Renderer release failed, code is ${err.code}, message is ${err.message});
} else {
console.info(‘Renderer released.’);
}
});


#### 完整示例


下面展示了使用AudioRenderer渲染音频文件的示例代码。



import audio from ‘@ohos.multimedia.audio’;
import fs from ‘@ohos.file.fs’;

const TAG = ‘AudioRendererDemo’;

let context = getContext(this);
let renderModel: audio.AudioRenderer | undefined = undefined;
let audioStreamInfo: audio.AudioStreamInfo = {
samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // 采样率
channels: audio.AudioChannel.CHANNEL_2, // 通道
sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 采样格式
encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // 编码格式
}
let audioRendererInfo: audio.AudioRendererInfo = {
usage: audio.StreamUsage.STREAM_USAGE_MUSIC, // 音频流使用类型
rendererFlags: 0 // 音频渲染器标志
}
let audioRendererOptions: audio.AudioRendererOptions = {
streamInfo: audioStreamInfo,
rendererInfo: audioRendererInfo
}

// 初始化,创建实例,设置监听事件
async function init() {
audio.createAudioRenderer(audioRendererOptions, (err, renderer) => { // 创建AudioRenderer实例
if (!err) {
console.info(${TAG}: creating AudioRenderer success);
renderModel = renderer;
if (renderModel !== undefined) {
(renderModel as audio.AudioRenderer).on(‘stateChange’, (state: audio.AudioState) => { // 设置监听事件,当转换到指定的状态时触发回调
if (state == 2) {
console.info(‘audio renderer state is: STATE_RUNNING’);
}
});
(renderModel as audio.AudioRenderer).on(‘markReach’, 1000, (position: number) => { // 订阅markReach事件,当渲染的帧数达到1000帧时触发回调
if (position == 1000) {
console.info(‘ON Triggered successfully’);
}
});
}
} else {
console.info(${TAG}: creating AudioRenderer failed, error: ${err.message});
}
});
}

// 开始一次音频渲染
async function start() {
if (renderModel !== undefined) {
let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
if (stateGroup.indexOf((renderModel as audio.AudioRenderer).state.valueOf()) === -1) { // 当且仅当状态为prepared、paused和stopped之一时才能启动渲染
console.error(TAG + ‘start failed’);
return;
}
await (renderModel as audio.AudioRenderer).start(); // 启动渲染

const bufferSize = await (renderModel as audio.AudioRenderer).getBufferSize();

let path = context.filesDir;
const filePath = path + '/test.wav'; // 使用沙箱路径获取文件,实际路径为/data/storage/el2/base/haps/entry/files/test.wav

let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
let stat = await fs.stat(filePath);
let buf = new ArrayBuffer(bufferSize);
let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
class Options {
  offset: number = 0;
  length: number = 0
}
for (let i = 0; i < len; i++) {
  let options: Options = {
    offset: i * bufferSize,
    length: bufferSize
  };
  let readsize = await fs.read(file.fd, buf, options);
  
  // buf是要写入缓冲区的音频数据,在调用AudioRenderer.write()方法前可以进行音频数据的预处理,实现个性化的音频播放功能,AudioRenderer会读出写入缓冲区的音频数据进行渲染
  
  let writeSize: number = await (renderModel as audio.AudioRenderer).write(buf);
    if ((renderModel as audio.AudioRenderer).state.valueOf() === audio.AudioState.STATE_RELEASED) { // 如果渲染器状态为released,关闭资源
    fs.close(file);
  }
  if ((renderModel as audio.AudioRenderer).state.valueOf() === audio.AudioState.STATE_RUNNING) {
    if (i === len - 1) { // 如果音频文件已经被读取完,停止渲染
      fs.close(file);
      await (renderModel as audio.AudioRenderer).stop();
    }
  }
}

}
}

// 暂停渲染
async function pause() {
if (renderModel !== undefined) {
// 只有渲染器状态为running的时候才能暂停
if ((renderModel as audio.AudioRenderer).state.valueOf() !== audio.AudioState.STATE_RUNNING) {
console.info(‘Renderer is not running’);
return;
}
await (renderModel as audio.AudioRenderer).pause(); // 暂停渲染
if ((renderModel as audio.AudioRenderer).state.valueOf() === audio.AudioState.STATE_PAUSED) {
console.info(‘Renderer is paused.’);
} else {
console.error(‘Pausing renderer failed.’);
}
}
}

// 停止渲染
async function stop() {
if (renderModel !== undefined) {
// 只有渲染器状态为running或paused的时候才可以停止
if ((renderModel as audio.AudioRenderer).state.valueOf() !== audio.AudioState.STATE_RUNNING && (renderModel as audio.AudioRenderer).state.valueOf() !== audio.AudioState.STATE_PAUSED) {
console.info(‘Renderer is not running or paused.’);
return;
}
await (renderModel as audio.AudioRenderer).stop(); // 停止渲染
if ((renderModel as audio.AudioRenderer).state.valueOf() === audio.AudioState.STATE_STOPPED) {
console.info(‘Renderer stopped.’);
} else {
console.error(‘Stopping renderer failed.’);
}
}
}

// 销毁实例,释放资源
async function release() {
if (renderModel !== undefined) {
// 渲染器状态不是released状态,才能release
if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
console.info(‘Renderer already released’);
return;
}
await renderModel.release(); // 释放资源
if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
console.info(‘Renderer released’);
} else {
console.error(‘Renderer release failed.’);
}
}
}


当同优先级或高优先级音频流要使用输出设备时,当前音频流会被中断,应用可以自行响应中断事件并做出处理。具体的音频并发处理方式可参考[多音频播放的并发策略](/openharmony/docs/blob/master/zh-cn/application-dev/media/audio-playback-concurrency.md)。


**为了能让大家更好的学习鸿蒙(HarmonyOS NEXT)开发技术,这边特意整理了《鸿蒙开发学习手册》(共计890页),希望对大家有所帮助:[`https://qr21.cn/FV7h05`]( )**


#### 《鸿蒙开发学习手册》:[`https://qr21.cn/FV7h05`]( )


**入门必看:[`https://qr21.cn/FV7h05`]( )**


1. 应用开发导读(ArkTS)
2. ……


![](https://img-blog.csdnimg.cn/img_convert/eab12aefc653b953c5f677e4be4e7a98.webp?x-oss-process=image/format,png)


**HarmonyOS 概念:[`https://qr21.cn/FV7h05`]( )**


1. 系统定义
2. 技术架构
3. 技术特性
4. 系统安全


![](https://img-blog.csdnimg.cn/img_convert/07faa31ade475b8c5afbb4d4b74e7c04.webp?x-oss-process=image/format,png)


**如何快速入门:[`https://qr21.cn/FV7h05`]( )**


1. 基本概念
2. 构建第一个ArkTS应用
3. 构建第一个JS应用
4. ……


![](https://img-blog.csdnimg.cn/img_convert/ccf8d7bc210bffd0d22ed3f462a887e7.webp?x-oss-process=image/format,png)


**开发基础知识:[`https://qr21.cn/FV7h05`]( )**


1. 应用基础知识
2. 配置文件
3. 应用数据管理
4. 应用安全管理
5. 应用隐私保护
6. 三方应用调用管控机制
7. 资源分类与访问
8. 学习ArkTS语言
9. ……


![](https://img-blog.csdnimg.cn/img_convert/23ebda4e6ccfdab179a8be2f256079ac.webp?x-oss-process=image/format,png)


**基于ArkTS 开发:[`https://qr21.cn/FV7h05`]( )**


1. Ability开发
2. UI开发
3. 公共事件与通知
4. 窗口管理
5. 媒体
6. 安全
7. 网络与链接
8. 电话服务
9. 数据管理
10. 后台任务(Background Task)管理
11. 设备管理
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值