步骤1:在科大讯飞开发者平台官网注册账号,并创建应用
步骤2:选中在线语音合成和语音听写,点击sdk下载
步骤3:解压下载包,将下载的sdk里的jar包资源等对应导入Android Studio中
步骤4:在Android Studio中手动创建一个jniLibs文件夹,并将assets文件夹复制到项目中
步骤5:代码部分
1、权限
<!-- Network access: required for the cloud speech services. -->
<uses-permission android:name="android.permission.INTERNET"/>
<!-- Microphone: required for dictation, recognition and semantic understanding. -->
<uses-permission android:name="android.permission.RECORD_AUDIO"/>
<!-- Read network connectivity state. -->
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE"/>
<!-- Read current Wi-Fi state. -->
<uses-permission android:name="android.permission.ACCESS_WIFI_STATE"/>
<!-- Allow changing the network connection state. -->
<uses-permission android:name="android.permission.CHANGE_NETWORK_STATE"/>
<!-- Read phone state (used by the SDK for device identification). -->
<uses-permission android:name="android.permission.READ_PHONE_STATE"/>
2、功能代码
//新闻文本 private String mNewsText =""; private Gson mGson; private EditText et_tet; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); et_tet= (EditText) findViewById(R.id.et_tet); SpeechUtility.createUtility(MainActivity.this, SpeechConstant.APPID +"=5b0f6b12"); mGson = new Gson(); } public void onRecognise(View view) { //⑧申请录制音频的动态权限 if(ContextCompat.checkSelfPermission(this, android.Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED){ ActivityCompat.requestPermissions(this,new String[]{ android.Manifest.permission.RECORD_AUDIO},1); }else { // startRecord(); RecognizerDialog mDialog = new RecognizerDialog(this, null); //2.设置accent、 language等参数 mDialog.setParameter(SpeechConstant.LANGUAGE, "zh_cn");//简体中文:zh_cn(默认);美式英文:en_us:poi;音乐:music mDialog.setParameter(SpeechConstant.ACCENT, "mandarin");//方言普通话:mandarin(默认);粤 语:cantonese四川话:lmz;河南话:henanese mDialog.setParameter(SpeechConstant.ASR_AUDIO_PATH, "./tts_test.pcm"); //识别完成后在本地保存一个音频文件 //若要将UI控件用于语义理解,必须添加以下参数设置,设置之后onResult回调返回将是语义理解 //结果 // mDialog.setParameter("asr_sch", "1"); // mDialog.setParameter("nlp_version", "2.0"); //3.设置回调接口 mDialog.setListener(mRecognizerDialogListener); //4.显示dialog,接收语音输入 mDialog.show(); } //1.创建RecognizerDialog对象 } private RecognizerDialogListener mRecognizerDialogListener = new RecognizerDialogListener() { /** * * @param recognizerResult 语音识别结果 * @param b true表示是标点符号 */ @Override public void onResult(RecognizerResult recognizerResult, boolean b) { // Toast.makeText(MainActivity.this, recognizerResult.getResultString(), Toast.LENGTH_LONG).show(); if (b) { return; } ResultBean resultBean = mGson.fromJson(recognizerResult.getResultString(), ResultBean.class); List<ResultBean.WsBean> ws = resultBean.getWs(); String w = ""; for (int i = 0; i < ws.size(); i++) { List<ResultBean.WsBean.CwBean> cw = ws.get(i).getCw(); for (int j = 0; j < cw.size(); j++) { w += cw.get(j).getW(); } } 
et_tet.setText(w); // Toast.makeText(MainActivity.this, w, Toast.LENGTH_SHORT).show(); } @Override public void onError(SpeechError speechError) { } }; public void onSynthesize(View view) { //1.创建 SpeechSynthesizer 对象, 第二个参数: 本地合成时传 InitListener mNewsText=et_tet.getText().toString(); SpeechSynthesizer mTts= SpeechSynthesizer.createSynthesizer(MainActivity.this, null); //2.合成参数设置,详见《 MSC Reference Manual》 SpeechSynthesizer 类 //设置发音人(更多在线发音人,用户可参见 附录13.2 mTts.setParameter(SpeechConstant.VOICE_NAME, "vixk"); //设置发音人 mTts.setParameter(SpeechConstant.SPEED, "40");//设置语速 mTts.setParameter(SpeechConstant.VOLUME, "80");//设置音量,范围 0~100 mTts.setParameter(SpeechConstant.ENGINE_TYPE, SpeechConstant.TYPE_CLOUD); //设置云端 //设置合成音频保存位置(可自定义保存位置),保存在“./sdcard/iflytek.pcm” //保存在 SD 卡需要在 AndroidManifest.xml 添加写 SD 卡权限 //仅支持保存为 pcm 和 wav 格式, 如果不需要保存合成音频,注释该行代码 mTts.setParameter(SpeechConstant.TTS_AUDIO_PATH, "./sdcard/iflytek.pcm"); //3.开始合成 mTts.startSpeaking(mNewsText, null); } /** * ⑨重写onRequestPermissionsResult方法 * 获取动态权限请求的结果,再开启录制音频 */ @Override public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) { if(requestCode==1&&grantResults[0]== PackageManager.PERMISSION_GRANTED){ RecognizerDialog mDialog = new RecognizerDialog(this, null); //2.设置accent、 language等参数 mDialog.setParameter(SpeechConstant.LANGUAGE, "zh_cn"); mDialog.setParameter(SpeechConstant.ACCENT, "mandarin"); //若要将UI控件用于语义理解,必须添加以下参数设置,设置之后onResult回调返回将是语义理解 //结果 // mDialog.setParameter("asr_sch", "1"); // mDialog.setParameter("nlp_version", "2.0"); //3.设置回调接口 mDialog.setListener(mRecognizerDialogListener); //4.显示dialog,接收语音输入 mDialog.show(); }else { Toast.makeText(this,"用户拒绝了权限",Toast.LENGTH_SHORT).show(); } super.onRequestPermissionsResult(requestCode, permissions, grantResults); }
ResultBean
/**
 * Gson mapping of the iFlytek dictation result JSON.
 *
 * Example payload:
 * {"sn":1,"ls":true,"bg":0,"ed":0,
 *  "ws":[{"bg":0,"cw":[{"w":"今天","sc":0}]}, {"bg":0,"cw":[{"w":"的","sc":0}]}, ...]}
 *
 * Fix vs. original snippet: the outer class was missing its closing brace
 * (the pasted code only closed CwBean and WsBean), so it did not compile.
 */
public class ResultBean {

    public static final String TAG = "ResultBean";

    private int sn;              // sentence sequence number
    private boolean ls;          // true when this is the last result chunk
    private int bg;              // begin offset
    private int ed;              // end offset
    private List<WsBean> ws;     // recognized word slices

    public int getSn() { return sn; }

    public void setSn(int sn) { this.sn = sn; }

    public boolean isLs() { return ls; }

    public void setLs(boolean ls) { this.ls = ls; }

    public int getBg() { return bg; }

    public void setBg(int bg) { this.bg = bg; }

    public int getEd() { return ed; }

    public void setEd(int ed) { this.ed = ed; }

    public List<WsBean> getWs() { return ws; }

    public void setWs(List<WsBean> ws) { this.ws = ws; }

    /** One word slot; holds the candidate words for that position. */
    public static class WsBean {

        private int bg;              // begin offset of this slot
        private List<CwBean> cw;     // candidate words, e.g. [{"w":"今天","sc":0}]

        public int getBg() { return bg; }

        public void setBg(int bg) { this.bg = bg; }

        public List<CwBean> getCw() { return cw; }

        public void setCw(List<CwBean> cw) { this.cw = cw; }

        /** One candidate word with its confidence score. */
        public static class CwBean {

            private String w;    // the word text
            private int sc;      // confidence score

            public String getW() { return w; }

            public void setW(String w) { this.w = w; }

            public int getSc() { return sc; }

            public void setSc(int sc) { this.sc = sc; }
        }
    }
}
RecognizerDialog报空指针
下面这两行不要添加(之前看一篇博客说空指针问题是因为少加了这两行,于是加上后就没再在意;最后发现把这两行去掉后问题就解决了)。
/* ndk {
    // Package .so libraries only for the listed CPU ABIs.
    // NOTE: the original listed 'armeabi-v8a', which is not a real ABI — the
    // 64-bit ARM ABI is named 'arm64-v8a'. A wrong/missing ABI filter means the
    // SDK's native libs never get packaged, which is a classic cause of the
    // RecognizerDialog NullPointerException described above.
    abiFilters 'x86', 'armeabi', 'armeabi-v7a', 'arm64-v8a'
} */