A while back I put together the first version of speech recognition. Project progress has been slow these past couple of days, so I had time to write up the second version. Without further ado, here is the code. The ready-made Jar is linked at the end of the post; drop it into your project and it works directly. I downloaded the Jar by hand, so I don't have a Gradle dependency address for it.
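One note before the class itself: the iFlytek SDK has to be initialized once with your APPID before any recognizer or synthesizer is created, usually in Application.onCreate(). A minimal sketch, assuming a placeholder APPID (replace it with the one from your iFlytek console):

import android.app.Application;
import com.iflytek.cloud.SpeechConstant;
import com.iflytek.cloud.SpeechUtility;

public class App extends Application {
    @Override
    public void onCreate() {
        super.onCreate();
        // "5xxxxxxx" is a placeholder; the APPID must match the Jar/so you downloaded.
        SpeechUtility.createUtility(this, SpeechConstant.APPID + "=5xxxxxxx");
    }
}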
package com.mhealth.nursestation.app.pda.common;
import java.util.HashMap;
import java.util.LinkedHashMap;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.json.JSONTokener;
import android.annotation.SuppressLint;
import android.app.AlertDialog;
import android.content.Context;
import android.os.Bundle;
import android.os.Environment;
import android.view.LayoutInflater;
import android.view.MotionEvent;
import android.view.View;
import android.view.View.OnTouchListener;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.Toast;
import com.iflytek.cloud.InitListener;
import com.iflytek.cloud.RecognizerListener;
import com.iflytek.cloud.RecognizerResult;
import com.iflytek.cloud.Setting;
import com.iflytek.cloud.SpeechConstant;
import com.iflytek.cloud.SpeechError;
import com.iflytek.cloud.SpeechRecognizer;
import com.iflytek.cloud.SpeechSynthesizer;
import com.iflytek.cloud.SynthesizerListener;
import com.mhealth.nursestation.app.pda.R;
/**
* Main utility class.
* Drop it into your project and it can be used directly.
**/
public class XFAudio {
private Context context = null;
private Button button = null;
private EditText editText = null;
private Toast mToast;
private String text = "";
private SpeechRecognizer mIat;
private SpeechSynthesizer mTts;
AlertDialog dAlertDialog;
ImageView img_voice;
private HashMap<String, String> mIatResults = new LinkedHashMap<String, String>();
/**
* Speech-to-text constructor.
*
* @param context
* @param button
* the speech (push-to-talk) button
* @param editText
* the text box that receives the recognized text
*/
public XFAudio(Context context, Button button, EditText editText) {
this.context = context;
this.button = button;
this.editText = editText;
regist();
}
/**
* Text-to-speech constructor.
*
* @param context
*/
public XFAudio(Context context) {
this.context = context;
setParam();
}
/**
* Initialization listener.
*/
private InitListener mInitListener = new InitListener() {
@Override
public void onInit(int code) {
}
};
/**
* Speech synthesis (text-to-speech) listener.
*/
private SynthesizerListener mTtsListener = new SynthesizerListener() {
public void onBufferProgress(int paramAnonymousInt1,
int paramAnonymousInt2, int paramAnonymousInt3,
String paramAnonymousString) {
}
public void onCompleted(SpeechError error) {
if (error != null) {
showToast(error.getPlainDescription(false));
}
}
public void onEvent(int paramAnonymousInt1, int paramAnonymousInt2,
int paramAnonymousInt3, Bundle paramAnonymousBundle) {
}
public void onSpeakBegin() {
}
public void onSpeakPaused() {
}
public void onSpeakProgress(int paramAnonymousInt1,
int paramAnonymousInt2, int paramAnonymousInt3) {
}
public void onSpeakResumed() {
}
};
/**
* Dictation (speech-to-text) listener.
*/
private RecognizerListener mRecognizerListener = new RecognizerListener() {
@Override
public void onBeginOfSpeech() {
}
@Override
public void onError(SpeechError error) {
}
@Override
public void onEndOfSpeech() {
}
@Override
public void onResult(RecognizerResult results, boolean isLast) {
printResult(results);
}
@Override
public void onVolumeChanged(int volume, byte[] data) {
SetVoicePic(volume);
}
@Override
public void onEvent(int eventType, int arg1, int arg2, Bundle obj) {
}
};
private void regist() {
this.mIat = SpeechRecognizer.createRecognizer(this.context,
mInitListener);
// Turn on SDK logging.
Setting.setShowLog(true);
// Clear any previously set parameters.
mIat.setParameter(SpeechConstant.PARAMS, null);
// Dictation domain, Mandarin Chinese.
mIat.setParameter("domain", "iat");
mIat.setParameter("language", "zh_cn");
mIat.setParameter("accent", "mandarin");
// Local (offline) engine; use SpeechConstant.TYPE_CLOUD for online recognition.
mIat.setParameter(SpeechConstant.ENGINE_TYPE, SpeechConstant.TYPE_LOCAL);
// Return results as JSON.
mIat.setParameter(SpeechConstant.RESULT_TYPE, "json");
// Silence that ends the utterance, in milliseconds.
mIat.setParameter(SpeechConstant.VAD_EOS, "1000");
// "0" disables automatic punctuation in the result.
mIat.setParameter(SpeechConstant.ASR_PTT, "0");
// Save the recorded audio as a wav file under /msc on external storage.
mIat.setParameter(SpeechConstant.AUDIO_FORMAT, "wav");
mIat.setParameter(SpeechConstant.ASR_AUDIO_PATH,
Environment.getExternalStorageDirectory() + "/msc/mms.wav");
// Dynamic correction disabled ("关闭" means "off").
mIat.setParameter(SpeechConstant.ASR_DWA, "关闭");
}
private void setParam() {
mTts = SpeechSynthesizer.createSynthesizer(context, mInitListener);
// Clear any previously set parameters.
this.mTts.setParameter("params", null);
// Local (offline) synthesis engine.
this.mTts.setParameter("engine_type", "local");
// Voice: "xiaoyan", the standard female Mandarin voice.
this.mTts.setParameter("voice_name", "xiaoyan");
// Speed, pitch and volume, each in the range 0-100.
this.mTts.setParameter("speed", "50");
this.mTts.setParameter("pitch", "50");
this.mTts.setParameter("volume", "90");
// Audio stream type 3 = AudioManager.STREAM_MUSIC.
this.mTts.setParameter("stream_type", "3");
}
/**
* Button event: press and hold to dictate.
*/
public void toSay() {
button.setOnTouchListener(new OnTouchListener() {
@Override
public boolean onTouch(View v, MotionEvent event) {
if (event.getAction() == MotionEvent.ACTION_DOWN) {
ShowSpeekDialog();
editText.setText("");
mIatResults.clear();
mIat.startListening(mRecognizerListener);
}
if (event.getAction() == MotionEvent.ACTION_UP) {
DissSpeekDialog();
mIat.stopListening();
}
return false;
}
});
}
/**
* Text-to-speech: speak the given text.
*/
public void startSay(String text) {
mTts.startSpeaking(text, this.mTtsListener);
}
public void startSay(String text, SynthesizerListener listener) {
mTts.startSpeaking(text, listener);
}
/**
* Parses the Chinese text out of the returned JSON result and displays it.
*
* @param results
*/
private void printResult(RecognizerResult results) {
String text = parseIatResult(results.getResultString());
String sn = null;
try {
JSONObject resultJson = new JSONObject(results.getResultString());
sn = resultJson.optString("sn");
} catch (JSONException e) {
e.printStackTrace();
}
mIatResults.put(sn, text);
StringBuffer resultBuffer = new StringBuffer();
for (String key : mIatResults.keySet()) {
resultBuffer.append(mIatResults.get(key));
}
editText.setText(resultBuffer.toString());
}
/**
* Parses the JSON recognition result into plain text.
*/
public static String parseIatResult(String json) {
StringBuffer ret = new StringBuffer();
try {
JSONTokener tokener = new JSONTokener(json);
JSONObject joResult = new JSONObject(tokener);
JSONArray words = joResult.getJSONArray("ws");
for (int i = 0; i < words.length(); i++) {
JSONArray items = words.getJSONObject(i).getJSONArray("cw");
JSONObject obj = items.getJSONObject(0);
ret.append(obj.getString("w"));
}
} catch (Exception e) {
e.printStackTrace();
}
return ret.toString();
}
/**
* Custom toast that refreshes quickly by reusing a single Toast instance.
*
* @param msg
*/
public void showToast(String msg) {
if (msg != null && !"".equals(msg)) {
if (mToast == null) {
mToast = Toast.makeText(context, msg, Toast.LENGTH_SHORT);
} else {
mToast.setText(msg);
mToast.setDuration(Toast.LENGTH_SHORT);
}
mToast.show();
}
}
/**
* Shows the dialog that indicates how loudly the user is speaking.
*/
@SuppressLint("NewApi")
private void ShowSpeekDialog() {
LayoutInflater inflater = LayoutInflater.from(context);
View layout = inflater.inflate(R.layout.dialog_speek, null);
AlertDialog.Builder builder = new AlertDialog.Builder(context,
R.style.SpeekDialog);
dAlertDialog = builder.create();
dAlertDialog.show();
dAlertDialog.setContentView(layout);
dAlertDialog.setCanceledOnTouchOutside(false);
img_voice = (ImageView) layout.findViewById(R.id.img_uploadlog_upload);
}
/**
* Dismisses the speaking-volume dialog.
*/
private void DissSpeekDialog() {
if (dAlertDialog != null && dAlertDialog.isShowing()) {
dAlertDialog.dismiss();
}
}
/**
* Updates the volume-level image shown in the dialog.
*/
private void SetVoicePic(int voice) {
if (voice == 0) {
img_voice.setBackgroundResource(R.drawable.spe_voice_amp0);
} else if (voice > 0 && voice <= 5) {
img_voice.setBackgroundResource(R.drawable.spe_voice_amp1);
} else if (voice > 5 && voice <= 10) {
img_voice.setBackgroundResource(R.drawable.spe_voice_amp2);
} else if (voice > 10 && voice <= 15) {
img_voice.setBackgroundResource(R.drawable.spe_voice_amp3);
} else if (voice > 15 && voice <= 20) {
img_voice.setBackgroundResource(R.drawable.spe_voice_amp4);
} else if (voice > 20) {
img_voice.setBackgroundResource(R.drawable.spe_voice_amp5);
}
}
/**
* Releases the recognizer and the synthesizer.
*/
public void Destroy() {
if (mIat != null) {
mIat.cancel();
mIat.destroy();
}
if (mTts != null) {
mTts.destroy();
}
}
}
Everything above is annotated in detail, so I won't belabor it here.
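For completeness, here is a rough sketch of how the class might be wired up from an Activity. The Activity name, layout ids and the sample sentence are hypothetical; note that the app also needs the RECORD_AUDIO and WRITE_EXTERNAL_STORAGE permissions (plus INTERNET if you switch to the cloud engine).

import android.app.Activity;
import android.os.Bundle;
import android.widget.Button;
import android.widget.EditText;

public class DemoActivity extends Activity {
    private XFAudio xfAudio; // speech-to-text helper
    private XFAudio xfTts;   // text-to-speech helper

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_demo); // hypothetical layout
        Button btnSpeak = (Button) findViewById(R.id.btn_speak);          // hypothetical id
        EditText editResult = (EditText) findViewById(R.id.edit_result);  // hypothetical id
        // Press and hold the button to dictate into the EditText.
        xfAudio = new XFAudio(this, btnSpeak, editResult);
        xfAudio.toSay();
        // A separate instance configured for speech synthesis.
        xfTts = new XFAudio(this);
        xfTts.startSay("你好");
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        xfAudio.Destroy();
        xfTts.Destroy();
    }
}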
One thing to watch out for with playback, though: when playing speech clips back to back, you still need a completion listener and a flag. If playback is already in progress, don't start another one; conflicting playback will pop up errors such as "engine error" or "internal error".
/**
* Whether speech is currently playing.
*/
private static boolean isSpeak = false;
/**
* Plays one message, skipping it if playback is already in progress.
*/
private void playMessage(final SendMessageEntity sendMessage) {
SLog.Console("$$$$$$1" + isSpeak);
if (!isSpeak) {
isSpeak = true;
SLog.Console("$$$$$$2" + isSpeak);
xfaudio.startSay(sendMessage.getText().toString().trim(), new SynthesizerListener() {
@Override
public void onSpeakResumed() {
}
@Override
public void onSpeakProgress(int arg0, int arg1, int arg2) {
}
@Override
public void onSpeakPaused() {
}
@Override
public void onSpeakBegin() {
}
@Override
public void onEvent(int arg0, int arg1, int arg2, Bundle arg3) {
}
@Override
public void onCompleted(SpeechError arg0) {
SLog.Console("$$$$$$3" + isSpeak);
isSpeak = false;
}
@Override
public void onBufferProgress(int arg0, int arg1, int arg2, String arg3) {
}
});
}
}
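If you prefer not to keep your own flag, the SpeechSynthesizer also exposes an isSpeaking() check that can be queried before starting the next playback; the flag above simply makes the state explicit in your own code.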
Oh right, the Jar package too; I almost forgot.