Implementing Speech Synthesis and Speech Recognition on Android
Speech synthesis and speech recognition on Android, for your reference; the details are as follows.
This demo calls the iFlytek (科大讯飞) speech API; an introduction is available on the iFlytek open speech cloud platform.
To call the iFlytek speech API you need to add the msc.jar library and the libmsc.so native library to the project, and declare the required permissions; the detailed steps are described in the documentation bundled with the SDK.
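For reference, the permissions typically required by the MSC SDK are declared in AndroidManifest.xml roughly as follows; this is only a sketch using standard Android permission names, so check your SDK's documentation for the authoritative list.

<!-- Typical permissions for the iFlytek MSC SDK; verify against the SDK documentation -->
<uses-permission android:name="android.permission.RECORD_AUDIO" />
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
<uses-permission android:name="android.permission.ACCESS_WIFI_STATE" />
<uses-permission android:name="android.permission.READ_PHONE_STATE" />
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />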
Following the SDK documentation, I wrote a simple speech synthesis and recognition demo. It works as follows.
Type text into the EditText and tap 语音合成 (speech synthesis) to have the text converted to speech.
Tap 语音识别 (speech recognition) and speak; the recognized text is shown as a toast and also written into the EditText.
The main code is shown below. Note that you need to apply for your own appid.
package com.example.voice;

import android.app.Activity;
import android.os.Bundle;
import android.view.Menu;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Toast;

import com.iflytek.cloud.speech.*;

public class VoiceActivity extends Activity {

    // Apply for your own appid on the iFlytek open platform
    private static final String APPID = "appid=52cddb99";

    private EditText et = null;
    private Button btn1 = null;
    private Button btn2 = null;
    String text = "";
    String temp = "";

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_voice);
        et = (EditText) findViewById(R.id.et);
        btn1 = (Button) findViewById(R.id.btn1);
        btn1.setOnClickListener(myListener);
        btn2 = (Button) findViewById(R.id.btn2);
        btn2.setOnClickListener(myListener);
    }

    private OnClickListener myListener = new OnClickListener() {
        public void onClick(View v) {
            // Log in to the iFlytek service before using synthesis/recognition
            SpeechUser.getUser().login(VoiceActivity.this, null, null, APPID, loginListener);
            switch (v.getId()) {
            case R.id.btn1:
                // Speech synthesis: read the text in the EditText aloud
                SpeechSynthesizer mSpeechSynthesizer = SpeechSynthesizer.createSynthesizer(VoiceActivity.this);
                mSpeechSynthesizer.setParameter(SpeechConstant.VOICE_NAME, "xiaoyu");
                mSpeechSynthesizer.setParameter(SpeechConstant.SPEED, "50");
                mSpeechSynthesizer.startSpeaking(et.getText().toString(), mSynListener);
                break;
            case R.id.btn2:
                // Speech recognition: listen and accumulate the recognized text
                text = "";
                temp = "";
                SpeechRecognizer recognizer = SpeechRecognizer.createRecognizer(VoiceActivity.this);
                recognizer.setParameter(SpeechConstant.DOMAIN, "iat");
                recognizer.setParameter(SpeechConstant.LANGUAGE, "zh_cn");
                recognizer.setParameter(SpeechConstant.ACCENT, "mandarin");
                recognizer.startListening(mRecoListener);
                break;
            }
        }
    };

    // Synthesis listener; the callbacks are left empty in this demo
    private SynthesizerListener mSynListener = new SynthesizerListener() {
        public void onBufferProgress(int arg0, int arg1, int arg2, String arg3) {}
        public void onCompleted(SpeechError arg0) {}
        public void onSpeakBegin() {}
        public void onSpeakPaused() {}
        public void onSpeakProgress(int arg0, int arg1, int arg2) {}
        public void onSpeakResumed() {}
    };

    // Recognition listener; onResult concatenates the parsed partial results
    private RecognizerListener mRecoListener = new RecognizerListener() {
        public void onBeginOfSpeech() {}
        public void onEndOfSpeech() {}
        public void onError(SpeechError error) {}
        public void onEvent(int arg0, int arg1, int arg2, String arg3) {}
        public void onVolumeChanged(int arg0) {}

        public void onResult(RecognizerResult results, boolean isLast) {
            // Join the parsed result fragments together
            temp = results.getResultString();
            text += JsonParser.parseIatResult(temp);
            if (isLast) {
                et.setText(text);
                Toast.makeText(VoiceActivity.this, text, Toast.LENGTH_LONG).show();
            }
        }
    };

    // Login listener
    private SpeechListener loginListener = new SpeechListener() {
        public void onCompleted(SpeechError arg0) {}
        public void onData(byte[] arg0) {}
        public void onEvent(int arg0, Bundle arg1) {}
    };

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.voice, menu);
        return true;
    }
}
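One small improvement worth noting: the demo above calls SpeechUser.getUser().login every time a button is clicked, but the login only needs to happen once. A reasonable variant (a sketch reusing the same APPID and loginListener declared above) moves the call into onCreate:

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_voice);
    // ... findViewById and setOnClickListener calls as before ...
    // Log in once when the Activity starts instead of on every click
    SpeechUser.getUser().login(this, null, null, APPID, loginListener);
}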
The layout file:
<TableLayout xmlns:android="http://schemas.android.com/apk/res/android"
    xmlns:tools="http://schemas.android.com/tools"
    android:layout_width="match_parent"
    android:layout_height="match_parent"
    android:paddingBottom="@dimen/activity_vertical_margin"
    android:paddingLeft="@dimen/activity_horizontal_margin"
    android:paddingRight="@dimen/activity_horizontal_margin"
    android:paddingTop="@dimen/activity_vertical_margin"
    tools:context=".VoiceActivity" >

    <TextView
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:gravity="center"
        android:text="VoiceApplication" />

    <EditText
        android:id="@+id/et"
        android:layout_width="fill_parent"
        android:layout_height="wrap_content"
        android:layout_weight="0.32"
        android:gravity="top" />

    <Button
        android:id="@+id/btn1"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:layout_weight="0.03"
        android:text="语音合成" />

    <Button
        android:id="@+id/btn2"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:layout_weight="0.03"
        android:text="语音识别" />

</TableLayout>
The code for parsing the JSON-formatted result follows the sample in the iFlytek documentation:
package com.example.voice;

import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONTokener;

import android.text.TextUtils;

//import com.iflytek.speech.ErrorCode;
//import com.iflytek.speech.SpeechError;

/**
 * Parses the JSON results returned by the cloud service.
 *
 * @author iflytek
 * @since 20131211
 */
public class JsonParser {

    /**
     * Parses a dictation (iat) result in JSON format.
     *
     * @param json the raw result string
     * @return the recognized text
     */
    public static String parseIatResult(String json) {
        if (TextUtils.isEmpty(json))
            return "";
        StringBuffer ret = new StringBuffer();
        try {
            JSONTokener tokener = new JSONTokener(json);
            JSONObject joResult = new JSONObject(tokener);
            JSONArray words = joResult.getJSONArray("ws");
            for (int i = 0; i < words.length(); i++) {
                // For each word, use the first candidate by default
                JSONArray items = words.getJSONObject(i).getJSONArray("cw");
                JSONObject obj = items.getJSONObject(0);
                ret.append(obj.getString("w"));
                // If multiple candidates are needed, parse the other entries of the array:
                // for (int j = 0; j < items.length(); j++) {
                //     JSONObject obj = items.getJSONObject(j);
                //     ret.append(obj.getString("w"));
                // }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return ret.toString();
    }
}
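To make the expected structure concrete, here is a hypothetical dictation payload shaped like what this parser reads (a "ws" array of words, each with a "cw" candidate array whose entries carry the text in "w"), together with the call that extracts the text; the sample string is illustrative, not an actual server response.

// Hypothetical sample shaped like the iFlytek dictation result this parser expects
String sample = "{\"ws\":["
        + "{\"cw\":[{\"w\":\"今天\"}]},"
        + "{\"cw\":[{\"w\":\"天气\"}]},"
        + "{\"cw\":[{\"w\":\"不错\"}]}]}";
// parseIatResult walks ws -> cw[0] -> w and concatenates the words
String recognized = JsonParser.parseIatResult(sample);  // "今天天气不错"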
That is all for this article; I hope it is helpful for your study.