Unity Integration with Microsoft Cognitive Services Speech Synthesis (Azure TTS)
In the previous article I showed how to integrate Microsoft Cognitive Services speech recognition; in this one I will show how to integrate its speech synthesis (referred to below simply as speech synthesis). Speech synthesis also needs a YourSubscriptionKey and YourServiceRegion, obtained the same way as for speech recognition; these two keys are shared with speech recognition, and the SDK is the same one used there. First, as before, create a new scene.
Then create a new script, name it MicroSoftTTSDemo, open it, and paste in the following:
using UnityEngine;
using UnityEngine.UI;
using Microsoft.CognitiveServices.Speech;
using System;
public class MicroSoftTTSDemo : MonoBehaviour
{
// Hook up the fields below with matching objects in your UI: a Text, an
// InputField, a Button, an AudioSource, and a second Text for timing.
public Text outputText;
public InputField inputField;
public Button speakButton;
public AudioSource audioSource;
private object threadLocker = new object();
private bool waitingForSpeak;
private string message;
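// Note: path, fileName and audioType are declared but not used in this demo;
// see the file-output sketch after the listing for one way to save audio to disk.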
public string path;
public string fileName;
public string audioType;
public DateTime startTime;
public DateTime endTime;
public Text timeText;
void Start()
{
if (outputText == null)
{
UnityEngine.Debug.LogError("outputText property is null! Assign a UI Text element to it.");
}
else if (inputField == null)
{
message = "inputField property is null! Assign a UI InputField element to it.";
UnityEngine.Debug.LogError(message);
}
else if (speakButton == null)
{
message = "speakButton property is null! Assign a UI Button to it.";
UnityEngine.Debug.LogError(message);
}
else
{
// Continue with normal initialization, Text, InputField and Button objects are present.
//inputField.text = "Enter text you wish spoken here.";
//message = "Click button to synthesize speech";
speakButton.onClick.AddListener(ButtonClick);
}
}
void Update()
{
lock (threadLocker)
{
if (speakButton != null)
{
speakButton.interactable = !waitingForSpeak;
}
if (outputText != null)
{
outputText.text = message;
}
}
}
public void ButtonClick()
{
startTime = DateTime.Now;
// Creates an instance of a speech config with specified subscription key and service region.
// Replace with your own subscription key and service region (e.g., "westus").
var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
// Creates a speech synthesizer.
// Make sure to dispose the synthesizer after use!
using (var synthesizer = new SpeechSynthesizer(config, null))
{
lock (threadLocker)
{
waitingForSpeak = true;
}
// Starts speech synthesis, and returns after a single utterance is synthesized.
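// Note: calling .Result here blocks Unity's main thread until the service
// responds; for long texts, consider awaiting the task off the main thread.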
var result = synthesizer.SpeakTextAsync(inputField.text).Result;
//print("after " + DateTime.Now);
// Checks result.
string newMessage = string.Empty;
if (result.Reason == ResultReason.SynthesizingAudioCompleted)
{
// Since native playback is not yet supported in Unity (currently it is only
// supported on Windows/Linux desktop), use the Unity API to play the audio
// here as a short-term solution. Native playback support will be added in a
// future release.
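// The returned audio is 16-bit little-endian PCM. The loop below combines
// each byte pair into a signed 16-bit sample and scales it to the [-1, 1]
// float range that AudioClip.SetData expects.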
var sampleCount = result.AudioData.Length / 2;
var audioData = new float[sampleCount];
for (var i = 0; i < sampleCount; ++i)
{
audioData[i] = (short)(result.AudioData[i * 2 + 1] << 8 | result.AudioData[i * 2]) / 32768.0F;
}
// The default output audio format is 16 kHz, 16-bit, mono PCM.
var audioClip = AudioClip.Create("SynthesizedAudio", sampleCount, 1, 16000, false);
audioClip.SetData(audioData, 0);
audioSource.clip = audioClip;
audioSource.Play();
endTime = DateTime.Now;
timeText.text = "Synthesis started: " + startTime + "  Synthesis succeeded: " + endTime;
newMessage = "Speech synthesis succeeded!";
}
else if (result.Reason == ResultReason.Canceled)
{
var cancellation = SpeechSynthesisCancellationDetails.FromResult(result);
newMessage = $"CANCELED:\nReason=[{cancellation.Reason}]\nErrorDetails=[{cancellation.ErrorDetails}]\nDid you update the subscription info?";
}
lock (threadLocker)
{
message = newMessage;
waitingForSpeak = false;
}
}
}
}
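Incidentally, the path, fileName and audioType fields in the script above are not used in this demo. If you would rather save the synthesized audio to disk than play it back, the Speech SDK can write a WAV file directly through AudioConfig.FromWavFileOutput. A minimal sketch, assuming the same SDK version (the output path below is only an illustration):

using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;

var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
// Write the synthesized audio to a WAV file instead of returning raw bytes.
using (var fileOutput = AudioConfig.FromWavFileOutput(Application.persistentDataPath + "/synthesized.wav"))
using (var synthesizer = new SpeechSynthesizer(config, fileOutput))
{
    var result = synthesizer.SpeakTextAsync("Hello world").Result;
}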
Save the script and click Play. Type some English text into the InputField, then click the Button, and the text you entered will be spoken. That completes the speech synthesis integration. Microsoft's synthesized speech sounds remarkably close to a real human voice, without the robotic feel of older TTS engines.
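By default the service picks a standard voice for your region, which is why the demo works best with English input. To synthesize text in another language, or to pick a specific voice, you can set the language and voice on the SpeechConfig before creating the synthesizer. A minimal sketch, assuming an SDK version with neural voice support (the voice name below is just one example; check the Azure documentation for the voices available in your region):

var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
config.SpeechSynthesisLanguage = "zh-CN";                 // language of the input text
config.SpeechSynthesisVoiceName = "zh-CN-XiaoxiaoNeural"; // an example neural voice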
The next article will walk through the pitfalls you may run into when building the project after the Microsoft Speech SDK has been integrated.