As a small experiment, I developed a speech-recognition program using C# (.NET 3.0) and WPF.
Window1.xaml.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Shapes;
using System.Reflection;
using System.Windows.Threading;
using System.IO;
using System.Xml;
using System.Collections.ObjectModel;
using System.ComponentModel;
using System.Speech.Recognition;
using System.Speech.Recognition.SrgsGrammar;
using System.Speech.Synthesis;
namespace speechReco
{
/// <summary>
/// Interaction logic for Window1.xaml
/// </summary>
public partial class Window1 : System.Windows.Window
{
private SpeechRecognizer sharedRecognizer;
private SpeechRecognitionEngine appRecognizer;
private SrgsDocument sdCmnrules;
public Window1()
{
InitializeComponent();
sharedRecognizer = new SpeechRecognizer();
sharedRecognizer.AudioLevelUpdated += new EventHandler<AudioLevelUpdatedEventArgs>(sharedRecognizer_AudioLevelUpdated);
sharedRecognizer.AudioSignalProblemOccurred += new EventHandler<AudioSignalProblemOccurredEventArgs>(sharedRecognizer_AudioSignalProblemOccurred);
sharedRecognizer.AudioStateChanged += new EventHandler<AudioStateChangedEventArgs>(sharedRecognizer_AudioStateChanged);
sharedRecognizer.EmulateRecognizeCompleted += new EventHandler<EmulateRecognizeCompletedEventArgs>(sharedRecognizer_EmulateRecognizeCompleted);
sharedRecognizer.LoadGrammarCompleted += new EventHandler<LoadGrammarCompletedEventArgs>(sharedRecognizer_LoadGrammarCompleted);
sharedRecognizer.RecognizerUpdateReached += new EventHandler<RecognizerUpdateReachedEventArgs>(sharedRecognizer_RecognizerUpdateReached);
sharedRecognizer.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(sharedRecognizer_SpeechDetected);
sharedRecognizer.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(sharedRecognizer_SpeechHypothesized);
sharedRecognizer.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(sharedRecognizer_SpeechRecognitionRejected);
sharedRecognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(sharedRecognizer_SpeechRecognized);
sharedRecognizer.StateChanged += new EventHandler<System.Speech.Recognition.StateChangedEventArgs>(sharedRecognizer_StateChanged);
//load SRGS library
byte[] ba = speechReco.Properties.Resources.cmnrules;
MemoryStream ms = new MemoryStream(ba);
ms.Position = 0;
XmlReader xr = XmlReader.Create(ms);
sdCmnrules = new SrgsDocument(xr);
//populate ComboBox
foreach(SrgsRule rule in sdCmnrules.Rules)
{
if (rule.Scope == SrgsRuleScope.Public)
{
cbRules.Items.Add(rule.Id);
}
}
//default to integer rule
cbRules.SelectedValue = "integer";
cbRules.SelectionChanged += new SelectionChangedEventHandler(cbRules_SelectionChanged);
this.btnSharedColor.Click += new RoutedEventHandler(btnSharedColor_Click);
this.btnInProcColor.Click += new RoutedEventHandler(btnInProcColor_Click);
this.btnTapDictation.PreviewMouseLeftButtonDown += new MouseButtonEventHandler(btnTapDictation_PreviewMouseLeftButtonDown);
this.btnTapDictation.PreviewMouseLeftButtonUp += new MouseButtonEventHandler(btnTapDictation_PreviewMouseLeftButtonUp);
this.btnSrgs.Click += new RoutedEventHandler(btnSrgs_Click);
this.btnAdvGrammarBuilder.Click += new RoutedEventHandler(btnAdvGrammarBuilder_Click);
this.btnWavFile.Click += new RoutedEventHandler(btnWavFile_Click);
this.btnSynthPhonemes.Click += new RoutedEventHandler(btnSynthPhonemes_Click);
this.btnEnable.Click += new RoutedEventHandler(btnEnable_Click);
this.btnDisable.Click += new RoutedEventHandler(btnDisable_Click);
this.btnUnload.Click += new RoutedEventHandler(btnUnload_Click);
this.btnEmulate.Click += new RoutedEventHandler(btnEmulate_Click);
}
void btnEmulate_Click(object sender, RoutedEventArgs e)
{
//sharedRecognizer.EmulateRecognize("green");
sharedRecognizer.EmulateRecognizeAsync("green");
//sharedRecognizer.EmulateRecognize("stop listening");
}
void btnUnload_Click