C# Visual Studio 2012 Speech to textBox1
I've got a problem with the Speech API. I'm working with commands, and those work fine, but when I want the speech I say to show up in textBox1, it won't show it.

This is the code I need help with. I'm working with a switch case; I tried several if statements, but none of them work.
case "listen": ai.speak("i listening"); textbox1.text = textbox1.text + " " + e.result.text.tostring(); break;
Each time I say "listen", only the word "listen" becomes visible in the textbox.
Here is the full code:
```csharp
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Threading;
using System.Windows.Forms;
using System.Speech.Recognition;
using System.Speech.Synthesis;
using System.IO;
using System.Xml;
using System.Web;
using WindowsMicrophoneMuteLibrary;
using TweetSharp;

namespace Test
{
    public partial class Form1 : Form
    {
        SpeechRecognitionEngine sRecognizer = new SpeechRecognitionEngine();
        SpeechSynthesizer ai = new SpeechSynthesizer();
        DateTime now = DateTime.Now;
        Random rnd = new Random();
        WindowsMicMute micMute = new WindowsMicMute();
        TwitterService twitter = new TwitterService("--", "--", "--", "--");

        //string qevent;
        //string procwindow;
        //double timer = 10;
        //int count = 1;

        public Form1()
        {
            InitializeComponent();
        }

        private void Form1_Load(object sender, EventArgs e)
        {
            sRecognizer.SetInputToDefaultAudioDevice();
            sRecognizer.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(File.ReadAllLines(@"d:\bibliotheek\mijn documenten\commands.txt")))));
            sRecognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(rSpeechRecognized);
            sRecognizer.RecognizeAsync(RecognizeMode.Multiple);

            // load the commands at start-up
            string[] commands = File.ReadAllLines(@"d:\bibliotheek\mijn documenten\commands.txt");
            lstCommands.Items.Clear();
            lstCommands.SelectionMode = SelectionMode.None;
            foreach (string command in commands)
            {
                lstCommands.Items.Add(command);
            }
        }

        void rSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            int ranNum = rnd.Next(1, 10);
            string speech = e.Result.Text;

            switch (speech)
            {
                // greetings
                case "hello": // when "hello" is spoken into the microphone
                    if (ranNum <= 3)
                    {
                        ai.Speak("hello sir");
                    }
                    else if (ranNum >= 4 && ranNum <= 6)
                    {
                        ai.Speak("greetings");
                    }
                    else if (ranNum >= 7)
                    {
                        ai.Speak("good day to you");
                    }
                    break;

                case "ai": // when "ai" is spoken into the microphone
                    if (ranNum <= 4)
                    {
                        ai.Speak("yes sir");
                    }
                    else if (ranNum >= 5)
                    {
                        ai.Speak("yes?");
                    }
                    break;

                // close
                case "exit program": // when "exit program" is spoken into the microphone
                    ai.Speak("until next time");
                    this.Close(); // the application is closed
                    break;

                // websites
                case "open google": // when "open google" is spoken into the microphone
                    System.Diagnostics.Process.Start("http://www.google.nl"); // Google is opened
                    break;
                case "open youtube": // when "open youtube" is spoken into the microphone
                    System.Diagnostics.Process.Start("https://www.youtube.com/feed/subscriptions"); // YouTube is opened
                    break;
                case "open tweakers": // when "open tweakers" is spoken into the microphone
                    System.Diagnostics.Process.Start("http://tweakers.net/"); // Tweakers is opened
                    break;

                // programs
                case "run guild wars": // when "run guild wars" is spoken into the microphone
                    System.Diagnostics.Process.Start("d:\\entertainment\\guild wars 2\\gw2.exe"); // Guild Wars 2 is started
                    ai.Speak("loading program");
                    break;

                // information about the day
                case "whats time":
                    ai.Speak(DateTime.Now.ToString("HH:mm")); // says the current time
                    break;
                case "whats day":
                    ai.Speak(DateTime.Today.ToString("dddd")); // says the current day
                    break;
                case "whats date":
                    ai.Speak(DateTime.Today.ToString("dd-MMM-yyyy")); // says today's date
                    break;

                // other commands
                case "go fullscreen":
                    FormBorderStyle = FormBorderStyle.None;
                    WindowState = FormWindowState.Maximized;
                    TopMost = true;
                    ai.Speak("going to fullscreen mode");
                    break;
                case "exit fullscreen":
                    FormBorderStyle = FormBorderStyle.Sizable;
                    WindowState = FormWindowState.Normal;
                    TopMost = false;
                    ai.Speak("exiting fullscreen mode");
                    break;

                // twitter
                case "post on twitter":
                    if (listBox1.Visible == true)
                    {
                        this.textBox1.Location = new System.Drawing.Point(89, 163);
                        this.label1.Location = new System.Drawing.Point(18, 166);
                    }
                    textBox1.Visible = true;
                    label1.Visible = true;
                    break;
                case "post":
                    if (textBox1.Visible == false)
                    {
                        ai.Speak("say post on twitter first");
                    }
                    else if (string.IsNullOrEmpty(textBox1.Text.Trim()))
                    {
                        ai.Speak("you have to write down something");
                    }
                    else
                    {
                        twitter.SendTweet(new SendTweetOptions() { Status = textBox1.Text });
                        ai.Speak("your tweet has been posted");
                        textBox1.Clear();
                    }
                    break;
                case "clear post":
                    textBox1.Visible = false;
                    label1.Visible = false;
                    break;
                case "show tweets":
                    listBox1.Visible = true;
                    label2.Visible = true;
                    if (textBox1.Visible == true)
                    {
                        this.textBox1.Location = new System.Drawing.Point(89, 163);
                        this.label1.Location = new System.Drawing.Point(18, 166);
                    }
                    listBox1.Items.Clear();
                    var getTweets = twitter.ListTweetsOnHomeTimeline(new ListTweetsOnHomeTimelineOptions() { Count = 10 });
                    foreach (var tweets in getTweets)
                    {
                        listBox1.Items.Add(tweets.Text);
                    }
                    break;
                case "clear tweets":
                    listBox1.Visible = false;
                    label2.Visible = false;
                    this.textBox1.Location = new System.Drawing.Point(89, 9);
                    this.label1.Location = new System.Drawing.Point(18, 12);
                    break;
                case "update tweets":
                    if (listBox1.Visible == false)
                    {
                        ai.Speak("i cant update without getting tweets first");
                    }
                    else
                    {
                        listBox1.Items.Clear();
                        var update = twitter.ListTweetsOnHomeTimeline(new ListTweetsOnHomeTimelineOptions() { Count = 10 });
                        foreach (var tweets in update)
                        {
                            listBox1.Items.Add(tweets.Text);
                        }
                    }
                    break;

                // this is the case that does not work as expected
                case "listen":
                    ai.Speak("i am listening");
                    textBox1.Text = textBox1.Text + " " + e.Result.Text.ToString();
                    break;

                /*
                case "show commands":
                    string[] commands = File.ReadAllLines(@"d:\bibliotheek\mijn documenten\commands.txt");
                    jarvis.Speak("very well");
                    lstCommands.Items.Clear();
                    lstCommands.SelectionMode = SelectionMode.None;
                    lstCommands.Visible = true;
                    foreach (string command in commands)
                    {
                        lstCommands.Items.Add(command);
                    }
                    break;
                case "hide commands":
                    lstCommands.Visible = false;
                    break;
                */
            }
        }

        private void lstCommands_SelectedIndexChanged(object sender, EventArgs e)
        {
        }

        private void btnMic_Click(object sender, EventArgs e)
        {
            if (btnMic.Text == "mute")
            {
                btnMic.Text = "unmute";
                micMute.MuteMic();
                ai.Speak("muted");
            }
            else if (btnMic.Text == "unmute")
            {
                btnMic.Text = "mute";
                micMute.UnMuteMic();
                ai.Speak("unmuted");
            }
        }
    }
}
```
Edit:

I need help with this piece of code:
case "listen": ai.speak("i listening"); textbox1.text = textbox1.text + " " + e.result.text.tostring(); break;
When I say "listen", the AI responds with "i am listening". After that, it should place the text I speak into the microphone in textBox1, but it doesn't; it just places "listen" in it.

Everything else works fine!
case "listen": ai.speak("i listening"); textbox1.text = textbox1.text + " " + e.result.text.tostring(); break;
`e.Result` contains the result obtained when the user said "listen". You need to listen to the user again after you make the AI say "i am listening":
case "listen": ai.speak("i listening"); var result = srecognizer.recognize(); textbox1.text += " " + result.text; break;