Reputation: 161
I have a problem with the Speech API. I'm working with commands, and that's working fine, but when I want the recognized speech to be shown in textBox1, it won't show it.
This is the code I need help with. I'm working with a switch case. I tried several if statements, but none of them works.
case "listen":
AI.Speak("I am listening");
textBox1.Text = textBox1.Text + " " + e.Result.Text.ToString();
break;
Each time I say 'listen', only 'listen' becomes visible in the textBox.
Here is the full code:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Threading;
using System.Windows.Forms;
using System.Speech.Recognition;
using System.Speech.Synthesis;
using System.IO;
using System.Xml;
using System.Web;
using WindowsMicrophoneMuteLibrary;
using TweetSharp;
/*
*
*
*
*
*
*/
namespace Test
{
public partial class Form1 : Form
{
    // Continuous speech recognizer fed by the default microphone (configured in Form1_Load).
    SpeechRecognitionEngine sRecognizer = new SpeechRecognitionEngine();
    // Text-to-speech voice used for all spoken feedback.
    SpeechSynthesizer AI = new SpeechSynthesizer();
    DateTime now = DateTime.Now;
    Random rnd = new Random();
    WindowsMicMute micMute = new WindowsMicMute();
    TwitterService twitter = new TwitterService("--", "--", "--", "--");
    // BUG FIX for the "listen" command: when true, the NEXT recognized phrase
    // is appended to textBox1 instead of being executed as a command.
    // (Inside case "listen", e.Result.Text is always just "listen" itself,
    // which is why only "listen" ever appeared in the text box.)
    bool appendNextPhrase = false;
    //string QEvent;
    //string ProcWindow;
    //double timer = 10;
    //int count = 1;

    public Form1()
    {
        InitializeComponent();
    }

    private void Form1_Load(object sender, EventArgs e)
    {
        // Recognize continuously from the default microphone. The grammar is
        // built solely from the phrases in Commands.txt, so only those
        // phrases can ever be recognized.
        sRecognizer.SetInputToDefaultAudioDevice();
        sRecognizer.LoadGrammar(new Grammar(new GrammarBuilder(new Choices(File.ReadAllLines(@"D:\Bibliotheek\Mijn Documenten\Commands.txt")))));
        sRecognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(rSpeechRecognized);
        sRecognizer.RecognizeAsync(RecognizeMode.Multiple);

        // Load the command list into the list box at start-up.
        string[] commands = (File.ReadAllLines(@"D:\Bibliotheek\Mijn Documenten\Commands.txt"));
        lstCommands.Items.Clear();
        lstCommands.SelectionMode = SelectionMode.None;
        foreach (string command in commands)
        {
            lstCommands.Items.Add(command);
        }
    }

    // Dispatches each recognized phrase to its command handler.
    void rSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
    {
        int ranNum = rnd.Next(1, 10); // 1..9, used to vary spoken replies
        string speech = e.Result.Text;

        // "listen" mode: the previous recognition armed the flag, so this
        // phrase goes into the text box instead of the switch below.
        // NOTE: only phrases present in Commands.txt can be captured here,
        // because that file is the engine's entire grammar.
        if (appendNextPhrase)
        {
            appendNextPhrase = false;
            textBox1.Text = textBox1.Text + " " + speech;
            return;
        }

        switch (speech)
        {
            // GREETINGS
            case "hello": // when "hello" is spoken into the microphone
                if (ranNum <= 3)
                {
                    AI.Speak("Hello Sir"); // 1-3
                }
                else if (ranNum >= 4 && ranNum <= 6)
                {
                    AI.Speak("Greetings"); // 4-6
                }
                else if (ranNum >= 7)
                {
                    AI.Speak("Good day to you"); // 7-9
                }
                break;
            case "AI": // when "AI" is spoken into the microphone
                if (ranNum <= 4)
                {
                    AI.Speak("Yes Sir"); // 1-4
                }
                else if (ranNum >= 5)
                {
                    AI.Speak("Yes?"); // 5-9
                }
                break;
            // EXIT
            case "exit program": // when "exit program" is spoken into the microphone
                AI.Speak("Until next time");
                this.Close(); // closes the application
                break;
            // WEBSITES
            case "open google": // when "open google" is spoken into the microphone
                System.Diagnostics.Process.Start("http://www.google.nl"); // opens Google
                break;
            case "open youtube": // when "open youtube" is spoken into the microphone
                System.Diagnostics.Process.Start("https://www.youtube.com/feed/subscriptions"); // opens YouTube
                break;
            case "open tweakers": // when "open tweakers" is spoken into the microphone
                System.Diagnostics.Process.Start("http://tweakers.net/"); // opens Tweakers
                break;
            // PROGRAMS
            case "run guild wars": // when "run guild wars" is spoken into the microphone
                System.Diagnostics.Process.Start("D:\\Entertainment\\Guild Wars 2\\Gw2.exe"); // starts Guild Wars 2
                AI.Speak("Loading program");
                break;
            // TODAY'S INFORMATION
            case "whats the time": // when "whats the time" is spoken into the microphone
                AI.Speak(DateTime.Now.ToString("HH:mm")); // speaks the current time
                break;
            case "whats the day": // when "whats the day" is spoken into the microphone
                AI.Speak(DateTime.Today.ToString("dddd")); // speaks the day of the week
                break;
            case "whats the date": // when "whats the date" is spoken into the microphone
                AI.Speak(DateTime.Today.ToString("dd-MMM-yyyy")); // speaks today's date
                break;
            // OTHER COMMANDS
            case "go fullscreen": // when "go fullscreen" is spoken into the microphone
                FormBorderStyle = FormBorderStyle.None;
                WindowState = FormWindowState.Maximized;
                TopMost = true;
                AI.Speak("Going into fullscreen mode");
                break;
            case "exit fullscreen": // when "exit fullscreen" is spoken into the microphone
                FormBorderStyle = FormBorderStyle.Sizable;
                WindowState = FormWindowState.Normal;
                TopMost = false;
                AI.Speak("Exiting fullscreen mode");
                break;
            // TWITTER
            case "post on twitter":
                // Move the compose box below the tweet list when the list is visible.
                if (listBox1.Visible == true)
                {
                    this.textBox1.Location = new System.Drawing.Point(89, 163);
                    this.label1.Location = new System.Drawing.Point(18, 166);
                }
                textBox1.Visible = true;
                label1.Visible = true;
                break;
            case "post":
                if (textBox1.Visible == false)
                {
                    AI.Speak("say post on twitter first");
                }
                else if (String.IsNullOrEmpty(textBox1.Text.Trim()))
                {
                    AI.Speak("you will have to write down something");
                }
                else
                {
                    twitter.SendTweet(new SendTweetOptions() { Status = textBox1.Text });
                    AI.Speak("Your tweet has been posted");
                    textBox1.Clear();
                }
                break;
            case "clear post":
                textBox1.Visible = false;
                label1.Visible = false;
                break;
            case "show tweets":
                listBox1.Visible = true;
                label2.Visible = true;
                // Keep the compose box below the list if both are shown.
                if (textBox1.Visible == true)
                {
                    this.textBox1.Location = new System.Drawing.Point(89, 163);
                    this.label1.Location = new System.Drawing.Point(18, 166);
                }
                listBox1.Visible = true;
                label2.Visible = true;
                listBox1.Items.Clear();
                var getTweets = twitter.ListTweetsOnHomeTimeline(new ListTweetsOnHomeTimelineOptions() { Count = 10 });
                foreach (var tweets in getTweets)
                {
                    listBox1.Items.Add(tweets.Text);
                }
                break;
            case "clear tweets":
                listBox1.Visible = false;
                label2.Visible = false;
                this.textBox1.Location = new System.Drawing.Point(89, 9);
                this.label1.Location = new System.Drawing.Point(18, 12);
                break;
            case "update tweets":
                if (listBox1.Visible == false)
                {
                    AI.Speak("I cant update without getting the tweets first");
                }
                else
                {
                    listBox1.Items.Clear();
                    var update = twitter.ListTweetsOnHomeTimeline(new ListTweetsOnHomeTimelineOptions() { Count = 10 });
                    foreach (var tweets in update)
                    {
                        listBox1.Items.Add(tweets.Text);
                    }
                }
                break;
            case "listen":
                // BUG FIX (and removal of stray "**" markers that made this
                // block a syntax error): arm the flag so the NEXT recognized
                // phrase is appended to textBox1. Appending e.Result.Text here
                // could only ever append "listen" itself.
                AI.Speak("I am listening");
                appendNextPhrase = true;
                break;
            /*
            case "show commands":
                string[] commands = (File.ReadAllLines(@"D:\Bibliotheek\Mijn Documenten\Commands.txt"));
                JARVIS.Speak("Very well");
                lstCommands.Items.Clear();
                lstCommands.SelectionMode = SelectionMode.None;
                lstCommands.Visible = true;
                foreach (string command in commands)
                {
                    lstCommands.Items.Add(command);
                }
                break;
            case "hide commands":
                lstCommands.Visible = false;
                break;
            */
        }
    }

    private void lstCommands_SelectedIndexChanged(object sender, EventArgs e)
    {
    }

    // Toggles the system microphone mute state; the button caption tracks the
    // action that the NEXT click will perform.
    private void btnMic_Click(object sender, EventArgs e)
    {
        if (btnMic.Text == "Mute")
        {
            btnMic.Text = "Unmute";
            micMute.MuteMic();
            AI.Speak("Muted");
        }
        else if (btnMic.Text == "Unmute")
        {
            btnMic.Text = "Mute";
            micMute.UnMuteMic();
            AI.Speak("Unmuted");
        }
    }
}
}
EDIT:
I need help with this piece of the code:
case "listen":
AI.Speak("I am listening");
textBox1.Text = textBox1.Text + " " + e.Result.Text.ToString();
break;
When I say 'listen', the AI follows up with "I am listening". After that it should place the text I'm speaking into the microphone into textBox1, but it doesn't. It only places 'listen' in it.
Everything else works fine!
Upvotes: 0
Views: 406
Reputation: 51
How quickly are you saying 'Listen'? I suspect your issue is the speech API is only looking for the first 'Listen' then it's busy parsing data while you say 'Listen' again. I believe the other answer addressed this piece correctly - once you recognize the speech text, you must call recognize again.
Try speaking less quickly, or saying listen (wait for command to parse) then listen again. Does it work as expected?
You might want to investigate using async methods.
[EDIT] After you changed your description, try this:
textBox1.Text = textBox1.Text + " " + speech;
Upvotes: 0
Reputation: 14477
case "listen":
AI.Speak("I am listening");
textBox1.Text = textBox1.Text + " " + e.Result.Text.ToString();
break;
e.Result
will contain only the result obtained when the user said "Listen".
You need to actually listen to the user again after you make the AI say "I am listening".
case "listen":
AI.Speak("I am listening");
var result = sRecognizer.Recognize();
textBox1.Text += " " + result.Text;
break;
Upvotes: 1