Skip to content
Snippets Groups Projects
Commit 991eec42 authored by v4nkor's avatar v4nkor
Browse files

WIP added TTS + working command recognition

parent fc845bd2
No related branches found
No related tags found
No related merge requests found
/**
 * Run `fn` once the DOM is usable.
 * If parsing has already finished, defer to the next tick so the callback
 * is always invoked asynchronously; otherwise wait for DOMContentLoaded.
 * @param {Function} fn - Callback to run when the document is ready.
 */
function docReady(fn) {
  const state = document.readyState;
  const domIsReady = state === "complete" || state === "interactive";
  if (!domIsReady) {
    document.addEventListener("DOMContentLoaded", fn);
    return;
  }
  setTimeout(fn, 1);
}
\ No newline at end of file
...@@ -2,21 +2,67 @@ var voiceBtn = document.getElementById("voiceBtn"); ...@@ -2,21 +2,67 @@ var voiceBtn = document.getElementById("voiceBtn");
var voiceDiv = document.getElementById("voiceDiv"); var voiceDiv = document.getElementById("voiceDiv");
var voiceSpan = document.getElementById("voiceSpan"); var voiceSpan = document.getElementById("voiceSpan");
var heardSpan = document.getElementById("heardSpan"); var heardSpan = document.getElementById("heardSpan");
var commandSpan = document.getElementById("commandSpan");
const voiceSelect = document.getElementById("selectVoice");
let voices = [];
const synth = window.speechSynthesis;
var SpeechRecognition = SpeechRecognition || webkitSpeechRecognition; var SpeechRecognition = SpeechRecognition || webkitSpeechRecognition;
var SpeechGrammarList = SpeechGrammarList || webkitSpeechGrammarList; var SpeechGrammarList = SpeechGrammarList || webkitSpeechGrammarList;
var SpeechRecognitionEvent = SpeechRecognitionEvent || webkitSpeechRecognitionEvent; var SpeechRecognitionEvent = SpeechRecognitionEvent || webkitSpeechRecognitionEvent;
/**
 * (Re)build the #selectVoice dropdown from the synthesizer's current voices.
 * May be called repeatedly (e.g. from the browser's "voiceschanged" event),
 * so the select is cleared first — the original appended on every call,
 * duplicating every voice entry each time it ran.
 */
function populateVoiceList() {
    voices = synth.getVoices().sort(function (a, b) {
        // Case-insensitive alphabetical order by voice name.
        const aname = a.name.toUpperCase();
        const bname = b.name.toUpperCase();
        if (aname < bname) {
            return -1;
        } else if (aname === bname) {
            return 0;
        } else {
            return 1;
        }
    });
    // Preserve the user's selection across rebuilds.
    const selectedIndex = voiceSelect.selectedIndex < 0 ? 0 : voiceSelect.selectedIndex;
    // BUG FIX: drop stale options before repopulating to avoid duplicates.
    voiceSelect.innerHTML = "";
    for (let i = 0; i < voices.length; i++) {
        const option = document.createElement("option");
        option.textContent = `${voices[i].name} (${voices[i].lang})`;
        if (voices[i].default) {
            option.textContent += " -- DEFAULT";
        }
        option.setAttribute("data-lang", voices[i].lang);
        option.setAttribute("data-name", voices[i].name);
        voiceSelect.appendChild(option);
    }
    voiceSelect.selectedIndex = selectedIndex;
}
voiceBtn.addEventListener("click", startListening); voiceBtn.addEventListener("click", startListening);
// Warn up front if the browser cannot do text-to-speech at all.
// (The original had an empty `if` branch with the logic buried in `else`;
// inverting the condition says the same thing directly.)
if (!('speechSynthesis' in window)) {
    alert("Sorry, your browser doesn't support text to speech!");
}
/**
 * Reset the UI to its "listening" state and start wake-word detection.
 * Disables the mic button, shows the prompt, clears previous transcripts,
 * then hands off to listenForCristo().
 */
function startListening(){
    voiceBtn.disabled = true;
    voiceSpan.innerText = 'Dites "Cristo !" pour commencer a donner votre requete';
    voiceSpan.style.display = "";
    heardSpan.innerText = "";
    heardSpan.style.display = "none";
    commandSpan.innerText = "";
    commandSpan.style.display = "none";
    // textContent, for consistency with enableBtn() and the error handlers
    // (the original mixed innerText/textContent on the same element).
    voiceBtn.textContent = "Ecoute en cours";
    listenForCristo();
}
...@@ -36,34 +82,31 @@ function listenForCristo(){ ...@@ -36,34 +82,31 @@ function listenForCristo(){
recognition.lang = 'fr-FR'; recognition.lang = 'fr-FR';
recognition.interimResults = false; recognition.interimResults = false;
recognition.maxAlternatives = 1; recognition.maxAlternatives = 1;
speak("Dites cristeaux pour donner votre requete");
recognition.start(); recognition.start();
console.log('Started listening'); console.log('Started first listening');
recognition.onresult = function(event) { recognition.onresult = function(event) {
console.log('Listened'); console.log('Listened');
/* if(heard.toLowerCase().includes("cristo")){
} */
var speechResult = event.results[0][0].transcript.toLowerCase(); var speechResult = event.results[0][0].transcript.toLowerCase();
console.log(speechResult); console.log(speechResult);
heardSpan.innerText += " A entendu = " + speechResult; heardSpan.innerText += " A entendu = " + speechResult;
heardSpan.style.display = ""; heardSpan.style.display = "";
voiceSpan.style.display = "none";
console.log('Confidence: ' + event.results[0][0].confidence);
if(speechResult.toLowerCase().includes("cristaux")){ if(speechResult.toLowerCase().includes("cristaux")){
speak("Veuillez me donner votre requete");
console.log('Heard Cristo ! (or Cristaux)'); console.log('Heard Cristo ! (or Cristaux)');
listenForCommand();
} else { } else {
speak("Je n'ai pas entendu Cristeaux");
console.log('Heard nothing'); console.log('Heard nothing');
enableBtn();
} }
voiceSpan.style.display = "none";
console.log('Confidence: ' + event.results[0][0].confidence);
} }
recognition.onspeechend = function() { recognition.onspeechend = function() {
recognition.stop(); recognition.stop();
voiceBtn.disabled = false;
voiceBtn.textContent = 'Ecouter a nouveau';
voiceSpan.style.display = "none";
} }
recognition.onerror = function(event) { recognition.onerror = function(event) {
voiceBtn.disabled = false; voiceBtn.disabled = false;
voiceBtn.textContent = 'Ecouter a nouveau'; voiceBtn.textContent = 'Ecouter a nouveau';
...@@ -72,39 +115,119 @@ function listenForCristo(){ ...@@ -72,39 +115,119 @@ function listenForCristo(){
heardSpan.innerText += " Erreur "; heardSpan.innerText += " Erreur ";
heardSpan.style.display = ""; heardSpan.style.display = "";
} }
recognition.onaudiostart = function(event) { recognition.onaudiostart = function(event) {
//Fired when the user agent has started to capture audio. //Fired when the user agent has started to capture audio.
console.log('SpeechRecognition.onaudiostart'); console.log('SpeechRecognition.onaudiostart');
} }
recognition.onaudioend = function(event) { recognition.onaudioend = function(event) {
//Fired when the user agent has finished capturing audio. //Fired when the user agent has finished capturing audio.
console.log('SpeechRecognition.onaudioend'); console.log('SpeechRecognition.onaudioend');
} }
recognition.onend = function(event) { recognition.onend = function(event) {
//Fired when the speech recognition service has disconnected. //Fired when the speech recognition service has disconnected.
console.log('SpeechRecognition.onend'); /* console.log('SpeechRecognition.onend'); */
console.log("A entendu = " + event.results); /* console.log("A entendu = " + event.results); */
voiceSpan.style.display = "none"; voiceSpan.style.display = "none";
} }
recognition.onnomatch = function(event) { recognition.onnomatch = function(event) {
//Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold. //Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
console.log('SpeechRecognition.onnomatch'); console.log('SpeechRecognition.onnomatch');
} }
recognition.onsoundstart = function(event) { recognition.onsoundstart = function(event) {
//Fired when any sound — recognisable speech or not — has been detected. //Fired when any sound — recognisable speech or not — has been detected.
console.log('SpeechRecognition.onsoundstart'); console.log('SpeechRecognition.onsoundstart');
} }
recognition.onsoundend = function(event) { recognition.onsoundend = function(event) {
//Fired when any sound — recognisable speech or not — has stopped being detected. //Fired when any sound — recognisable speech or not — has stopped being detected.
console.log('SpeechRecognition.onsoundend'); /* console.log('SpeechRecognition.onsoundend'); */
}
recognition.onspeechstart = function (event) {
//Fired when sound that is recognised by the speech recognition service as speech has been detected.
console.log('SpeechRecognition.onspeechstart');
}
recognition.onstart = function(event) {
//Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.
console.log('SpeechRecognition.onstart');
}
}
/**
 * Second listening phase: the wake word was heard, now capture the actual
 * command ("descend", "monte", "recette suivante/precedente/aleatoire")
 * and acknowledge it via text-to-speech. Always re-enables the mic button
 * when a result (or error) arrives.
 */
function listenForCommand(){
    // JSGF grammar hinting the expected command words to the recognizer.
    var grammar = '#JSGF V1.0; grammar phrase; public <phrase> = descends | descend | remonte | monte | recette | aleatoire | suivante | precedente;';
    var recognition = new SpeechRecognition();
    var speechRecognitionList = new SpeechGrammarList();
    speechRecognitionList.addFromString(grammar, 1);
    recognition.grammars = speechRecognitionList;
    recognition.lang = 'fr-FR';
    recognition.interimResults = false;
    recognition.maxAlternatives = 1;
    recognition.start();
    // Was 'Started first listening' — copy-pasted from listenForCristo.
    console.log('Started command listening');
    recognition.onresult = function(event) {
        console.log('Listened');
        // Lowercased once here; the redundant per-branch .toLowerCase()
        // calls of the original are dropped.
        var speechResult = event.results[0][0].transcript.toLowerCase();
        console.log(speechResult);
        heardSpan.innerText += " A entendu = " + speechResult;
        heardSpan.style.display = "";
        voiceSpan.style.display = "none";
        console.log('Confidence: ' + event.results[0][0].confidence);
        if (speechResult.includes("descend")) {
            speak("Je vais descendre dans la page");
        } else if (speechResult.includes("monte")) {
            speak("Je vais monter dans la page");
        } else if (speechResult.includes("recette")) {
            // NOTE(review): the fr-FR recognizer returns accented words, so
            // these accented checks look right, but the JSGF grammar above
            // lists the unaccented forms — confirm which one matches.
            if (speechResult.includes("précédente")) {
                speak("Voici la recette precedente");
            } else if (speechResult.includes("suivante")) {
                speak("Voici la prochaine recette");
            } else if (speechResult.includes("aléatoire")) {
                speak("Voici une recette aleatoire");
            }
        } else {
            speak("Je n'ai rien entendu ou je n'ai pas reconnu la requete");
        }
        enableBtn();
    }
    recognition.onspeechend = function() {
        recognition.stop();
    }
    recognition.onerror = function(event) {
        enableBtn();
        // BUG FIX: the original referenced 'diagnosticPara', which does not
        // exist in this file (copy-paste from a demo) and threw a
        // ReferenceError inside the error handler. Report via heardSpan,
        // matching listenForCristo's error path.
        console.log('Error occurred in recognition: ' + event.error);
        heardSpan.innerText += " Erreur ";
        heardSpan.style.display = "";
    }
    recognition.onaudiostart = function(event) {
        // Fired when the user agent has started to capture audio.
        console.log('SpeechRecognition.onaudiostart');
    }
    recognition.onaudioend = function(event) {
        // Fired when the user agent has finished capturing audio.
        console.log('SpeechRecognition.onaudioend');
    }
    recognition.onend = function(event) {
        // Fired when the speech recognition service has disconnected.
        voiceSpan.style.display = "none";
    }
    recognition.onnomatch = function(event) {
        // Fired when the service returns a final result with no significant
        // recognition (below the confidence threshold).
        console.log('SpeechRecognition.onnomatch');
    }
    recognition.onsoundstart = function(event) {
        // Fired when any sound — recognisable speech or not — is detected.
        console.log('SpeechRecognition.onsoundstart');
    }
    recognition.onsoundend = function(event) {
        // Fired when sound stops being detected.
    }
    recognition.onspeechstart = function (event) {
        // Fired when sound recognised as speech has been detected.
        console.log('SpeechRecognition.onspeechstart');
    }
    recognition.onstart = function(event) {
        // Fired when the service has begun listening with intent to
        // recognise grammars associated with this SpeechRecognition.
        console.log('SpeechRecognition.onstart');
    }
}
/**
 * Speak `text` through the Web Speech synthesis engine, using the voice
 * currently chosen in the #selectVoice dropdown when one is selected.
 * Refuses (with a console message) while a previous utterance is still
 * playing, so feedback never overlaps.
 * @param {string} text - The sentence to read aloud.
 */
function speak(text){
    if (synth.speaking) {
        console.error("speechSynthesis.speaking");
        return;
    }
    const utterThis = new SpeechSynthesisUtterance(text);
    utterThis.onend = function (event) {
        console.log("SpeechSynthesisUtterance.onend");
    };
    utterThis.onerror = function (event) {
        console.error("SpeechSynthesisUtterance.onerror");
    };
    // BUG FIX: selectedOptions[0] is undefined while the voice list is
    // still empty (voices load asynchronously), and the original threw a
    // TypeError here. Guard and fall back to the browser's default voice.
    const selectedName = voiceSelect.selectedOptions[0]?.getAttribute("data-name");
    if (selectedName) {
        const match = voices.find((v) => v.name === selectedName);
        if (match) {
            utterThis.voice = match;
        }
    }
    utterThis.pitch = 1;
    utterThis.rate = 1;
    synth.speak(utterThis);
}
/**
 * Return the UI to its idle state so the user can start a new listening
 * session: hide the prompt, relabel the mic button, and re-enable it.
 */
function enableBtn(){
    voiceSpan.style.display = "none";
    voiceBtn.textContent = 'Ecouter a nouveau';
    voiceBtn.disabled = false;
}
docReady(function() {
    populateVoiceList();
    // BUG FIX: Chrome loads voices asynchronously — getVoices() is usually
    // empty on the first call at DOMContentLoaded, leaving the dropdown
    // blank. Rebuild whenever the browser reports the list changed.
    if (synth.onvoiceschanged !== undefined) {
        synth.onvoiceschanged = populateVoiceList;
    }
    // Preselect a French voice when available; a non-matching value is a
    // harmless no-op on a <select>.
    voiceSelect.value = "Microsoft Paul - French (France) (fr-FR)";
});
\ No newline at end of file
...@@ -2,4 +2,7 @@ ...@@ -2,4 +2,7 @@
<i class="fa-solid fa-microphone"></i><button id="voiceBtn">Commencer l'ecoute du micro</button><span id="voiceSpan" style="display:none"></span> <i class="fa-solid fa-microphone"></i><button id="voiceBtn">Commencer l'ecoute du micro</button><span id="voiceSpan" style="display:none"></span>
<br> <br>
<span id="heardSpan" style="display:none"></span> <span id="heardSpan" style="display:none"></span>
<span id="commandSpan" style="display:none"></span>
<select id="selectVoice" style="display:none;"></select>
<p>Voici les commandes : - Descend - Remonte - Recette suivante - Recette précédente - Recette aléatoire</p>
</div> </div>
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment