-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscript.js
More file actions
162 lines (138 loc) · 5.51 KB
/
script.js
File metadata and controls
162 lines (138 loc) · 5.51 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
// Feature-detect the Web Speech API: prefer the standard constructor, fall
// back to the WebKit-prefixed one (Chrome/Safari expose webkitSpeechRecognition).
// The original `new webkitSpeechRecognition() || new SpeechRecognition()` threw
// a ReferenceError on browsers without the prefixed global before the `||`
// fallback could ever run; selecting the constructor first fixes that.
var SpeechRecognitionCtor = window.SpeechRecognition || window.webkitSpeechRecognition;
var recognition = new SpeechRecognitionCtor();
recognition.continuous = true; // Keep listening across utterances instead of stopping after the first result
// Tracks whether recognition is currently active (flipped by the toggle button)
var isListening = false;
// Apply the language currently selected in the page's input dropdown to the recognizer.
function updateLanguage() {
  const selectedLang = document.getElementById("inputLang").value;
  recognition.lang = selectedLang;
}
// Start or stop speech recognition and keep the pulsating indicator
// and button label in sync with the new state.
function toggleRecognition() {
  const circle = document.querySelector('.pulsating-circle');
  if (!isListening) {
    // Re-read the language selection right before each new session.
    updateLanguage();
    recognition.start();
    circle.style.animationPlayState = 'running';
    console.log("Voice recognition started. Speak into the microphone.");
  } else {
    recognition.stop();
    circle.style.animationPlayState = 'paused';
    console.log("Voice recognition stopped.");
  }
  isListening = !isListening;
  // Reflect the new state on the toggle button.
  const label = isListening ? "End Conversation" : "Start Listening";
  document.getElementById("toggleButton").textContent = label;
}
// Wire the toggle button to start/stop recognition.
document.getElementById("toggleButton").addEventListener("click", toggleRecognition);
// Process recognition results; in continuous mode a single event may
// carry several results, starting at event.resultIndex.
recognition.onresult = function (event) {
  for (let i = event.resultIndex; i < event.results.length; i += 1) {
    const result = event.results[i];
    if (!result.isFinal) continue; // Skip interim (still-changing) results
    // The first alternative of a finalized result is the recognized transcript.
    const speechToText = result[0].transcript;
    console.log("Recognized:", speechToText);
    const inputLang = document.getElementById("inputLang").value;
    const targetLang = document.getElementById("outputLang").value;
    // Hand off to translation, which then triggers speech synthesis.
    translateTextAsync(speechToText, inputLang, targetLang);
  }
};
/**
 * Translate `text` from `inputLang` to `targetLang` via the RapidAPI
 * Google Translator endpoint, show the result in the #status element,
 * and speak it aloud via synthesizeSpeech.
 *
 * @param {string} text - Text to translate.
 * @param {string} inputLang - Source language code (value of #inputLang).
 * @param {string} targetLang - Target language code (value of #outputLang).
 * @returns {Promise<void>} Resolves after the status element is updated;
 *   all errors are caught and reported, never rethrown.
 */
async function translateTextAsync(text, inputLang, targetLang) {
  const url = "https://google-translator9.p.rapidapi.com/v2";
  const options = {
    method: "POST",
    headers: {
      "content-type": "application/json",
      // SECURITY: this API key is exposed to every visitor of the page.
      // Move the call behind a server-side proxy before shipping.
      "X-RapidAPI-Key": "8f736d5867msh55fed24ff57fbddp10d402jsn3095e9a263ac",
      "X-RapidAPI-Host": "google-translator9.p.rapidapi.com",
    },
    body: JSON.stringify({
      q: text,
      source: inputLang,
      target: targetLang,
      format: "text",
    }),
  };
  const statusEl = document.getElementById("status");
  try {
    const response = await fetch(url, options);
    // Surface HTTP-level failures (auth, quota, 5xx) explicitly instead of
    // letting them fall through to the "unexpected response" branch.
    if (!response.ok) {
      throw new Error(`Translation API responded with HTTP ${response.status}`);
    }
    const data = await response.json();
    // Debugging: inspect the actual structure of the response
    console.log(data);
    // Optional chaining covers the whole expected path in one expression,
    // including a missing or empty translations array.
    const translatedText = data?.data?.translations?.[0]?.translatedText;
    if (translatedText !== undefined) {
      statusEl.textContent = "Translated: " + translatedText;
      console.log("Translated:", translatedText);
      console.log("Synthesizing speech in " + targetLang);
      synthesizeSpeech(translatedText, targetLang);
    } else {
      // The response parsed but did not match the documented shape.
      console.error(
        "Translation API returned an unexpected response:",
        JSON.stringify(data)
      );
      statusEl.textContent =
        "Translation API returned an unexpected response. Check console for details.";
    }
  } catch (error) {
    // Network failures, JSON parse errors, and the HTTP check above all land here.
    console.error("Error calling the translation API:", error);
    statusEl.textContent = `Translation error: ${error.message}`;
  }
}
// Speak `text` aloud with the browser speechSynthesis API, using the
// given BCP-47 language code to pick an appropriate voice.
function synthesizeSpeech(text, langCode) {
  const utterance = new SpeechSynthesisUtterance(text);
  utterance.lang = langCode; // Target language for voice selection
  console.log(`Synthesizing: "${text}" in ${langCode}`);
  // Neutral delivery: default speed and pitch.
  utterance.rate = 1;
  utterance.pitch = 1;
  // Log how long playback took once it finishes.
  utterance.onend = function (event) {
    console.log(
      "SpeechSynthesisUtterance ended after " +
        event.elapsedTime +
        " milliseconds."
    );
  };
  // Report synthesis failures to the console.
  utterance.onerror = function (event) {
    console.error(
      "SpeechSynthesisUtterance encountered an error: ",
      event.error
    );
  };
  window.speechSynthesis.speak(utterance);
}
// Keep the pulsating indicator paused until the user starts listening.
document.addEventListener('DOMContentLoaded', function () {
  const circle = document.querySelector('.pulsating-circle');
  circle.style.animationPlayState = 'paused';
});
// Even in continuous mode the recognizer can stop on its own (silence,
// network hiccup). Restart it automatically as long as the user still
// intends to be listening. (Dead commented-out variants of this handler
// were removed.)
recognition.onend = function () {
  if (isListening) {
    recognition.start();
  }
};