const practicePhrases = [
    "How are you doing today?",
    "Nice to meet you",
    "Where is the nearest restaurant?",
    "Could you speak more slowly, please?",
    "What time does the train leave?",
    "I would like to order coffee",
    "How much does this cost?",
    "Could you repeat that?",
    "What do you recommend?",
    "Thank you very much"
];

document.addEventListener('DOMContentLoaded', function() {
    const startBtn = document.getElementById('startBtn');
    const playBtn = document.getElementById('playBtn');
    const volumeMeter = document.getElementById('volumeMeter');
    const currentAccent = document.getElementById('currentAccent');
    const targetAccent = document.getElementById('targetAccent');
    const conversionStatus = document.getElementById('conversionStatus');

    let isListening = false;
    let audioContext;
    let analyser;
    let microphone;
    let recordedChunks = [];
    let convertedAudioUrl = null;
    let mediaRecorder;
    let recordingTime;
    let stream;

    // Transcribe recorded audio with OpenAI Whisper.
    // NOTE: calling the OpenAI API directly from the browser exposes the
    // key; in production this request should be proxied through the backend.
    async function transcribeWithWhisper(audioBlob) {
        conversionStatus.textContent = 'Transcribing...';
        conversionStatus.className = 'px-3 py-1 bg-yellow-100 text-yellow-800 rounded-full text-sm';

        const formData = new FormData();
        // MediaRecorder typically produces WebM/Opus rather than WAV;
        // Whisper accepts WebM, and the filename should match the container.
        formData.append('file', audioBlob, 'recording.webm');
        formData.append('model', 'whisper-1');

        try {
            const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
                method: 'POST',
                headers: {
                    'Authorization': `Bearer YOUR_OPENAI_API_KEY`
                },
                body: formData
            });

            if (!response.ok) throw new Error('Transcription failed');

            const result = await response.json();
            return result.text;
        } catch (error) {
            console.error('Whisper error:', error);
            return null;
        }
    }

    // Send transcribed text to the backend for accent conversion.
    async function convertAccent(text, sourceAccent, targetAccent) {
        conversionStatus.textContent = 'Converting accent...';
        conversionStatus.className = 'px-3 py-1 bg-yellow-100 text-yellow-800 rounded-full text-sm';

        try {
            const response = await fetch('/api/convert-accent', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json'
                },
                body: JSON.stringify({
                    text: text,
                    // Normalize UI labels such as "British (RP)" and values
                    // such as "british_rp" into plain accent names for the API.
                    source_accent: sourceAccent.toLowerCase().replace(/ \(.*\)/, ''),
                    target_accent: targetAccent.replace(/_/g, ' ').replace(/ \(.*\)/, '')
                })
            });

            if (!response.ok) {
                throw new Error(`HTTP error! status: ${response.status}`);
            }

            const result = await response.json();

            if (result.error) {
                throw new Error(result.error);
            }

            if (!result.converted_audio) {
                throw new Error('No audio returned from server');
            }

            // converted_audio may be a data: URL or a fetchable URL; either
            // way, fetch() turns it into a playable blob.
            const audioBlob = await fetch(result.converted_audio).then(r => r.blob());
            const audioUrl = URL.createObjectURL(audioBlob);

            conversionStatus.textContent = 'Converted';
            conversionStatus.className = 'px-3 py-1 bg-green-100 text-green-800 rounded-full text-sm';

            return {
                text: result.converted_text,
                audioUrl: audioUrl
            };
        } catch (error) {
            console.error('Accent conversion error:', error);
            conversionStatus.textContent = 'Conversion failed';
            conversionStatus.className = 'px-3 py-1 bg-red-100 text-red-800 rounded-full text-sm';

            return {
                text: text,
                audioUrl: null,
                error: error.message
            };
        }
    }
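
    /*
     * Expected /api/convert-accent response, inferred from the client code
     * above (the actual backend contract may differ):
     *
     *   {
     *     "converted_text":  "text rewritten for the target accent",
     *     "converted_audio": "data: URL or fetchable URL of synthesized audio",
     *     "error":           "optional error message"
     *   }
     */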

    // Speak `text` in the target accent. Tries the remote TTS service first
    // and falls back to the browser's Web Speech API if the request fails.
    async function textToSpeech(text, accent) {
        conversionStatus.textContent = 'Generating speech...';
        conversionStatus.className = 'px-3 py-1 bg-blue-100 text-blue-800 rounded-full text-sm';

        try {
            const response = await fetch('https://api.accentify.com/tts', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json'
                },
                body: JSON.stringify({
                    text: text,
                    accent: accent
                })
            });

            if (!response.ok) throw new Error(`TTS request failed: ${response.status}`);

            const audioBlob = await response.blob();
            const audioUrl = URL.createObjectURL(audioBlob);

            const audio = new Audio(audioUrl);
            audio.play();

            return new Promise((resolve) => {
                audio.onended = resolve;
            });
        } catch (error) {
            console.error('TTS error, falling back to speechSynthesis:', error);

            // Fallback: browser speech synthesis tuned with per-accent
            // locale, rate, and pitch.
            const utterance = new SpeechSynthesisUtterance(text);
            utterance.lang = getLocaleForAccent(accent);
            utterance.rate = getRateForAccent(accent);
            utterance.pitch = getPitchForAccent(accent);

            const preferredVoice = findVoiceForAccent(accent, window.speechSynthesis.getVoices());
            if (preferredVoice) utterance.voice = preferredVoice;

            return new Promise(resolve => {
                utterance.onend = resolve;
                window.speechSynthesis.speak(utterance);
            });
        }
    }

    // Capture microphone audio, drive the volume meter, and kick off the
    // transcribe → convert → speak pipeline when recording stops.
    async function startRecording() {
        try {
            stream = await navigator.mediaDevices.getUserMedia({ audio: true });
            audioContext = new (window.AudioContext || window.webkitAudioContext)();
            analyser = audioContext.createAnalyser();

            const source = audioContext.createMediaStreamSource(stream);
            source.connect(analyser);

            mediaRecorder = new MediaRecorder(stream);
            recordedChunks = [];

            mediaRecorder.ondataavailable = event => {
                if (event.data.size > 0) {
                    recordedChunks.push(event.data);
                }
            };

            mediaRecorder.onstop = async () => {
                // MediaRecorder output is typically WebM/Opus.
                const audioBlob = new Blob(recordedChunks, { type: 'audio/webm' });
                const transcription = await transcribeWithWhisper(audioBlob);

                if (transcription) {
                    const sourceAccent = currentAccent.textContent;
                    const targetAccentValue = targetAccent.value;
                    // convertAccent resolves to { text, audioUrl, error? }, so
                    // pass the converted text (not the whole object) to TTS.
                    const conversion = await convertAccent(transcription, sourceAccent, targetAccentValue);
                    await textToSpeech(conversion.text, targetAccentValue);

                    conversionStatus.textContent = 'Converted';
                    conversionStatus.className = 'px-3 py-1 bg-green-100 text-green-800 rounded-full text-sm';
                    playBtn.disabled = false;
                }
            };

            mediaRecorder.start(100);
            recordingTime = Date.now();

            const updateVolumeMeter = () => {
                if (!isListening) return;

                const array = new Uint8Array(analyser.frequencyBinCount);
                analyser.getByteFrequencyData(array);
                const volume = Math.max(...array) / 255 * 100;

                volumeMeter.style.width = `${volume}%`;
                volumeMeter.className = volume > 70 ? 'bg-red-600 h-2.5 rounded-full' :
                                        volume > 30 ? 'bg-yellow-500 h-2.5 rounded-full' :
                                        'bg-blue-600 h-2.5 rounded-full';

                requestAnimationFrame(updateVolumeMeter);
            };

            updateVolumeMeter();
            return true;
        } catch (error) {
            console.error('Recording error:', error);
            return false;
        }
    }

    function stopRecording() {
        if (mediaRecorder && mediaRecorder.state !== 'inactive') {
            mediaRecorder.stop();
        }
        if (stream) {
            stream.getTracks().forEach(track => track.stop());
        }
    }

    startBtn.addEventListener('click', async function() {
        if (!isListening) {
            isListening = true;
            startBtn.classList.add('listening');
            startBtn.innerHTML = '<i data-feather="mic-off"></i> Stop Speaking';
            feather.replace();
            currentAccent.textContent = 'Detecting...';
            currentAccent.className = 'px-3 py-1 bg-blue-100 text-blue-800 rounded-full text-sm';

            const recordingStarted = await startRecording();
            if (!recordingStarted) {
                isListening = false;
                startBtn.classList.remove('listening');
                startBtn.innerHTML = '<i data-feather="mic"></i> Start Speaking';
                feather.replace();
                return;
            }

            try {
                // Give the recorder a moment to accumulate audio before
                // sampling it for accent detection; without a delay the
                // blob below would be nearly empty.
                await new Promise(resolve => setTimeout(resolve, 2000));

                const audioBlob = new Blob(recordedChunks, { type: 'audio/webm' });
                const formData = new FormData();
                formData.append('audio', audioBlob, 'recording.webm');

                const response = await fetch('/api/detect-accent', {
                    method: 'POST',
                    body: formData
                });

                const result = await response.json();

                if (result.error) {
                    throw new Error(result.error);
                }

                // Map the API's short accent codes to display labels.
                const accentMap = {
                    'american': 'American (General)',
                    'british': 'British (RP)',
                    'australian': 'Australian (General)',
                    'indian': 'Indian (General)'
                };

                const detectedAccent = accentMap[result.accent] || 'American (General)';
                currentAccent.textContent = detectedAccent;
                currentAccent.className = 'px-3 py-1 bg-green-100 text-green-800 rounded-full text-sm';
            } catch (error) {
                console.error('Accent detection error:', error);
                // Fall back to a default label so conversion can still proceed.
                currentAccent.textContent = 'American (General)';
                currentAccent.className = 'px-3 py-1 bg-blue-100 text-blue-800 rounded-full text-sm';
            }
        } else {
            isListening = false;
            startBtn.classList.remove('listening');
            startBtn.innerHTML = '<i data-feather="mic"></i> Start Speaking';
            feather.replace();

            stopRecording();
        }
    });
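
    /*
     * Expected /api/detect-accent response, inferred from the handler above
     * (the actual backend contract may differ):
     *
     *   { "accent": "american" | "british" | "australian" | "indian",
     *     "error":  "optional error message" }
     */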

    // Cache speech-synthesis voices. Some browsers populate the list
    // asynchronously, so retry until voices are available and also listen
    // for the voiceschanged event.
    let voices = [];
    function populateVoices() {
        voices = window.speechSynthesis.getVoices();
        if (voices.length === 0) {
            setTimeout(populateVoices, 100);
        }
    }
    populateVoices();
    window.speechSynthesis.onvoiceschanged = populateVoices;

    const practiceStartBtn = document.getElementById('practice-start-btn');
    const playModelBtn = document.getElementById('play-model-btn');
    const playYourRecordingBtn = document.getElementById('play-your-recording-btn');
    const practiceVolumeMeter = document.getElementById('practice-volume-meter');
    const currentPhrase = document.getElementById('current-phrase');
    const accuracyScore = document.getElementById('accuracy-score');
    const practiceAccent = document.getElementById('practice-accent');

    let isPracticing = false;
    let practiceAudioContext;
    let practiceAnalyser;
    let practiceMediaRecorder;
    let practiceRecordedChunks = [];
    let practiceStream;

    function getRandomPhrase() {
        return practicePhrases[Math.floor(Math.random() * practicePhrases.length)];
    }
    currentPhrase.textContent = `"${getRandomPhrase()}"`;

    playModelBtn.addEventListener('click', function() {
        const targetAccent = practiceAccent.value;
        const sampleText = currentPhrase.textContent.replace(/"/g, '');
        const utterance = new SpeechSynthesisUtterance(sampleText);

        utterance.lang = getLocaleForAccent(targetAccent);
        utterance.rate = getRateForAccent(targetAccent);
        utterance.pitch = getPitchForAccent(targetAccent);

        const preferredVoice = findVoiceForAccent(targetAccent, window.speechSynthesis.getVoices());
        if (preferredVoice) utterance.voice = preferredVoice;
        window.speechSynthesis.speak(utterance);
    });

    practiceStartBtn.addEventListener('click', async function() {
        if (!isPracticing) {
            isPracticing = true;
            practiceStartBtn.innerHTML = '<i data-feather="mic-off"></i> Stop Recording';
            practiceStartBtn.classList.add('listening');
            feather.replace();

            try {
                practiceStream = await navigator.mediaDevices.getUserMedia({ audio: true });
                practiceAudioContext = new (window.AudioContext || window.webkitAudioContext)();
                practiceAnalyser = practiceAudioContext.createAnalyser();

                const source = practiceAudioContext.createMediaStreamSource(practiceStream);
                source.connect(practiceAnalyser);

                practiceMediaRecorder = new MediaRecorder(practiceStream);
                practiceRecordedChunks = [];

                practiceMediaRecorder.ondataavailable = event => {
                    if (event.data.size > 0) {
                        practiceRecordedChunks.push(event.data);
                    }
                };

                practiceMediaRecorder.onstop = async () => {
                    // Placeholder scoring: a random value in 60-99 stands in
                    // for real pronunciation analysis.
                    const randomScore = Math.floor(Math.random() * 40) + 60;
                    accuracyScore.textContent = `${randomScore}%`;

                    if (randomScore >= 85) {
                        accuracyScore.className = 'px-3 py-1 bg-green-100 text-green-800 rounded-full text-sm excellent feedback-animate';
                    } else if (randomScore >= 70) {
                        accuracyScore.className = 'px-3 py-1 bg-yellow-100 text-yellow-800 rounded-full text-sm good feedback-animate';
                    } else {
                        accuracyScore.className = 'px-3 py-1 bg-red-100 text-red-800 rounded-full text-sm poor feedback-animate';
                    }

                    playYourRecordingBtn.disabled = false;
                    currentPhrase.textContent = `"${getRandomPhrase()}"`;
                };

                practiceMediaRecorder.start(100);

                const updatePracticeVolumeMeter = () => {
                    if (!isPracticing) return;

                    const array = new Uint8Array(practiceAnalyser.frequencyBinCount);
                    practiceAnalyser.getByteFrequencyData(array);
                    const volume = Math.max(...array) / 255 * 100;

                    practiceVolumeMeter.style.width = `${volume}%`;
                    practiceVolumeMeter.className = volume > 70 ? 'bg-red-600 h-2.5 rounded-full' :
                                                    volume > 30 ? 'bg-yellow-500 h-2.5 rounded-full' :
                                                    'bg-blue-600 h-2.5 rounded-full';

                    requestAnimationFrame(updatePracticeVolumeMeter);
                };

                updatePracticeVolumeMeter();
            } catch (error) {
                console.error('Practice recording error:', error);
                isPracticing = false;
                practiceStartBtn.innerHTML = '<i data-feather="mic"></i> Record Your Pronunciation';
                practiceStartBtn.classList.remove('listening');
                feather.replace();
            }
        } else {
            isPracticing = false;
            practiceStartBtn.innerHTML = '<i data-feather="mic"></i> Record Your Pronunciation';
            practiceStartBtn.classList.remove('listening');
            feather.replace();

            if (practiceMediaRecorder && practiceMediaRecorder.state !== 'inactive') {
                practiceMediaRecorder.stop();
            }
            if (practiceStream) {
                practiceStream.getTracks().forEach(track => track.stop());
            }
        }
    });

    playYourRecordingBtn.addEventListener('click', function() {
        if (practiceRecordedChunks.length > 0) {
            const audioBlob = new Blob(practiceRecordedChunks, { type: 'audio/webm' });
            const audioUrl = URL.createObjectURL(audioBlob);
            const audio = new Audio(audioUrl);
            audio.play();
        }
    });

    playBtn.addEventListener('click', async function() {
        if (!isListening && currentAccent.textContent !== 'Not detected') {
            try {
                conversionStatus.textContent = 'Processing...';
                conversionStatus.className = 'px-3 py-1 bg-yellow-100 text-yellow-800 rounded-full text-sm';

                const audioBlob = new Blob(recordedChunks, { type: 'audio/webm' });

                const transcription = await transcribeWithWhisper(audioBlob);
                if (!transcription) {
                    throw new Error('Transcription failed');
                }

                const sourceAccent = currentAccent.textContent;
                const targetAccentValue = targetAccent.value;

                const conversionResult = await convertAccent(
                    transcription,
                    sourceAccent,
                    targetAccentValue
                );

                if (conversionResult.error) {
                    throw new Error(conversionResult.error);
                }

                if (conversionResult.audioUrl) {
                    const audio = new Audio(conversionResult.audioUrl);
                    conversionStatus.textContent = 'Playing...';

                    audio.onended = () => {
                        conversionStatus.textContent = 'Converted';
                        conversionStatus.className = 'px-3 py-1 bg-green-100 text-green-800 rounded-full text-sm';
                    };

                    audio.play();
                } else {
                    throw new Error('No audio to play');
                }
            } catch (error) {
                console.error('Playback error:', error);
                conversionStatus.textContent = 'Error';
                conversionStatus.className = 'px-3 py-1 bg-red-100 text-red-800 rounded-full text-sm';
                alert('Failed to process audio: ' + error.message);
            }
        } else {
            alert('Please record your voice and detect your accent first.');
        }
    });

    // Canned sample sentences for each target accent.
    function generateAccentSampleText(accent) {
        const samples = {
            'american_general': "Hello there! This is your voice converted to an American accent. Notice how the pronunciation and intonation patterns have changed.",
            'british_rp': "Good day! This is your voice now speaking with a British RP accent. The vowels are more clipped and the intonation is more varied.",
            'australian_general': "G'day mate! Here's your voice with an Australian twang. The vowels are more nasal and the intonation rises at the end of sentences.",
            'indian_general': "Namaskar! Your voice now has an Indian English accent, with clearer consonants and a distinctive rhythm pattern."
        };
        return samples[accent] || "This is your converted voice speaking.";
    }

    function generateAccentSpecificText(targetAccent, detectedAccent) {
        const phrases = {
            greeting: {
                'american_general': "How's it going?",
                'british_rp': "How do you do?",
                'australian_general': "G'day mate!",
                'indian_general': "Namaste, how are you?",
                default: "Hello there!"
            },
            question: {
                'american_general': "What's the weather like today?",
                'british_rp': "Could you tell me about the weather?",
                'australian_general': "Strewth, it's hot today, isn't it?",
                'indian_general': "How is the weather today?",
                default: "Nice weather we're having."
            },
            response: {
                'american_general': "Sounds good to me!",
                'british_rp': "That sounds quite splendid!",
                'australian_general': "Too right, mate!",
                'indian_general': "Very good, sir!",
                default: "That's wonderful!"
            }
        };
        const getPhrase = (type) => phrases[type][targetAccent] || phrases[type].default;

        return `[${detectedAccent} to ${targetAccent.split('_')[0]}] ${getPhrase('greeting')} ${getPhrase('question')} ${getPhrase('response')}`;
    }

    function getLocaleForAccent(accent) {
        const accentMap = {
            'british_rp': 'en-GB',
            'american_general': 'en-US',
            'australian_general': 'en-AU',
            'indian_general': 'en-IN'
        };
        return accentMap[accent] || 'en-US';
    }

    function getRateForAccent(accent) {
        const rateMap = {
            'british_rp': 0.9,
            'british_cockney': 1.1,
            'american_southern': 0.85,
            'irish_dublin': 1.2,
            'australian_broad': 1.05,
            'scottish_glasgow': 1.15,
            'canadian_maritime': 0.95,
            'jamaican_kingston': 1.1,
            'nigerian_lagos': 0.9,
            'hongkong': 0.95,
            'japanese': 0.85,
            'korean': 0.9,
            'french': 1.0,
            'german': 0.95,
            'italian': 1.05
        };
        return rateMap[accent] || 1.0;
    }

    function getPitchForAccent(accent) {
        const pitchMap = {
            'british_rp': 1.1,
            'american_southern': 0.95,
            'australian_broad': 1.05,
            'irish_dublin': 1.1,
            'scottish_edinburgh': 1.0,
            'hongkong': 1.1,
            'japanese': 1.05,
            'korean': 0.9,
            'french': 1.1,
            'german': 0.95
        };
        return pitchMap[accent] || 1.0;
    }

    // Pipe-separated name fragments consumed by findVoiceForAccent to pick
    // a matching system voice.
    function getVoiceHintForAccent(accent) {
        const hintMap = {
            'british_rp': 'british|uk|england|Daniel',
            'american_general': 'american|us|united states|Alex',
            'australian_general': 'australia|Karen',
            'indian_general': 'india|Rishi',
            default: 'english'
        };
        return hintMap[accent] || hintMap.default;
    }

    function findVoiceForAccent(accent, voices) {
        const hints = getVoiceHintForAccent(accent).split('|');
        const lang = getLocaleForAccent(accent);

        // Match case-insensitively: hints mix lowercase fragments with
        // capitalized voice names like "Daniel".
        for (const hint of hints) {
            const voice = voices.find(v => v.name.toLowerCase().includes(hint.toLowerCase()));
            if (voice) return voice;
        }

        // Fall back to any voice in the accent's locale, then any English
        // voice, then whatever is first.
        const langVoices = voices.filter(v => v.lang === lang);
        if (langVoices.length > 0) return langVoices[0];

        return voices.find(v => v.lang.startsWith('en-')) || voices[0];
    }
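
    // Usage sketch (against a hypothetical voice list): for 'british_rp'
    // this prefers a voice whose name mentions "british", "uk", "england",
    // or "Daniel", then any en-GB voice, then any English voice.
    // const modelVoice = findVoiceForAccent('british_rp', speechSynthesis.getVoices());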
});