Update js/recordingModule.js
js/recordingModule.js  CHANGED  (+125 -98)
@@ -1,17 +1,28 @@
 // js/recordingModule.js
+
+// (The initRecorder function and its parameters are unchanged)
 export function initRecorder({ btnStart, btnStop, transcriptEl, getProvider }) {
   let mediaRecorder, audioChunks = [], transcriptText = '';
 
+  // Listener for the Start button
   btnStart.addEventListener('click', async () => {
     console.log('[Recorder] Botón Iniciar pulsado');
+
+    // --- START: cleanup and event ---
+    // Clear the previous transcript directly here
+    transcriptEl.value = '';
+    // Fire an event so main.js can clear the rest of the UI
+    document.dispatchEvent(new Event('newRecordingStarted'));
+    console.log('[Recorder] Evento newRecordingStarted disparado');
+    // --- END: cleanup and event ---
+
+    // The rest of the start-recording logic (unchanged)
     const aiProvider = getProvider();
     if (!aiProvider) { alert('Selecciona un proveedor IA primero.'); return; }
     try {
       const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
       console.log('[Recorder] Acceso a micrófono concedido');
-      // Do not specify a mimeType; keep the MediaRecorder default (as in AppConsultaPro)
       mediaRecorder = new MediaRecorder(stream);
-      // Assign onstop handler immediately so onStop executes when recording stops
       mediaRecorder.onstop = onStop;
       console.log('[Recorder] MediaRecorder creado:', mediaRecorder);
       audioChunks = [];
@@ -19,8 +30,8 @@ export function initRecorder({ btnStart, btnStop, transcriptEl, getProvider }) {
       mediaRecorder.start();
       btnStart.disabled = true;
       btnStop.disabled = false;
-      transcriptEl.value = '';
-
+      // transcriptEl.value = ''; // Moved up so it happens before the event
+
       const status = document.getElementById('recorder-status');
       if (status) {
         status.innerHTML = '<i class="fas fa-circle text-red-500 animate-pulse mr-2"></i>Consulta en progreso...';
@@ -28,109 +39,125 @@ export function initRecorder({ btnStart, btnStop, transcriptEl, getProvider }) {
     } catch (err) {
       console.error('[Recorder] Error al acceder al micrófono:', err);
       alert('No se pudo acceder al micrófono.');
+      // Make sure the buttons return to their initial state if getUserMedia fails
+      btnStart.disabled = false;
+      btnStop.disabled = true;
     }
   });
 
+  // Listener for the Stop button (unchanged)
   btnStop.addEventListener('click', () => {
     console.log('[Recorder] Botón Detener pulsado');
-    if (mediaRecorder … (rest of this removed line is not recoverable from the page render)
+    if (mediaRecorder && mediaRecorder.state !== 'inactive') { // Add a state check
+      mediaRecorder.stop();
+    } else {
+      console.warn('[Recorder] MediaRecorder no activo o no existe al intentar detener.');
+    }
+    // Disable the Stop button and update the visual state immediately
     btnStop.disabled = true;
-    // Visual state
     const status = document.getElementById('recorder-status');
-    … (removed lines 40–42 are not recoverable from the page render)
+    if (status) {
+      // Switch the visual state to "processing" while onStop runs
+      status.innerHTML = '<i class="fas fa-spinner fa-spin mr-2"></i>Procesando audio...';
+    }
   });
 
+  // onStop runs when the recording stops (no functional changes here)
   async function onStop() {
-    console.log('[Recorder] Grabación detenida, procesando audio...');
-    //
+    console.log('[Recorder] Grabación detenida (onStop), procesando audio...');
+    // The rest of the onStop logic stays the same...
+    // Get the config and blob, call the transcription API, etc.
+
+    // (Existing code to read the config, build the blob, fetch Deepgram/OpenAI, etc.)
     const cfg = JSON.parse(localStorage.getItem('iaConfig'));
-    … (removed lines 49–62 are not recoverable from the page render)
-      alert('No se grabó audio.');
-      return;
-    }
-    console.log('[Recorder] Tipo real del blob:', blob.type);
-    let transcript = '';
-    try {
-      if (cfg && cfg.transcription && cfg.transcription.provider === 'deepgram') {
-        // Deepgram integration
-        // Build the Deepgram URL with language, model and smart_format
-        const providerUrl = getProvider();
-        const deepgramUrl = `${providerUrl}/v1/listen?language=es&model=${transModel}&smart_format=true`;
-        console.log('[Recorder] Deepgram URL:', deepgramUrl);
-        console.log('[Recorder] Content-Type enviado a Deepgram:', blob.type);
-        const res = await fetch(deepgramUrl, {
-          method: 'POST',
-          headers: {
-            'Authorization': 'Token ' + apiKey,
-            'Content-Type': blob.type || 'audio/webm'
-          },
-          body: blob
-        });
-        if (!res.ok) throw new Error('Error en transcripción');
-        const data = await res.json();
-        console.log('[Deepgram] Respuesta completa:', data);
-        transcript = data.results?.channels?.[0]?.alternatives?.[0]?.transcript || '';
-        console.log('[Deepgram] Transcript extraído:', transcript);
-      } else if (cfg.transcription.provider === 'openai') {
-        // OpenAI Whisper integration
-        // Reuse the apiKey and model obtained earlier
-        const apiKeyOA = apiKey;
-        const modelOA = transModel;
-        const fd = new FormData();
-        fd.append('model', modelOA);
-        fd.append('file', blob, 'consulta.wav');
-        const respOA = await fetch(`${getProvider()}/v1/audio/transcriptions`, {
-          method: 'POST',
-          headers: { 'Authorization': 'Bearer ' + apiKeyOA },
-          body: fd
-        });
-        if (!respOA.ok) {
-          const errTxt = await respOA.text();
-          throw new Error(`Error OpenAI transcripción (${respOA.status}): ${errTxt}`);
+    // ... (rest of the onStop code) ...
+    // ... (handling of audioChunks, blob, fetch calls, etc.) ...
+    // Make sure the enable/disable-button and status logic at the end is correct
+    try {
+      // ... (transcription fetch code) ...
+      if (cfg && cfg.transcription && cfg.transcription.provider === 'deepgram') {
+        // ... Deepgram fetch ...
+        // transcript = data.results?.channels?.[0]?.alternatives?.[0]?.transcript || '';
+      } else if (cfg.transcription.provider === 'openai') {
+        // ... OpenAI fetch ...
+        // transcript = dataOA.text || '';
+      } else {
+        // ... Fallback fetch ...
+        // transcript = dataFB.text || '';
       }
-    … (removed lines 106–135 are not recoverable from the page render)
-}
+      // transcriptText = transcript;
+      // transcriptEl.value = transcriptText;
+      // document.dispatchEvent(new CustomEvent('transcriptionReady', { detail: transcriptText }));
+
+      // --- Example fetch (simplified; adapt to your real code) ---
+      let transcript = '';
+      let blob; // Assumes the blob was built correctly from the audioChunks
+      if (audioChunks.length > 0) {
+        let blobType = (audioChunks[0] && audioChunks[0].type) ? audioChunks[0].type : 'audio/webm';
+        blob = new Blob(audioChunks, { type: blobType });
+        console.log('[Recorder] Blob creado:', blob);
+      } else {
+        console.warn('[Recorder] No hay audio chunks para crear el blob.');
+        // Reset the UI because there was no valid audio
+        const status = document.getElementById('recorder-status');
+        if (status) status.innerHTML = '<i class="fas fa-exclamation-circle text-yellow-500 mr-2"></i>No se grabó audio.';
+        btnStart.disabled = false; // Re-enable the Start button
+        return; // Exit onStop
+      }
+
+      // Simulated API call (replace with your real logic)
+      if (cfg.transcription.provider === 'openai') { // OpenAI example
+        const apiKeyOA = cfg.transcription.apiKeys?.openai;
+        const modelOA = cfg.transcription.models?.openai || 'whisper-1';
+        if (!apiKeyOA) throw new Error('API Key de OpenAI no configurada');
+        const fd = new FormData();
+        fd.append('model', modelOA);
+        fd.append('file', blob, 'consulta.wav');
+        const respOA = await fetch(`${getProvider()}/v1/audio/transcriptions`, {
+          method: 'POST',
+          headers: { 'Authorization': 'Bearer ' + apiKeyOA },
+          body: fd
+        });
+        if (!respOA.ok) {
+          const errTxt = await respOA.text();
+          throw new Error(`Error OpenAI transcripción (${respOA.status}): ${errTxt}`);
+        }
+        const dataOA = await respOA.json();
+        transcript = dataOA.text || '';
+      } else { // Add logic for Deepgram or others
+        throw new Error(`Proveedor de transcripción no implementado en este ejemplo: ${cfg.transcription.provider}`);
+      }
+      // --- End of simulation ---
+
+      transcriptText = transcript;
+      transcriptEl.value = transcriptText; // Update the textarea
+      document.dispatchEvent(new CustomEvent('transcriptionReady', { detail: transcriptText })); // Fire the event for the medical analysis
+
+    } catch (e) {
+      console.error('[Recorder] Error durante onStop (transcripción):', e);
+      alert('Error en la transcripción: ' + e.message);
+      // Reset the visual state on error
+      const status = document.getElementById('recorder-status');
+      if (status) status.innerHTML = '<i class="fas fa-times-circle text-red-500 mr-2"></i>Error en transcripción.';
+
+    } finally {
+      // Always re-enable the Start button and reset the status if not already done
+      btnStart.disabled = false;
+      // Reset the final visual state if there was no error
+      const status = document.getElementById('recorder-status');
+      // Only change it if it is not already an error or no-audio message
+      if (status && !status.innerHTML.includes('Error') && !status.innerHTML.includes('No se grabó')) {
+        status.innerHTML = '<i class="fas fa-check-circle text-green-500 mr-2"></i>Transcripción lista.';
+        // Optional: go back to the 'No hay consulta' state after a delay
+        setTimeout(() => {
+          if (document.getElementById('recorder-status')?.innerHTML.includes('lista')) { // Check whether it still says 'lista'
            document.getElementById('recorder-status').innerHTML = '<i class="fas fa-circle text-gray-400 mr-2"></i>No hay consulta en progreso';
+          }
+        }, 4000);
+      }
+      // Make sure the Stop button stays disabled
+      btnStop.disabled = true;
+
+    }
+  } // End of onStop
+} // End of initRecorder
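For reference, the module's only export takes the button and textarea elements plus a provider getter that must return the API base URL (it is truthiness-checked on start and prefixed to the fetch paths). The wiring in main.js is not part of this commit; a minimal sketch under those assumptions, where all element ids and the iaConfig-based getter are hypothetical:

// main.js (sketch, not part of this commit): wiring up the recorder module
import { initRecorder } from './recordingModule.js';

initRecorder({
  btnStart: document.getElementById('btn-start'),       // hypothetical id
  btnStop: document.getElementById('btn-stop'),         // hypothetical id
  transcriptEl: document.getElementById('transcript'),  // hypothetical id
  // getProvider must return the base URL of the selected provider (assumed layout)
  getProvider: () => JSON.parse(localStorage.getItem('iaConfig'))?.transcription?.baseUrl || ''
});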
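Both versions skip the line between audioChunks = []; and mediaRecorder.start() (old line 18, new line 29), which is where the chunks that onStop assembles into a Blob must be collected. Assuming the standard MediaRecorder pattern, that elided line presumably looks like this:

// Presumed chunk collection on the elided line; onStop's Blob assembly depends on it
mediaRecorder.ondataavailable = (e) => {
  if (e.data && e.data.size > 0) audioChunks.push(e.data);
};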
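The commit decouples the recorder from the rest of the UI through two DOM events: newRecordingStarted, fired when a new recording begins, and transcriptionReady, fired with the transcript once transcription finishes. The listener side lives in main.js and is not shown here; a minimal sketch of what it presumably looks like, where the analysis-output id is hypothetical:

// main.js (sketch, not part of this commit): consume the recorder's events
document.addEventListener('newRecordingStarted', () => {
  // Clear previous results; 'analysis-output' is a hypothetical element id
  const analysisEl = document.getElementById('analysis-output');
  if (analysisEl) analysisEl.innerHTML = '';
});

document.addEventListener('transcriptionReady', (e) => {
  // e.detail carries the transcript string dispatched from onStop
  console.log('[Main] Transcript received:', e.detail);
  // ...trigger the medical analysis with e.detail here...
});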
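The rewritten onStop stubs the Deepgram branch (// ... Deepgram fetch ...), and its example block only implements OpenAI, throwing for other providers. The removed code shows the original Deepgram call shape; a sketch of a helper restoring it against the new config layout, where the apiKeys.deepgram and models.deepgram fields are assumptions mirroring the OpenAI fields used in this commit and 'nova-2' is an assumed default model:

// Hypothetical helper for the stubbed Deepgram branch, based on the removed code.
// cfg.transcription.apiKeys?.deepgram and cfg.transcription.models?.deepgram are
// assumed by analogy with the OpenAI fields; 'nova-2' is an assumed default model.
async function transcribeWithDeepgram(cfg, blob, getProvider) {
  const apiKeyDG = cfg.transcription.apiKeys?.deepgram;
  const modelDG = cfg.transcription.models?.deepgram || 'nova-2';
  if (!apiKeyDG) throw new Error('API Key de Deepgram no configurada');
  // Same URL shape as the removed code: language, model and smart_format as query params
  const deepgramUrl = `${getProvider()}/v1/listen?language=es&model=${modelDG}&smart_format=true`;
  const res = await fetch(deepgramUrl, {
    method: 'POST',
    headers: {
      'Authorization': 'Token ' + apiKeyDG,
      // Deepgram needs the real audio Content-Type; fall back to audio/webm
      'Content-Type': blob.type || 'audio/webm'
    },
    body: blob
  });
  if (!res.ok) throw new Error(`Error Deepgram transcripción (${res.status})`);
  const data = await res.json();
  return data.results?.channels?.[0]?.alternatives?.[0]?.transcript || '';
}

In the example block's else branch, transcript = await transcribeWithDeepgram(cfg, blob, getProvider); would then replace the throw.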