sonicsculptor-ai / script.js
// Import WaveSurfer for waveform visualization
import WaveSurfer from 'https://unpkg.com/wavesurfer.js@7/dist/wavesurfer.esm.js';
// Main application script
document.addEventListener('DOMContentLoaded', () => {
  // Initialize waveform visualizers
  let originalWaveform, processedWaveform;
  let originalAudioBlob = null;
  // Initialize audio context for recording
  let audioContext;
  let mediaRecorder;
  let audioChunks = [];
  // DOM elements
  const recordBtn = document.getElementById('recordBtn');
  const audioUpload = document.getElementById('audioUpload');
  const resultsContainer = document.getElementById('resultsContainer');
  // Record button functionality
  recordBtn.addEventListener('click', async () => {
    try {
      if (mediaRecorder && mediaRecorder.state === 'recording') {
        // Stop recording and restore the button's idle state
        mediaRecorder.stop();
        recordBtn.innerHTML = '<i data-feather="mic" class="w-4 h-4 mr-2"></i> Record Audio';
        recordBtn.classList.remove('bg-red-600');
        recordBtn.classList.add('bg-purple-600', 'hover:bg-purple-700');
        feather.replace();
        return;
      }
      // Start recording
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      audioContext = new (window.AudioContext || window.webkitAudioContext)();
      mediaRecorder = new MediaRecorder(stream);
      mediaRecorder.ondataavailable = (event) => {
        audioChunks.push(event.data);
      };
      mediaRecorder.onstop = () => {
        // Use the recorder's actual container format (typically audio/webm)
        // rather than assuming WAV
        const audioBlob = new Blob(audioChunks, { type: mediaRecorder.mimeType || 'audio/webm' });
        audioChunks = [];
        // Simulate processing
        simulateProcessing(audioBlob);
      };
      mediaRecorder.start();
      recordBtn.innerHTML = '<i data-feather="square" class="w-4 h-4 mr-2"></i> Stop Recording';
      recordBtn.classList.remove('bg-purple-600', 'hover:bg-purple-700');
      recordBtn.classList.add('bg-red-600');
      feather.replace();
    } catch (error) {
      console.error('Error accessing microphone:', error);
      alert('Error accessing microphone. Please ensure you have granted permission.');
    }
  });
  // Track current filename
  let currentFilename = 'processed_audio';
  // File upload functionality
  audioUpload.addEventListener('change', (event) => {
    const file = event.target.files[0];
    if (file) {
      currentFilename = file.name.replace(/\.[^/.]+$/, "") || 'processed_audio';
      const audioBlob = new Blob([file], { type: file.type });
      simulateProcessing(audioBlob);
    }
  });
  // Initialize WaveSurfer instances
  function initWaveforms() {
    originalWaveform = WaveSurfer.create({
      container: '#originalWaveform',
      waveColor: '#9f7aea',
      progressColor: '#d6bcfa',
      height: 80,
      barWidth: 2,
      barRadius: 3,
      cursorWidth: 1,
      cursorColor: '#fff',
      interact: false
    });
    processedWaveform = WaveSurfer.create({
      container: '#processedWaveform',
      waveColor: '#4c9faf',
      progressColor: '#88d1e1',
      height: 80,
      barWidth: 2,
      barRadius: 3,
      cursorWidth: 1,
      cursorColor: '#fff',
      interact: false
    });
  }
  // Simulate processing with comparison
  // (processingType is accepted here so the process button can pass the
  // selected enhancement; the simulation does not yet use it)
  function simulateProcessing(audioBlob, processingType) {
    // Store original audio for comparison
    originalAudioBlob = audioBlob;
    // Show compare button
    document.getElementById('compareBtn').classList.remove('hidden');
    resultsContainer.innerHTML = `
      <div class="text-center">
        <div class="animate-spin rounded-full h-12 w-12 border-t-2 border-b-2 border-purple-500 mx-auto mb-4"></div>
        <p class="font-medium">Processing your audio with AI...</p>
      </div>
    `;
    setTimeout(() => {
      const audioUrl = URL.createObjectURL(audioBlob);
      resultsContainer.innerHTML = `
        <audio controls class="w-full mb-4" id="processedAudio">
          <source src="${audioUrl}" type="${audioBlob.type || 'audio/wav'}">
          Your browser does not support the audio element.
        </audio>
        <p class="text-sm text-gray-300">Processing complete!</p>
      `;
      // Load the processed audio into its waveform (WaveSurfer v7 load() takes a URL)
      processedWaveform.load(audioUrl);
      // Enable download and share buttons
      const buttons = document.querySelectorAll('#resultsContainer + div button');
      buttons.forEach(btn => {
        btn.disabled = false;
        if (btn.querySelector('i[data-feather="download"]')) {
          btn.onclick = () => downloadProcessedAudio(audioUrl, currentFilename);
        }
      });
    }, 3000);
  }
  // Process button functionality
  const processBtn = document.getElementById('processBtn');
  const processingSelect = document.querySelector('select');
  processBtn.addEventListener('click', () => {
    if (audioChunks.length > 0 || audioUpload.files.length > 0) {
      const processingType = processingSelect.value;
      const audioBlob = audioChunks.length > 0
        ? new Blob(audioChunks, { type: mediaRecorder.mimeType || 'audio/webm' })
        : new Blob([audioUpload.files[0]], { type: audioUpload.files[0].type });
      simulateProcessing(audioBlob, processingType);
    } else {
      alert('Please record or upload audio first');
    }
  });
  // Initialize tooltips
  document.querySelectorAll('[data-tooltip]').forEach(element => {
    element.addEventListener('mouseenter', () => {
      const tooltip = document.createElement('div');
      tooltip.className = 'absolute z-10 bg-black text-white text-xs px-2 py-1 rounded mt-1';
      tooltip.textContent = element.dataset.tooltip;
      element.appendChild(tooltip);
      element.addEventListener('mouseleave', () => {
        tooltip.remove();
      }, { once: true });
    });
  });
  // Compare audio functionality
  document.getElementById('compareBtn').addEventListener('click', () => {
    const comparisonSection = document.getElementById('comparisonSection');
    comparisonSection.classList.toggle('hidden');
    if (!comparisonSection.classList.contains('hidden')) {
      // Create original audio element
      const originalUrl = URL.createObjectURL(originalAudioBlob);
      const originalAudio = document.createElement('audio');
      originalAudio.controls = true;
      originalAudio.innerHTML = `<source src="${originalUrl}" type="${originalAudioBlob.type || 'audio/wav'}">`;
      // Replace the placeholder
      const originalContainer = comparisonSection.querySelector('#originalWaveform').parentNode;
      originalContainer.querySelector('audio').replaceWith(originalAudio);
      // Load waveforms (WaveSurfer v7 load() expects a URL, not a media element)
      originalWaveform.load(originalUrl);
      const processedAudioEl = document.getElementById('processedAudio');
      if (processedAudioEl) {
        processedWaveform.load(processedAudioEl.querySelector('source').src);
      }
    }
  });
  // Initialize waveforms after DOM loads
  initWaveforms();
});
// SpeechBrain integration placeholder
async function processWithSpeechBrain(audioBlob) {
  // This would be replaced with actual SpeechBrain API calls
  console.log('Processing with SpeechBrain...');
  return audioBlob; // Return processed audio
}
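
// A minimal sketch of how a server-side enhancement call could be wired in,
// assuming a hypothetical backend endpoint (`/api/enhance`) that accepts the
// uploaded audio and returns the processed file. The endpoint path, form
// field names, and this function's name are illustrative assumptions, not
// part of this project's actual API.
async function processWithBackend(audioBlob, processingType) {
  const formData = new FormData();
  formData.append('audio', audioBlob, 'input_audio');
  formData.append('processingType', processingType);
  const response = await fetch('/api/enhance', { method: 'POST', body: formData });
  if (!response.ok) {
    throw new Error(`Processing request failed: ${response.status}`);
  }
  // The processed audio comes back as a blob, ready for URL.createObjectURL()
  return await response.blob();
}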
// Download processed audio with modified filename
function downloadProcessedAudio(audioUrl, originalName) {
  const link = document.createElement('a');
  link.href = audioUrl;
  link.download = `${originalName}_enhanced.wav`;
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
}
// SciPy integration placeholder with noise reduction
async function processWithSciPy(audioBlob, processingType) {
  console.log(`Processing with SciPy - ${processingType}...`);
  // Simulate different processing based on selection
  switch (processingType) {
    case 'Speech Enhancement (Noise Reduction)':
      console.log('Applying spectral gating and Wiener filtering...');
      break;
    case 'Speech Enhancement (Vocal Isolation)':
      console.log('Applying harmonic-percussive source separation...');
      break;
    case 'Speech Enhancement (Background Masking)':
      console.log('Applying spectral masking and ambient reduction...');
      break;
    default:
      console.log('Standard processing...');
  }
  return audioBlob; // Return processed audio
}
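
// A client-only sketch of lightweight cleanup that could stand in until a real
// SciPy/SpeechBrain backend exists: decode the audio, run it through a simple
// high-pass filter to cut low-frequency rumble, and render it offline with the
// Web Audio API. This is an illustrative assumption, not the project's actual
// enhancement pipeline; it returns a raw AudioBuffer rather than an encoded file.
async function applySimpleHighPass(audioBlob, cutoffHz = 120) {
  const arrayBuffer = await audioBlob.arrayBuffer();
  const decodeCtx = new (window.AudioContext || window.webkitAudioContext)();
  const decoded = await decodeCtx.decodeAudioData(arrayBuffer);
  // Render offline so processing is not tied to real-time playback
  const offlineCtx = new OfflineAudioContext(
    decoded.numberOfChannels,
    decoded.length,
    decoded.sampleRate
  );
  const source = offlineCtx.createBufferSource();
  source.buffer = decoded;
  const highPass = offlineCtx.createBiquadFilter();
  highPass.type = 'highpass';
  highPass.frequency.value = cutoffHz;
  source.connect(highPass);
  highPass.connect(offlineCtx.destination);
  source.start();
  return await offlineCtx.startRendering();
}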