# INVIDEO_BASIC / app.py
# Source: gnosticdev's Hugging Face Space ("Update app.py", commit 1bc4dcb, verified; ~31.2 kB)
import gradio as gr
import torch
import soundfile as sf
import edge_tts
import asyncio
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from keybert import KeyBERT
from moviepy.editor import (
VideoFileClip,
AudioFileClip,
concatenate_videoclips,
concatenate_audioclips,
CompositeAudioClip,
AudioClip,
TextClip,
CompositeVideoClip,
VideoClip,
ColorClip
)
import numpy as np
import json
import logging
import os
import requests
import re
import math
import tempfile
import shutil
import uuid
import threading
import time
from datetime import datetime, timedelta
# ------------------- PILLOW COMPATIBILITY SHIM -------------------
# Pillow 10 removed the Image.ANTIALIAS constant; alias it to the modern
# LANCZOS resampling filter so dependencies that still reference ANTIALIAS
# (presumably MoviePy's resize path — TODO confirm) keep working.
try:
    from PIL import Image
    if not hasattr(Image, 'ANTIALIAS'):
        Image.ANTIALIAS = Image.Resampling.LANCZOS
except ImportError:
    # Pillow not installed; nothing to patch.
    pass
# ------------------- Configuration & Globals -------------------
# Raise Gradio's request timeout so long renders are not cut off mid-job.
os.environ["GRADIO_SERVER_TIMEOUT"] = "3800"
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
# Pexels API key comes from the environment; without it stock-video search
# returns nothing and the pipeline falls back to a plain black clip.
PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
if not PEXELS_API_KEY:
    logger.warning("PEXELS_API_KEY no definido. Los videos no funcionarán.")
# Lazily-loaded model singletons (populated by get_tokenizer / get_gpt2_model / get_kw_model).
tokenizer, gpt2_model, kw_model = None, None, None
# Finished videos are written here for download; purged by cleanup_old_files.
RESULTS_DIR = "video_results"
os.makedirs(RESULTS_DIR, exist_ok=True)
# task_id -> {"status", "progress_log", "timestamp", and later "result" or "error"}.
# Shared between the Gradio generator and worker threads.
TASKS = {}
# ------------------- Motor Edge TTS -------------------
# ------------------- Edge TTS Engine -------------------
class EdgeTTSEngine:
    """Thin synchronous wrapper around the async edge-tts client.

    Holds a fixed voice name and exposes ``synthesize`` which blocks until
    the audio file is written, returning True on success and False on any
    failure (errors are logged, never raised).
    """

    def __init__(self, voice="es-ES-AlvaroNeural"):
        self.voice = voice
        logger.info(f"Inicializando Edge TTS con voz: {voice}")

    async def _synthesize_async(self, text, output_path):
        """Async worker: stream TTS audio for *text* into *output_path*."""
        try:
            await edge_tts.Communicate(text, self.voice).save(output_path)
        except Exception as e:
            logger.error(f"Error en Edge TTS: {e}")
            return False
        return True

    def synthesize(self, text, output_path):
        """Blocking entry point; drives the async synthesis to completion."""
        try:
            return asyncio.run(self._synthesize_async(text, output_path))
        except Exception as e:
            logger.error(f"Error al sintetizar con Edge TTS: {e}")
            return False


# Module-wide singleton used by the pipeline.
tts_engine = EdgeTTSEngine()
# ------------------- Carga Perezosa de Modelos -------------------
def get_tokenizer():
    """Lazily load and cache the Spanish GPT-2 tokenizer (module singleton)."""
    global tokenizer
    if tokenizer is not None:
        return tokenizer
    logger.info("Cargando tokenizer GPT2 español...")
    tokenizer = GPT2Tokenizer.from_pretrained("datificate/gpt2-small-spanish")
    # GPT-2 ships without a pad token; reuse EOS so generate() can pad.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    return tokenizer
def get_gpt2_model():
    """Lazily load and cache the Spanish GPT-2 model in eval mode (singleton)."""
    global gpt2_model
    if gpt2_model is not None:
        return gpt2_model
    logger.info("Cargando modelo GPT-2 español...")
    gpt2_model = GPT2LMHeadModel.from_pretrained("datificate/gpt2-small-spanish").eval()
    return gpt2_model
def get_kw_model():
    """Lazily load and cache the multilingual KeyBERT model (singleton)."""
    global kw_model
    if kw_model is not None:
        return kw_model
    logger.info("Cargando modelo KeyBERT multilingüe...")
    kw_model = KeyBERT("paraphrase-multilingual-MiniLM-L12-v2")
    return kw_model
# ------------------- Funciones del Pipeline -------------------
def update_task_progress(task_id, message):
    """Store *message* as the task's latest progress line and log it."""
    if task_id in TASKS:
        TASKS[task_id]['progress_log'] = message
    # Log regardless of whether the task is still tracked.
    logger.info(f"[{task_id}] {message}")
def gpt2_script(prompt: str) -> str:
    """Generate a short Spanish script about *prompt* with the local GPT-2.

    Returns the generated continuation (the text after the instruction's
    "sobre:" marker), the raw *prompt* if generation came back empty, or a
    canned fallback sentence if anything raises.
    """
    try:
        local_tokenizer = get_tokenizer()
        local_gpt2_model = get_gpt2_model()
        instruction = f"Escribe un guion corto y coherente sobre: {prompt}"
        inputs = local_tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512)
        # Inference only: skip autograd bookkeeping.
        with torch.no_grad():
            outputs = local_gpt2_model.generate(
                **inputs,
                # FIX: use max_new_tokens instead of the deprecated pattern
                # max_length=160 + input_len — same budget, clearer intent.
                max_new_tokens=160,
                do_sample=True,
                top_p=0.9,
                top_k=40,
                temperature=0.7,
                no_repeat_ngram_size=3,
                pad_token_id=local_tokenizer.pad_token_id,
                eos_token_id=local_tokenizer.eos_token_id,
            )
        text = local_tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Drop the instruction prefix; keep everything after "sobre:".
        generated = text.split("sobre:")[-1].strip()
        return generated if generated else prompt
    except Exception as e:
        logger.error(f"Error generando guión: {e}")
        return f"Hoy hablaremos sobre {prompt}. Este es un tema fascinante que merece nuestra atención."
def generate_tts_audio(text: str, output_path: str) -> bool:
    """Synthesize *text* to *output_path* via the global Edge TTS engine.

    Returns True only when a non-empty audio file was actually written;
    all failures are logged and reported as False.
    """
    try:
        logger.info("Generando audio con Edge TTS...")
        ok = tts_engine.synthesize(text, output_path)
        file_written = ok and os.path.exists(output_path) and os.path.getsize(output_path) > 0
        if not file_written:
            logger.error("El archivo de audio no se generó correctamente")
            return False
        logger.info(f"Audio generado exitosamente: {output_path}")
        return True
    except Exception as e:
        logger.error(f"Error generando TTS: {e}")
        return False
def extract_keywords(text: str) -> list[str]:
    """Extract up to 5 Pexels search keywords from *text* with KeyBERT.

    Multi-word keyphrases are joined with '+' for URL-friendly queries.
    Returns a themed fallback keyword list when extraction fails or yields
    nothing.
    """
    # Single fallback list (previously duplicated verbatim in both the
    # empty-result and exception paths).
    fallback = ["mystery", "conspiracy", "alien", "UFO", "secret", "cover-up", "illusion", "paranoia",
                "secret society", "lie", "simulation", "matrix", "terror", "darkness", "shadow", "enigma",
                "urban legend", "unknown", "hidden", "mistrust", "experiment", "government", "control",
                "surveillance", "propaganda", "deception", "whistleblower", "anomaly", "extraterrestrial",
                "shadow government", "cabal", "deep state", "new world order", "mind control", "brainwashing",
                "disinformation", "false flag", "assassin", "black ops", "anomaly", "men in black", "abduction",
                "hybrid", "ancient aliens", "hollow earth", "simulation theory", "alternate reality", "predictive programming",
                "symbolism", "occult", "eerie", "haunting", "unexplained", "forbidden knowledge", "redacted", "conspiracy theorist"]
    try:
        local_kw_model = get_kw_model()
        clean_text = re.sub(r"[^\w\sáéíóúñÁÉÍÓÚÑ]", "", text.lower())
        # BUG FIX: stop_words="spanish" is not a value scikit-learn's
        # CountVectorizer accepts (only "english" or an explicit list), so
        # the old call always raised ValueError and this function always
        # returned the fallback list. Disable stop-word filtering instead.
        kws = local_kw_model.extract_keywords(clean_text, stop_words=None, top_n=5)
        keywords = [k.replace(" ", "+") for k, _ in kws if k]
        return keywords if keywords else fallback
    except Exception as e:
        logger.error(f"Error extrayendo keywords: {e}")
        return fallback
def search_pexels_videos(query: str, count: int = 3) -> list[dict]:
    """Query the Pexels video search API for *query*.

    Returns the raw list of video dicts from the API response, or [] when
    the API key is missing or the request fails.
    """
    if not PEXELS_API_KEY:
        return []
    search_params = {"query": query, "per_page": count, "orientation": "landscape"}
    try:
        resp = requests.get(
            "https://api.pexels.com/videos/search",
            headers={"Authorization": PEXELS_API_KEY},
            params=search_params,
            timeout=20,
        )
        resp.raise_for_status()
        return resp.json().get("videos", [])
    except Exception as e:
        logger.error(f"Error buscando videos en Pexels: {e}")
        return []
def download_video(url: str, folder: str) -> str | None:
    """Stream *url* into *folder* under a random .mp4 name.

    Returns the local path on success, or None when the download fails or
    the resulting file is implausibly small (<= 1000 bytes).
    """
    try:
        destination = os.path.join(folder, f"{uuid.uuid4().hex}.mp4")
        with requests.get(url, stream=True, timeout=60) as resp:
            resp.raise_for_status()
            with open(destination, "wb") as out:
                for block in resp.iter_content(chunk_size=1024 * 1024):
                    out.write(block)
        if os.path.exists(destination) and os.path.getsize(destination) > 1000:
            return destination
        logger.error(f"Archivo descargado inválido: {destination}")
        return None
    except Exception as e:
        logger.error(f"Error descargando video {url}: {e}")
        return None
def create_subtitle_clips(script: str, video_width: int, video_height: int, duration: float):
    """Build timed TextClip subtitles for *script* spread over *duration* seconds.

    The script is split into sentences; each sentence's on-screen time is
    proportional to its word count. Returns a (possibly empty) list of
    positioned TextClips; individual render failures are logged and skipped.
    """
    try:
        sentences = [s.strip() for s in re.split(r"[.!?¿¡]", script) if s.strip()]
        if not sentences:
            return []
        total_words = sum(len(s.split()) for s in sentences) or 1
        time_per_word = duration / total_words
        clips = []
        current_time = 0.0
        for sentence in sentences:
            num_words = len(sentence.split())
            sentence_duration = num_words * time_per_word
            # BUG FIX: always advance the timeline, even for sentences that
            # are skipped (too short) or that fail to render. Previously the
            # skipped sentence's narration time was dropped, so every later
            # subtitle appeared earlier than its audio.
            start_time = current_time
            current_time += sentence_duration
            if sentence_duration < 0.5:
                continue
            try:
                txt_clip = (
                    TextClip(
                        sentence,
                        fontsize=max(20, int(video_height * 0.05)),
                        color="white",
                        stroke_color="black",
                        stroke_width=2,
                        method="caption",
                        size=(int(video_width * 0.9), None),
                        font="Arial-Bold"
                    )
                    .set_start(start_time)
                    .set_duration(sentence_duration)
                    .set_position(("center", "bottom"))
                )
                if txt_clip is not None:
                    clips.append(txt_clip)
            except Exception as e:
                logger.error(f"Error creando subtítulo para '{sentence}': {e}")
                continue
        return clips
    except Exception as e:
        logger.error(f"Error creando subtítulos: {e}")
        return []
def loop_audio_to_duration(audio_clip: AudioFileClip, target_duration: float) -> AudioFileClip:
    """Return *audio_clip* trimmed or looped to exactly *target_duration* seconds.

    Passes None through, and returns the original clip unchanged if the
    loop/trim operation fails (logged).
    """
    if audio_clip is None:
        return None
    try:
        if audio_clip.duration >= target_duration:
            # Long enough already: just trim.
            return audio_clip.subclip(0, target_duration)
        repeats = math.ceil(target_duration / audio_clip.duration)
        extended = concatenate_audioclips([audio_clip for _ in range(repeats)])
        return extended.subclip(0, target_duration)
    except Exception as e:
        logger.error(f"Error haciendo loop del audio: {e}")
        return audio_clip
def create_video(script_text: str, generate_script: bool, music_path: str | None, task_id: str) -> str:
    """Run the full text-to-video pipeline and return the rendered MP4 path.

    Steps: (1) obtain a script (GPT-2 generated when *generate_script* is
    True, otherwise *script_text* verbatim), (2) synthesize narration with
    Edge TTS, (3) download Pexels stock clips for extracted keywords,
    (4) assemble/extend the visuals to the narration length, (5) mix the
    optional background music, (6) overlay subtitles, (7) render to
    RESULTS_DIR/video_<task_id>.mp4.

    Progress messages go through update_task_progress(*task_id*). Raises on
    unrecoverable errors (empty script, TTS failure, render failure); most
    intermediate failures fall back to a black ColorClip so a video is still
    produced. Temporary downloads live in a tempdir removed on exit.
    """
    temp_dir = tempfile.mkdtemp()
    TARGET_FPS = 24
    TARGET_RESOLUTION = (1280, 720)

    def normalize_clip(clip):
        """Force a clip to the target resolution/fps; None on failure."""
        if clip is None:
            return None
        try:
            if clip.size != TARGET_RESOLUTION:
                clip = clip.resize(TARGET_RESOLUTION)
            if clip.fps != TARGET_FPS:
                clip = clip.set_fps(TARGET_FPS)
            return clip
        except Exception as e:
            logger.error(f"Error normalizando clip: {e}")
            return None

    def validate_clip(clip, path="unknown"):
        """Check a clip is usable: non-None, positive duration, readable first frame."""
        if clip is None:
            logger.error(f"Clip es None: {path}")
            return False
        try:
            if clip.duration <= 0:
                logger.error(f"Clip con duración inválida: {path}")
                return False
            # Reading frame 0 catches corrupt/unreadable media early.
            test_frame = clip.get_frame(0)
            if test_frame is None:
                logger.error(f"No se pudo obtener frame del clip: {path}")
                return False
            return True
        except Exception as e:
            logger.error(f"Error validando clip {path}: {e}")
            return False

    def create_fallback_video(duration):
        """Create a plain black clip at target size/fps; None on failure."""
        try:
            fallback = ColorClip(
                size=TARGET_RESOLUTION,
                color=(0, 0, 0),
                duration=duration
            )
            fallback.fps = TARGET_FPS
            return fallback
        except Exception as e:
            logger.error(f"Error creando video de respaldo: {e}")
            return None

    try:
        # Step 1: obtain the script.
        update_task_progress(task_id, "Paso 1/7: Preparando guión...")
        if generate_script:
            script = gpt2_script(script_text)
        else:
            script = script_text.strip()
        if not script:
            raise ValueError("El guión está vacío")

        # Step 2: synthesize narration; its length drives the whole video.
        update_task_progress(task_id, "Paso 2/7: Generando audio con Edge TTS...")
        audio_path = os.path.join(temp_dir, "voice.wav")
        if not generate_tts_audio(script, audio_path):
            raise RuntimeError("Error generando el audio TTS")
        voice_clip = AudioFileClip(audio_path)
        if voice_clip is None:
            raise RuntimeError("No se pudo cargar el clip de audio")
        video_duration = voice_clip.duration
        if video_duration < 1:
            raise ValueError("El audio generado es demasiado corto")

        # Step 3: download stock footage (at most 6 clips, 3 keywords).
        update_task_progress(task_id, "Paso 3/7: Buscando videos en Pexels...")
        video_paths = []
        # BUG FIX: initialize here. Previously video_clips was only assigned
        # in the Pexels-success branch, so the cleanup loop at the end raised
        # NameError (after a successful render!) whenever the fallback path ran.
        video_clips = []
        keywords = extract_keywords(script)
        for i, keyword in enumerate(keywords[:3]):
            update_task_progress(task_id, f"Paso 3/7: Buscando videos para '{keyword}' ({i+1}/{len(keywords[:3])})")
            videos = search_pexels_videos(keyword, 2)
            for video_data in videos:
                if len(video_paths) >= 6:
                    break
                video_files = video_data.get("video_files", [])
                if video_files:
                    # Prefer the highest-resolution rendition.
                    best_file = max(video_files, key=lambda f: f.get("width", 0))
                    video_url = best_file.get("link")
                    if video_url:
                        downloaded_path = download_video(video_url, temp_dir)
                        if downloaded_path:
                            video_paths.append(downloaded_path)

        if not video_paths:
            logger.warning("No se pudieron descargar videos de Pexels, creando video de respaldo...")
            base_video = create_fallback_video(video_duration)
            if base_video is None:
                raise RuntimeError("No se pudo crear video de respaldo")
        else:
            # Step 4: load, trim (max 8 s each), validate and normalize clips.
            update_task_progress(task_id, f"Paso 4/7: Procesando {len(video_paths)} videos...")
            for path in video_paths:
                clip = None
                try:
                    if not os.path.exists(path) or os.path.getsize(path) < 1024:
                        logger.error(f"Archivo inválido: {path}")
                        continue
                    clip = VideoFileClip(path)
                    if clip is None:
                        logger.error(f"No se pudo cargar el video: {path}")
                        continue
                    if not validate_clip(clip, path):
                        clip.close()
                        continue
                    duration = min(8, clip.duration)
                    processed_clip = clip.subclip(0, duration)
                    if processed_clip is None:
                        logger.error(f"Error al recortar video: {path}")
                        clip.close()
                        continue
                    if not validate_clip(processed_clip, f"{path} (recortado)"):
                        processed_clip.close()
                        clip.close()
                        continue
                    processed_clip = normalize_clip(processed_clip)
                    if processed_clip is not None:
                        if validate_clip(processed_clip, f"{path} (normalizado)"):
                            video_clips.append(processed_clip)
                        else:
                            processed_clip.close()
                            clip.close()
                    else:
                        logger.error(f"Error normalizando video: {path}")
                        clip.close()
                except Exception as e:
                    logger.error(f"Error procesando video {path}: {e}")
                finally:
                    # NOTE(review): closing the source clip here while the
                    # derived processed_clip was kept may share the reader —
                    # confirm against the MoviePy version in use.
                    if clip is not None:
                        clip.close()

            if not video_clips:
                logger.warning("No se procesaron videos válidos, creando video de respaldo...")
                base_video = create_fallback_video(video_duration)
                if base_video is None:
                    raise RuntimeError("No se pudo crear video de respaldo")
            else:
                # Last-chance validation before concatenation.
                valid_clips = []
                for i, clip in enumerate(video_clips):
                    try:
                        if validate_clip(clip, f"clip_{i}"):
                            valid_clips.append(clip)
                        else:
                            clip.close()
                    except Exception as e:
                        logger.error(f"Clip inválido en posición {i}: {e}")
                        if clip is not None:
                            clip.close()
                if not valid_clips:
                    logger.warning("Todos los clips son inválidos, creando video de respaldo...")
                    base_video = create_fallback_video(video_duration)
                    if base_video is None:
                        raise RuntimeError("No se pudo crear video de respaldo")
                else:
                    update_task_progress(task_id, "Paso 4/7: Concatenando videos válidos...")
                    try:
                        base_video = concatenate_videoclips(valid_clips, method="chain")
                        if base_video is None:
                            raise RuntimeError("La concatenación devolvió None")
                        if not validate_clip(base_video, "video_concatenado"):
                            raise RuntimeError("Video concatenado inválido")
                    except Exception as e:
                        logger.error(f"Error concatenando videos: {e}")
                        for clip in valid_clips:
                            if clip is not None:
                                clip.close()
                        base_video = create_fallback_video(video_duration)
                        if base_video is None:
                            raise RuntimeError("No se pudo crear video de respaldo")

        # Loop the visuals (with crossfades) until they cover the narration.
        if base_video.duration < video_duration:
            update_task_progress(task_id, "Paso 4/7: Extendiendo video...")
            try:
                fade_duration = 0.5
                loops_needed = math.ceil(video_duration / base_video.duration)
                looped_clips = [base_video]
                for _ in range(loops_needed - 1):
                    fade_in_clip = base_video.crossfadein(fade_duration)
                    if fade_in_clip is not None:
                        looped_clips.append(fade_in_clip)
                    # NOTE(review): this also appends the plain clip each
                    # iteration, over-shooting the needed length; the
                    # subclip below trims the excess — confirm intended.
                    looped_clips.append(base_video)
                original_video = base_video
                base_video = concatenate_videoclips(looped_clips)
                if base_video is None or not validate_clip(base_video, "video_extendido"):
                    logger.error("Error al extender video, usando original")
                    base_video = original_video
                else:
                    original_video.close()
            except Exception as e:
                logger.error(f"Error extendiendo video: {e}")
                # Keep whatever base_video we had.

        # Trim to the exact narration length.
        try:
            original_video = base_video
            base_video = base_video.subclip(0, video_duration)
            if base_video is None or not validate_clip(base_video, "video_recortado"):
                logger.error("Error al recortar video final, usando original")
                base_video = original_video
            else:
                original_video.close()
        except Exception as e:
            logger.error(f"Error al recortar video final: {e}")
            # Keep whatever base_video we had.

        # Step 5: mix narration with optional low-volume background music.
        update_task_progress(task_id, "Paso 5/7: Componiendo audio...")
        final_audio = voice_clip
        if music_path and os.path.exists(music_path):
            music_clip = None
            try:
                music_clip = AudioFileClip(music_path)
                if music_clip is not None:
                    music_clip = loop_audio_to_duration(music_clip, video_duration)
                    if music_clip is not None:
                        music_clip = music_clip.volumex(0.2)
                        final_audio = CompositeAudioClip([music_clip, voice_clip])
            except Exception as e:
                logger.error(f"Error con música: {e}")
            finally:
                # NOTE(review): closing music_clip here while it is part of
                # final_audio may close a shared reader before rendering —
                # confirm against the MoviePy version in use.
                if music_clip is not None:
                    music_clip.close()

        # Step 6: overlay the timed subtitles.
        update_task_progress(task_id, "Paso 6/7: Agregando subtítulos...")
        subtitle_clips = create_subtitle_clips(script, base_video.w, base_video.h, video_duration)
        if subtitle_clips:
            try:
                original_video = base_video
                base_video = CompositeVideoClip([base_video] + subtitle_clips)
                if base_video is None or not validate_clip(base_video, "video_con_subtitulos"):
                    logger.error("Error al agregar subtítulos, usando video original")
                    base_video = original_video
                else:
                    original_video.close()
            except Exception as e:
                logger.error(f"Error creando video con subtítulos: {e}")

        # Step 7: attach audio and render the final file.
        update_task_progress(task_id, "Paso 7/7: Renderizando video final...")
        final_video = base_video.set_audio(final_audio)
        output_path = os.path.join(RESULTS_DIR, f"video_{task_id}.mp4")
        final_video.write_videofile(
            output_path,
            fps=TARGET_FPS,
            codec="libx264",
            audio_codec="aac",
            bitrate="8000k",
            threads=4,
            preset="slow",
            logger=None,
            verbose=False
        )

        # Release all MoviePy resources.
        voice_clip.close()
        base_video.close()
        final_video.close()
        for clip in video_clips:
            if clip is not None:
                clip.close()
        return output_path
    except Exception as e:
        logger.error(f"Error creando video: {e}")
        raise
    finally:
        # Best-effort removal of downloaded/intermediate files.
        try:
            shutil.rmtree(temp_dir)
        except:
            pass
def worker_thread(task_id: str, mode: str, topic: str, user_script: str, music_path: str | None):
    """Background job: run the pipeline and record the outcome in TASKS."""
    try:
        use_ai_script = (mode == "Generar Guion con IA")
        source_text = topic if use_ai_script else user_script
        result_path = create_video(source_text, use_ai_script, music_path, task_id)
    except Exception as e:
        logger.error(f"Error en worker {task_id}: {e}")
        TASKS[task_id].update({
            "status": "error",
            "error": str(e),
            "progress_log": f"❌ Error: {str(e)}"
        })
    else:
        TASKS[task_id].update({
            "status": "done",
            "result": result_path,
            "progress_log": "✅ ¡Video completado exitosamente!"
        })
def generate_video_with_progress(mode, topic, user_script, music):
    """Gradio generator: start a worker thread and stream its progress.

    Yields (progress_text, video_path, download_path) tuples; the paths stay
    None until the worker finishes successfully.
    """
    content = topic if mode == "Generar Guion con IA" else user_script
    if not content or not content.strip():
        yield "❌ Error: Por favor, ingresa un tema o guion.", None, None
        return
    task_id = uuid.uuid4().hex[:8]
    TASKS[task_id] = {
        "status": "processing",
        "progress_log": "🚀 Iniciando generación de video...",
        "timestamp": datetime.utcnow()
    }
    threading.Thread(
        target=worker_thread,
        args=(task_id, mode, topic, user_script, music),
        daemon=True
    ).start()
    # Poll once per second until the worker flips the status.
    while TASKS[task_id]["status"] == "processing":
        yield TASKS[task_id]['progress_log'], None, None
        time.sleep(1)
    state = TASKS[task_id]
    if state["status"] == "error":
        yield state['progress_log'], None, None
    elif state["status"] == "done":
        final_path = state['result']
        yield state['progress_log'], final_path, final_path
# ------------------- Limpieza automática -------------------
# ------------------- Automatic cleanup -------------------
def cleanup_old_files():
    """Daemon loop: every 6600 s, drop tasks older than 24 h and their files."""
    while True:
        try:
            time.sleep(6600)
            now = datetime.utcnow()
            logger.info("Ejecutando limpieza de archivos antiguos...")
            for task_id, info in list(TASKS.items()):
                created = info.get("timestamp")
                # Skip tasks without a timestamp or still within the 24 h window.
                if created is None or now - created <= timedelta(hours=24):
                    continue
                result_path = info.get("result")
                if result_path and os.path.exists(result_path):
                    try:
                        os.remove(result_path)
                        logger.info(f"Archivo eliminado: {result_path}")
                    except Exception as e:
                        logger.error(f"Error eliminando archivo: {e}")
                del TASKS[task_id]
        except Exception as e:
            logger.error(f"Error en cleanup: {e}")


threading.Thread(target=cleanup_old_files, daemon=True).start()
# ------------------- Interfaz Gradio -------------------
def toggle_input_fields(mode):
    """Show the topic box in AI mode, the full-script box otherwise."""
    ai_mode = (mode == "Generar Guion con IA")
    return gr.update(visible=ai_mode), gr.update(visible=not ai_mode)
# Build the two-column Gradio UI: inputs on the left, live progress and the
# rendered result on the right.
with gr.Blocks(title="🎬 Generador de Videos IA", theme=gr.themes.Soft()) as demo:
    # Header / feature summary shown at the top of the page.
    gr.Markdown("""
# 🎬 Generador de Videos con IA
Crea videos profesionales a partir de texto usando:
- **Edge TTS** para voz en español
- **GPT-2** para generación de guiones
- **Pexels API** para videos de stock
- **Subtítulos automáticos** y efectos visuales
El progreso se mostrará en tiempo real.
""")
    with gr.Row():
        # Left column: input controls.
        with gr.Column(scale=2):
            gr.Markdown("### ⚙️ Configuración")
            mode_radio = gr.Radio(
                choices=["Generar Guion con IA", "Usar Mi Guion"],
                value="Generar Guion con IA",
                label="Método de creación"
            )
            # Topic box (AI mode) and full-script box (manual mode); only one
            # is visible at a time, toggled by toggle_input_fields below.
            topic_input = gr.Textbox(
                label="💡 Tema para la IA",
                placeholder="Ej: Los misterios del océano profundo",
                lines=2
            )
            script_input = gr.Textbox(
                label="📝 Tu Guion Completo",
                placeholder="Escribe aquí tu guion personalizado...",
                lines=8,
                visible=False
            )
            music_input = gr.Audio(
                type="filepath",
                label="🎵 Música de fondo (opcional)"
            )
            generate_btn = gr.Button(
                "🎬 Generar Video",
                variant="primary",
                size="lg"
            )
        # Right column: progress log and outputs.
        with gr.Column(scale=2):
            gr.Markdown("### 📊 Progreso y Resultados")
            progress_output = gr.Textbox(
                label="📋 Log de progreso en tiempo real",
                lines=12,
                interactive=False,
                show_copy_button=True
            )
            video_output = gr.Video(
                label="🎥 Video generado",
                height=400
            )
            download_output = gr.File(
                label="📥 Descargar archivo"
            )
    # Swap visibility of the two text inputs when the creation mode changes.
    mode_radio.change(
        fn=toggle_input_fields,
        inputs=[mode_radio],
        outputs=[topic_input, script_input]
    )
    # Main action: generator streams progress text, then video + download file.
    generate_btn.click(
        fn=generate_video_with_progress,
        inputs=[mode_radio, topic_input, script_input, music_input],
        outputs=[progress_output, video_output, download_output]
    )
    # Usage instructions shown below the controls.
    gr.Markdown("""
### 📋 Instrucciones:
1. **Elige el método**: Genera un guion con IA o usa el tuyo propio
2. **Configura el contenido**: Ingresa un tema interesante o tu guion
3. **Música opcional**: Sube un archivo de audio para fondo musical
4. **Genera**: Presiona el botón y observa el progreso en tiempo real
⏱️ **Tiempo estimado**: 2-5 minutos dependiendo de la duración del contenido.
""")
if __name__ == "__main__":
    logger.info("🚀 Iniciando aplicación Generador de Videos IA...")
    # Queue requests so long renders don't block the server (max 10 waiting).
    demo.queue(max_size=10)
    demo.launch(
        server_name="0.0.0.0",  # listen on all interfaces (container/Space)
        server_port=7860,
        show_api=False,
        share=True
    )