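# Gradio app that turns a topic or a user-supplied script into a narrated video:
# GPT-2 drafts the script, edge-tts voices it, KeyBERT extracts visual keywords,
# Pexels supplies matching stock footage, and moviepy assembles the final MP4.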
import os
import asyncio
import logging
import shutil
import tempfile
import requests
from datetime import datetime
import edge_tts
import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from keybert import KeyBERT
from moviepy.editor import (
    VideoFileClip,
    concatenate_videoclips,
    concatenate_audioclips,
    AudioFileClip,
    CompositeAudioClip,
)
from pexelsapi.pexels import Pexels

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
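# Note: besides the standard library, this app assumes the following third-party
# packages are installed (e.g., via the Space's requirements.txt): gradio, torch,
# transformers, keybert, edge-tts, moviepy, requests, pexelsapi.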
# Pexels API key
PEXELS_API_KEY = os.environ.get("PEXELS_API_KEY")

# Model initialization
MODEL_NAME = "gpt2"
try:
    tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
    model = GPT2LMHeadModel.from_pretrained(MODEL_NAME).eval()
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    logger.info("Modelo GPT-2 cargado exitosamente.")
except Exception as e:
    logger.error(f"Error al cargar modelo GPT-2: {e}")
    tokenizer = None
    model = None

try:
    kw_model = KeyBERT('multi-qa-MiniLM-L6-cos-v1')
    logger.info("Modelo KeyBERT cargado exitosamente.")
except Exception as e:
    logger.error(f"Error al cargar KeyBERT: {e}")
    kw_model = None
def generate_script(prompt, max_length=250):
    if not tokenizer or not model:
        logger.error("Modelo GPT-2 no disponible")
        return "Lo siento, el generador de guiones no está disponible."
    logger.info("Generando guion con GPT-2...")
    try:
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        inputs = {k: v.to(device) for k, v in inputs.items()}
        model.to(device)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_length=max_length,
                do_sample=True,
                top_p=0.95,
                top_k=60,
                temperature=0.9,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id
            )
        text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        logger.info(f"Guion generado: {text[:200]}...")
        return text
    except Exception as e:
        logger.error(f"Error generando guion: {e}")
        return "No se pudo generar el guion. Intenta con otro prompt."
async def text_to_speech(text, voice="es-ES-ElviraNeural", output_path="voz.mp3"):
    logger.info("Generando audio TTS...")
    try:
        communicate = edge_tts.Communicate(text, voice)
        await communicate.save(output_path)
        logger.info("Audio TTS guardado")
        return True
    except Exception as e:
        logger.error(f"Error TTS: {e}")
        return False
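# Optional helper, not used by the pipeline: a minimal sketch for discovering other
# Spanish edge-tts voices. It assumes edge_tts.list_voices() returns dicts with
# "ShortName" and "Locale" keys, as in current edge-tts releases.
async def list_spanish_voices():
    voices = await edge_tts.list_voices()
    return [v["ShortName"] for v in voices if v.get("Locale", "").startswith("es-")]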
def download_video_file(url, temp_dir):
    if not url:
        return None
    file_name = url.split('/')[-1].split('?')[0]
    if not file_name.endswith('.mp4'):
        file_name = f"video_temp_{os.getpid()}_{datetime.now().strftime('%f')}.mp4"
    output_path = os.path.join(temp_dir, file_name)
    logger.info(f"Descargando video: {url}")
    try:
        response = requests.get(url, stream=True, timeout=30)
        response.raise_for_status()
        with open(output_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
        logger.info("Video descargado")
        return output_path
    except Exception as e:
        logger.error(f"Error descargando video: {e}")
        if os.path.exists(output_path):
            os.remove(output_path)
        return None
def loop_audio_to_length(audio_clip, target_duration):
    if audio_clip.duration >= target_duration:
        return audio_clip.subclip(0, target_duration)
    loops = int(target_duration / audio_clip.duration) + 1
    audios = [audio_clip] * loops
    # Audio clips must be joined with concatenate_audioclips, not concatenate_videoclips
    concatenated = concatenate_audioclips(audios)
    return concatenated.subclip(0, target_duration)
def extract_visual_keywords_from_script(script_text, max_keywords_per_segment=2):
    if not kw_model:
        logger.warning("KeyBERT no disponible. Usando método simple.")
        return [script_text.split('.')[0].strip().replace(" ", "+")] if script_text.strip() else []
    logger.info("Extrayendo palabras clave...")
    segments = [s.strip() for s in script_text.split('\n') if s.strip()]
    if not segments:
        segments = [script_text]
    all_keywords = set()
    for segment in segments:
        if not segment:
            continue
        try:
            keywords_with_scores = kw_model.extract_keywords(
                segment,
                keyphrase_ngram_range=(1, 2),
                # scikit-learn only ships an English stop-word list, so passing
                # 'spanish' raises an error; use None or supply a custom list
                # (see spanish_stopwords() below).
                stop_words=None,
                top_n=max_keywords_per_segment,
                use_mmr=True,
                diversity=0.7
            )
            for kw, score in keywords_with_scores:
                all_keywords.add(kw.replace(" ", "+"))
        except Exception as e:
            logger.warning(f"Error extrayendo keywords: {e}")
            all_keywords.add(segment.split(' ')[0].strip().replace(" ", "+"))
    return list(all_keywords)
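# Optional sketch, assuming the nltk package and its "stopwords" corpus are available:
# builds a Spanish stop-word list that could be passed to extract_keywords above,
# since scikit-learn's built-in stop_words option only covers English.
def spanish_stopwords():
    import nltk
    nltk.download("stopwords", quiet=True)
    from nltk.corpus import stopwords
    return stopwords.words("spanish")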
def search_pexels_videos(query_list, num_videos_per_query=5, min_duration_sec=7):
    if not PEXELS_API_KEY:
        logger.error("ERROR: PEXELS_API_KEY no configurada.")
        raise ValueError("Configura PEXELS_API_KEY en los Secrets")
    if not query_list:
        logger.warning("No hay queries para buscar.")
        return []
    pexel = Pexels(PEXELS_API_KEY)
    all_video_urls = []
    for query in query_list:
        logger.info(f"Buscando videos para: '{query}'")
        try:
            results = pexel.search_videos(
                query=query,
                orientation='landscape',
                per_page=num_videos_per_query
            )
            # Skip clips shorter than min_duration_sec
            videos = [v for v in results.get('videos', []) if v.get('duration', 0) >= min_duration_sec]
            if not videos:
                logger.info(f"No se encontraron videos para: '{query}'")
                continue
            for video in videos:
                video_files = video.get('video_files', [])
                if video_files:
                    # Highest available resolution; some entries report null dimensions
                    best_quality = max(
                        video_files,
                        key=lambda x: (x.get('width') or 0) * (x.get('height') or 0)
                    )
                    all_video_urls.append(best_quality['link'])
        except Exception as e:
            logger.error(f"Error buscando videos: {e}")
    return all_video_urls
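# Optional fallback, not wired into crear_video: a minimal sketch that queries the
# public Pexels REST endpoint directly with requests, for environments where the
# pexelsapi wrapper is unavailable. Field names ("videos", "video_files", "link")
# follow the Pexels video-search API.
def search_pexels_videos_rest(query, per_page=5):
    if not PEXELS_API_KEY:
        return []
    response = requests.get(
        "https://api.pexels.com/videos/search",
        headers={"Authorization": PEXELS_API_KEY},
        params={"query": query, "orientation": "landscape", "per_page": per_page},
        timeout=30,
    )
    response.raise_for_status()
    urls = []
    for video in response.json().get("videos", []):
        files = video.get("video_files", [])
        if files:
            # Pick the highest-resolution rendition for each result
            best = max(files, key=lambda f: (f.get("width") or 0) * (f.get("height") or 0))
            urls.append(best["link"])
    return urls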
def crear_video(prompt_type, input_text, musica_url=None):
    logger.info(f"Iniciando creación de video: {prompt_type}")
    guion = ""
    if prompt_type == "Generar Guion con IA":
        guion = generate_script(input_text)
        if not guion or "No se pudo" in guion:
            raise ValueError(guion)
    else:
        guion = input_text
        if not guion.strip():
            raise ValueError("Introduce tu guion.")

    temp_files = []
    downloaded_clip_paths = []
    final_clips = []
    temp_video_dir = tempfile.mkdtemp()
    temp_files.append(temp_video_dir)
    try:
        # Voiceover (TTS)
        voz_archivo = os.path.join(tempfile.gettempdir(), f"voz_temp_{os.getpid()}.mp3")
        temp_files.append(voz_archivo)
        if not asyncio.run(text_to_speech(guion, output_path=voz_archivo)):
            raise ValueError("Error generando voz")
        audio_tts = AudioFileClip(voz_archivo)

        # Keywords and stock footage
        search_queries = extract_visual_keywords_from_script(guion)
        if not search_queries:
            raise ValueError("No se pudieron extraer palabras clave")
        video_urls = search_pexels_videos(search_queries)
        if not video_urls:
            raise ValueError(f"Pexels no encontró videos para: {search_queries}")
        for url in video_urls:
            path = download_video_file(url, temp_video_dir)
            if path:
                downloaded_clip_paths.append(path)
        if not downloaded_clip_paths:
            raise ValueError("No se pudo descargar videos")

        # Assemble clips until the target duration is covered
        total_desired_duration = audio_tts.duration * 1.2
        current_duration = 0
        for path in downloaded_clip_paths:
            try:
                clip = VideoFileClip(path)
                clip_duration = min(clip.duration, 10)
                if clip_duration > 1:
                    final_clips.append(clip.subclip(0, clip_duration))
                    current_duration += clip_duration
                if current_duration >= total_desired_duration:
                    break
            except Exception as e:
                logger.warning(f"Error procesando clip: {e}")
        if not final_clips:
            raise ValueError("No hay clips válidos")
        video_base = concatenate_videoclips(final_clips, method="compose")
        if video_base.duration < audio_tts.duration:
            num_repeats = int(audio_tts.duration / video_base.duration) + 1
            video_base = concatenate_videoclips([video_base] * num_repeats)
        final_video_duration = audio_tts.duration

        # Audio mix: voiceover plus optional background music
        mezcla_audio = audio_tts
        if musica_url and musica_url.strip():
            musica_path = download_video_file(musica_url, temp_video_dir)
            if musica_path:
                temp_files.append(musica_path)
                try:
                    musica_audio = AudioFileClip(musica_path)
                    musica_loop = loop_audio_to_length(musica_audio, final_video_duration)
                    mezcla_audio = CompositeAudioClip([
                        musica_loop.volumex(0.3),
                        audio_tts.set_duration(final_video_duration).volumex(1.0)
                    ])
                except Exception as e:
                    logger.warning(f"Error música: {e}")

        # Render the final video
        video_final = video_base.set_audio(mezcla_audio).subclip(0, final_video_duration)
        output_dir = "output_videos"
        os.makedirs(output_dir, exist_ok=True)
        output_path = os.path.join(output_dir, f"video_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4")
        video_final.write_videofile(
            output_path,
            fps=24,
            threads=4,
            codec="libx264",
            audio_codec="aac",
            preset="medium",
            ffmpeg_params=["-movflags", "+faststart"]
        )
        return output_path
    except Exception as e:
        logger.error(f"Error general: {e}")
        raise
    finally:
        for f in temp_files:
            if os.path.exists(f):
                if os.path.isdir(f):
                    shutil.rmtree(f)
                else:
                    os.remove(f)
def run_app(prompt_type, prompt_ia, prompt_manual, musica_url):
    input_text = ""
    if prompt_type == "Generar Guion con IA":
        input_text = prompt_ia
        if not input_text.strip():
            raise gr.Error("Introduce un tema para el guion.")
    else:
        input_text = prompt_manual
        if not input_text.strip():
            raise gr.Error("Introduce tu guion.")
    try:
        video_path = crear_video(prompt_type, input_text, musica_url if musica_url and musica_url.strip() else None)
        if video_path:
            return video_path, "¡Video generado exitosamente!"
        else:
            raise gr.Error("Error desconocido")
    except ValueError as ve:
        return None, f"Error: {ve}"
    except Exception as e:
        return None, f"Error grave: {e}"
# Gradio UI
with gr.Blocks() as app:
    gr.Markdown("### 🎬 Generador de Video con Pexels")
    with gr.Tab("Generar Video"):
        with gr.Row():
            prompt_type = gr.Radio(
                ["Generar Guion con IA", "Usar Mi Guion"],
                label="Método",
                value="Generar Guion con IA"
            )
        with gr.Column(visible=True) as ia_guion_column:
            prompt_ia = gr.Textbox(
                label="Tema para IA",
                lines=2
            )
        with gr.Column(visible=False) as manual_guion_column:
            prompt_manual = gr.Textbox(
                label="Tu Guion",
                lines=5
            )
        musica_input = gr.Textbox(
            label="URL Música (opcional)",
        )
        boton = gr.Button("Generar Video")
        with gr.Column():
            salida_video = gr.Video(label="Video Resultado", interactive=False)
            estado_mensaje = gr.Textbox(label="Estado", interactive=False)

        # Toggle between the AI-script and manual-script inputs
        prompt_type.change(
            fn=lambda value: (gr.update(visible=value == "Generar Guion con IA"),
                              gr.update(visible=value == "Usar Mi Guion")),
            inputs=prompt_type,
            outputs=[ia_guion_column, manual_guion_column]
        )

        # Show a "processing" message first, then run the pipeline
        boton.click(
            fn=lambda: (None, "Procesando..."),
            outputs=[salida_video, estado_mensaje],
            queue=False
        ).then(
            fn=run_app,
            inputs=[prompt_type, prompt_ia, prompt_manual, musica_input],
            outputs=[salida_video, estado_mensaje]
        )

if __name__ == "__main__":
    app.launch(server_name="0.0.0.0", server_port=7860)