# codingo / app.py
# Author: Anonymusmee — "Update app.py" (commit e6b8052, verified)
import os
import tempfile
from flask import Flask, request, jsonify, send_from_directory
from PIL import Image, ImageDraw
import numpy as np
import imageio
from gtts import gTTS
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
import uuid
# Initialize Flask App
# The 'static_folder' serves files like CSS and JS.
app = Flask(__name__, static_folder='static')
# Create a directory for generated files if it doesn't exist.
# Generated videos (.mp4) and narration audio (.mp3) are written here and
# served back to the browser via the /generated/<filename> route below.
GENERATED_DIR = os.path.join(os.path.dirname(__file__), 'generated')
os.makedirs(GENERATED_DIR, exist_ok=True)
# ========== LOAD MODELS (Done once on startup) ==========
# Pick the compute device; fall back to CPU when no GPU is present.
device = "cuda" if torch.cuda.is_available() else "cpu"
# FIX: float16 halves GPU memory, but most fp16 kernels are unimplemented
# (or numerically unstable) on CPU, so the previous hard-coded
# torch_dtype=torch.float16 broke CPU-only hosts. Match dtype to device.
dtype = torch.float16 if device == "cuda" else torch.float32
video_pipe = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_576w",
    torch_dtype=dtype,
)
# Swap in a multistep DPM solver so good output needs fewer inference steps.
video_pipe.scheduler = DPMSolverMultistepScheduler.from_config(video_pipe.scheduler.config)
video_pipe.to(device)
app.logger.info("AI Models loaded successfully.")
# ========== HELPER FUNCTIONS (Your existing logic) ==========
def story_to_prompt(story):
    """Turn a raw user story into a text-to-video prompt.

    Strips surrounding whitespace and appends fixed quality modifiers
    that steer the diffusion model toward cinematic, detailed output.
    """
    quality_tags = "cinematic, beautiful, hd, high quality, detailed"
    return ", ".join([story.strip(), quality_tags])
def generate_explainer_character(name, size=(150,150)):
    """Render a simple placeholder avatar for *name*.

    Returns a white RGB PIL image of the given *size* with the text
    "<name> avatar" drawn in black near the top-left corner.
    """
    canvas = Image.new("RGB", size, (255, 255, 255))
    pen = ImageDraw.Draw(canvas)
    pen.text((10, 10), f"{name} avatar", fill="black")
    return canvas
def generate_code_explanation(story, prompt):
    """Produce a human-readable note about how *story* was processed.

    *prompt* is accepted for future use; this stub only echoes the
    story back inside a fixed explanatory sentence.
    """
    template = "The code processed the story: '{}' into a prompt for the AI."
    return template.format(story)
# ========== FLASK ROUTES ==========
# Route to serve the main HTML page
@app.route('/')
def index():
    """Serve the single-page frontend from the app's root directory."""
    page = 'index.html'
    return send_from_directory('.', page)
# Route to serve generated files (videos, audio)
@app.route('/generated/<path:filename>')
def generated_files(filename):
    """Serve a generated asset (video or audio) out of GENERATED_DIR."""
    asset_dir = GENERATED_DIR
    return send_from_directory(asset_dir, filename)
# The main API endpoint to handle story-to-video requests
@app.route('/animate', methods=['POST'])
def animate():
    """Generate a short video plus spoken narration from a user story.

    Expects a JSON body: {"story": "<free text>"}.
    Returns JSON with the generated video/audio URLs, the diffusion
    prompt, and explanation texts; or {"error": ...} with 400/500.
    """
    # FIX: request.json raises (Flask answers with its own 400/415 page)
    # when the body is missing or not JSON; get_json(silent=True) returns
    # None instead, so we can answer with our intended clean 400 JSON.
    payload = request.get_json(silent=True) or {}
    story = payload.get('story')
    # Also reject whitespace-only stories, which would produce a junk prompt.
    if not story or not str(story).strip():
        return jsonify({"error": "No story provided"}), 400
    app.logger.info(f"Received story: {story}")
    try:
        # 1. Generate Video
        prompt = story_to_prompt(story)
        # NOTE(review): on newer diffusers releases `.frames` is nested
        # (one frame list per prompt) — confirm the pinned version returns
        # the flat frame list assumed here.
        video_frames = video_pipe(prompt, num_inference_steps=25, height=320, width=576, num_frames=24).frames
        # Save files with unique names so concurrent requests never collide.
        unique_id = str(uuid.uuid4())
        video_filename = f"{unique_id}.mp4"
        video_path = os.path.join(GENERATED_DIR, video_filename)
        imageio.mimsave(video_path, video_frames, fps=12)
        video_url = f"/generated/{video_filename}"
        # 2. Generate Story Explanation & Audio
        story_explanation_text = f"We used an AI to turn your story into a video! The story was about: '{story}'."
        audio_filename = f"{unique_id}.mp3"
        audio_path = os.path.join(GENERATED_DIR, audio_filename)
        tts = gTTS(story_explanation_text, lang='en')
        tts.save(audio_path)
        audio_url = f"/generated/{audio_filename}"
        # 3. Generate Code Explanation
        code_explanation_text = generate_code_explanation(story, prompt)
        app.logger.info("Successfully generated video and audio for story.")
        # 4. Send all data back to the frontend as JSON
        return jsonify({
            "video_url": video_url,
            "story_explanation": story_explanation_text,
            "audio_url": audio_url,
            "code_explanation": code_explanation_text,
            "prompt": prompt
        })
    except Exception as e:
        # Top-level boundary: log the full traceback (exception(), not
        # error(), keeps the stack trace) and hide internals from the client.
        app.logger.exception(f"An error occurred: {e}")
        return jsonify({"error": "Failed to generate animation."}), 500
if __name__ == '__main__':
    # Bind to 0.0.0.0 to be accessible within the Docker network.
    # SECURITY FIX: debug=True enables the Werkzeug interactive debugger,
    # which permits arbitrary code execution — never on a network-reachable
    # host. Opt in for local development with FLASK_DEBUG=1 instead.
    debug = os.environ.get('FLASK_DEBUG') == '1'
    app.run(host='0.0.0.0', port=5000, debug=debug)