update

- app.py +89 -23
- modal_video_processing.py +43 -0
app.py
CHANGED

@@ -614,30 +614,96 @@ def mcp_agent_pipeline(niche, style, num_variations=1):
     import time
     timestamp = int(time.time())

-    [6 removed lines not captured in this view]
-                quote,
-                output_path,
-                None  # No audio
-            )
-
-            if creation_result["success"]:
-                created_videos.append(creation_result["output_path"])
-                status_log.append(f" ✅ Variation {i+1} created!")

-    [9 removed lines not captured in this view]
+    # Use batch processing if Modal configured and multiple videos
+    modal_endpoint = os.getenv("MODAL_ENDPOINT_URL")
+    if modal_endpoint and len(video_results) > 1:
+        try:
+            # Use batch endpoint for parallel processing
+            batch_endpoint = modal_endpoint.replace("process-video-endpoint", "process-batch-endpoint")

+            status_log.append(f" 🚀 Using Modal batch processing (parallel)...")
+
+            videos_payload = []
+            for video_result in video_results:
+                videos_payload.append({
+                    "video_url": video_result["video_url"],
+                    "quote_text": quote,
+                    "audio_b64": None
+                })
+
+            import requests
+            import base64
+
+            response = requests.post(
+                batch_endpoint,
+                json={"videos": videos_payload},
+                timeout=180
+            )
+
+            if response.status_code == 200:
+                result = response.json()
+                if result.get("success"):
+                    # Save all videos
+                    for i, video_data in enumerate(result["videos"]):
+                        output_filename = f"quote_video_v{i+1}_{timestamp}.mp4"
+                        output_path = os.path.join(output_dir, output_filename)
+
+                        video_bytes = base64.b64decode(video_data["video"])
+                        with open(output_path, 'wb') as f:
+                            f.write(video_bytes)
+
+                        created_videos.append(output_path)
+                        status_log.append(f" ✅ Variation {i+1} created!")
+
+                        # Copy to gallery
+                        import shutil
+                        gallery_filename = f"gallery_{timestamp}_v{i+1}.mp4"
+                        gallery_path = os.path.join(gallery_dir, gallery_filename)
+                        try:
+                            shutil.copy2(output_path, gallery_path)
+                        except:
+                            pass
+
+                    status_log.append(f" ⚡ Batch processing complete! All {len(created_videos)} videos done in parallel")
+                else:
+                    status_log.append(f" ⚠️ Batch failed, falling back to sequential...")
+                    raise Exception("Batch failed")
+            else:
+                status_log.append(f" ⚠️ Batch endpoint error, falling back to sequential...")
+                raise Exception("Batch endpoint error")
+
+        except Exception as e:
+            # Fall back to sequential processing
+            status_log.append(f" ⚠️ Batch processing failed: {e}")
+            status_log.append(f" 🔄 Falling back to sequential processing...")
+            created_videos = []  # Reset
+
+    # Sequential processing (fallback or single video)
+    if len(created_videos) == 0:
+        for i, video_result in enumerate(video_results):
+            output_filename = f"quote_video_v{i+1}_{timestamp}.mp4"
+            output_path = os.path.join(output_dir, output_filename)
+
+            creation_result = create_quote_video_tool(
+                video_result["video_url"],
+                quote,
+                output_path,
+                None  # No audio
+            )
+
+            if creation_result["success"]:
+                created_videos.append(creation_result["output_path"])
+                status_log.append(f" ✅ Variation {i+1} created!")
+
+                # Copy to gallery for public viewing
+                import shutil
+                gallery_filename = f"gallery_{timestamp}_v{i+1}.mp4"
+                gallery_path = os.path.join(gallery_dir, gallery_filename)
+                try:
+                    shutil.copy2(creation_result["output_path"], gallery_path)
+                except:
+                    pass  # Silently fail if can't copy to gallery
+            else:
                 error_msg = creation_result.get("message", "Unknown error")
                 status_log.append(f" ⚠️ Variation {i+1} failed: {error_msg}")

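For reference, the request/response contract that the new app.py code assumes for the Modal batch endpoint is sketched below as a small standalone script. This is an illustration only, not part of the commit: the endpoint and clip URLs are placeholders, and in the app itself the batch URL is derived from MODAL_ENDPOINT_URL by swapping "process-video-endpoint" for "process-batch-endpoint".

# Sketch of the batch contract used above (placeholder URLs, not from the commit).
import base64
import requests

batch_endpoint = "https://<your-workspace>--process-batch-endpoint.modal.run"  # placeholder

payload = {
    "videos": [
        {"video_url": "https://example.com/clip1.mp4", "quote_text": "Stay curious.", "audio_b64": None},
        {"video_url": "https://example.com/clip2.mp4", "quote_text": "Stay curious.", "audio_b64": None},
    ]
}

response = requests.post(batch_endpoint, json=payload, timeout=180)
result = response.json()

# On success the endpoint returns {"success": True, "videos": [...], "count": N};
# each entry in "videos" carries a base64-encoded MP4 under "video".
if response.status_code == 200 and result.get("success"):
    for i, item in enumerate(result["videos"]):
        with open(f"quote_video_v{i+1}.mp4", "wb") as f:
            f.write(base64.b64decode(item["video"]))
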
modal_video_processing.py
CHANGED

@@ -25,6 +25,7 @@ image = modal.Image.debian_slim(python_version="3.11").pip_install(
     timeout=180,
     keep_warm=1,  # Keep 1 container warm
     container_idle_timeout=300,
+    concurrency_limit=10,  # Allow multiple videos in parallel
 )
 def process_quote_video(video_url: str, quote_text: str, audio_b64: str = None) -> bytes:
     """

@@ -173,3 +174,45 @@ def process_video_endpoint(data: dict):

     except Exception as e:
         return {"error": str(e)}, 500
+
+
+@app.function(image=image)
+@modal.web_endpoint(method="POST")
+def process_batch_endpoint(data: dict):
+    """
+    Batch endpoint - process multiple videos in PARALLEL.
+    Much faster for generating 2-3 variations!
+    """
+    videos_data = data.get("videos", [])
+
+    if not videos_data:
+        return {"error": "Missing videos array"}, 400
+
+    try:
+        # Process all videos in parallel using .map()
+        results = list(process_quote_video.map(
+            [v["video_url"] for v in videos_data],
+            [v["quote_text"] for v in videos_data],
+            [v.get("audio_b64") for v in videos_data]
+        ))
+
+        # Encode all results
+        import base64
+        encoded_results = []
+        for video_bytes in results:
+            video_b64 = base64.b64encode(video_bytes).decode()
+            encoded_results.append({
+                "success": True,
+                "video": video_b64,
+                "size_mb": len(video_bytes) / 1024 / 1024
+            })
+
+        return {
+            "success": True,
+            "videos": encoded_results,
+            "count": len(encoded_results)
+        }
+
+    except Exception as e:
+        return {"error": str(e)}, 500
+
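After deploying, the new batch endpoint can be exercised directly. The sketch below is not part of the commit; `modal deploy` prints the actual *.modal.run URLs for the web endpoints, and the ones shown here are placeholders.

# Deploy the updated Modal app; the CLI prints a URL for each web endpoint,
# including the new process_batch_endpoint (exact URL varies by workspace):
#   modal deploy modal_video_processing.py
#
# Point the Gradio app at the single-video endpoint so the string replacement
# in app.py can derive the batch URL:
#   export MODAL_ENDPOINT_URL="https://<your-workspace>--process-video-endpoint.modal.run"  # placeholder
#
# Quick smoke test of the batch endpoint (placeholder URLs):
import requests

url = "https://<your-workspace>--process-batch-endpoint.modal.run"  # placeholder
payload = {"videos": [{"video_url": "https://example.com/clip.mp4",
                       "quote_text": "Test quote", "audio_b64": None}]}
print(requests.post(url, json=payload, timeout=180).json().get("count"))  # expect 1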