import gradio as gr
from diffusers import DiffusionPipeline
import torch

# 1. USE A SMALLER MODEL (CPU-FRIENDLY)
model_id = "OFA-Sys/small-stable-diffusion-v0"  # Lightweight model

# 2. SIMPLIFIED PIPELINE FOR CPU
pipe = DiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cpu")  # Force CPU usage

# 3. FASTER GENERATION SETTINGS
def generate_image(prompt, negative_prompt="", steps=15):
    # Slider values may arrive as floats; the scheduler expects an integer step count
    return pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(steps),
        guidance_scale=7.5,  # How strongly the image follows the prompt
    ).images[0]

# 4. STREAMLINED UI
with gr.Blocks() as demo:
    gr.Markdown("# ⚡ Lightning-Fast AI Image Generator")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Your Prompt", value="a cat astronaut")
            negative = gr.Textbox(label="Avoid (Optional)", value="blurry, deformed")
            steps = gr.Slider(1, 30, value=10, step=1, label="Quality Steps")
            btn = gr.Button("Generate →")
        output = gr.Image(label="Result", height=400)

    btn.click(fn=generate_image, inputs=[prompt, negative, steps], outputs=output)

    gr.Examples(
        examples=[
            ["cyberpunk cityscape at night, neon lights", "people, text", 12],
            ["watercolor painting of a forest", "photorealistic, humans", 8],
        ],
        inputs=[prompt, negative, steps],
    )

demo.launch()
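
# 5. OPTIONAL: GPU VARIANT (a minimal sketch, not part of the CPU-only setup above)
# The script pins everything to the CPU for portability. On a CUDA-capable
# machine, the same pipeline can instead be loaded in half precision and moved
# to the GPU for much faster generation. This is an illustrative alternative to
# step 2; if used, it would replace the two `pipe = ...` lines above.
#
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   pipe = DiffusionPipeline.from_pretrained(
#       model_id,
#       # float16 is only worthwhile (and reliably supported) on the GPU
#       torch_dtype=torch.float16 if device == "cuda" else torch.float32,
#   )
#   pipe = pipe.to(device)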