import gradio as gr
from diffusers import DiffusionPipeline
import torch

# 1. USE A SMALLER MODEL (CPU-FRIENDLY)
model_id = "OFA-Sys/small-stable-diffusion-v0"  # Lightweight model

# 2. SIMPLIFIED PIPELINE FOR CPU
pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)  # Full precision for CPU
pipe = pipe.to("cpu")  # Force CPU usage

# 3. FASTER GENERATION SETTINGS
def generate_image(prompt, negative_prompt="", steps=13):
    return pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(steps),  # Slider values can arrive as floats; the scheduler expects an int
        guidance_scale=7.5
    ).images[0]

# 4. STREAMLINED UI
with gr.Blocks() as demo:
    gr.Markdown("# Lightweight CPU Image Generator using OFA Small model")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Your Prompt", value="a beautiful flower")
            negative = gr.Textbox(label="Avoid (Optional)", value="low-resolution")
            steps = gr.Slider(1, 30, value=13, step=1, label="Quality Steps")
            btn = gr.Button("Generate →")
        output = gr.Image(label="Result", height=400)

    btn.click(fn=generate_image, inputs=[prompt, negative, steps], outputs=output)

    gr.Examples(
        examples=[
            ["cityscape at night, red lights", "people", 12],
            ["watercolor painting of a flower", "photorealistic", 8]
        ],
        inputs=[prompt, negative, steps]
    )

demo.launch()
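
# --- Optional: quick direct test (a minimal sketch, not part of the original UI flow) ---
# generate_image() returns a PIL Image, so it can be saved straight to disk without
# going through Gradio. Note that demo.launch() above blocks until the server stops,
# so uncomment and run these lines *before* launch(), or in a separate session.
# "smoke_test.png" is just an illustrative filename.
#
# image = generate_image("a beautiful flower", negative_prompt="low-resolution", steps=8)
# image.save("smoke_test.png")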