# %% [markdown]
# # 🖼️ Tiny Stable Diffusion (CPU Version)
# **0.9GB Model | No GPU Required**

# %% [markdown]
# ## 1. Install Requirements
# %%
# Install dependencies (transformers is needed for the CLIP text encoder)
%pip install -q torch diffusers transformers accelerate gradio
# %%
import os

import torch
import gradio as gr
from diffusers import StableDiffusionPipeline
from huggingface_hub import snapshot_download
from PIL import Image

# Force CPU mode
torch.backends.quantized.engine = 'qnnpack'  # ARM optimization
device = torch.device("cpu")
# %% [markdown]
# ## 2. Download Model (0.9GB)
# %%
model_path = "./tiny_model"
os.makedirs(model_path, exist_ok=True)

# Download with a progress bar
print("Downloading model... (this may take a few minutes)")
snapshot_download(
    repo_id="nota-ai/bk-sdm-tiny",
    local_dir=model_path,
    ignore_patterns=["*.bin", "*.fp16*", "*.onnx"],  # keep only the full-precision safetensors weights
    local_dir_use_symlinks=False,
)

# Verify the download (from_pretrained needs model_index.json)
if not os.path.exists(os.path.join(model_path, "model_index.json")):
    raise ValueError("Model failed to download! Check your internet connection.")
print("✔ Model downloaded successfully")
# %% [markdown]
# ## 3. Load Optimized Pipeline
| print("Loading model...") | |
| pipe = StableDiffusionPipeline.from_pretrained( | |
| model_path, | |
| torch_dtype=torch.float32, | |
| safety_checker=None, | |
| requires_safety_checker=False | |
| ).to(device) | |
| # Memory optimizations | |
| pipe.enable_attention_slicing() | |
| pipe.unet = torch.compile(pipe.unet) # Compile for faster inference | |
# %% [markdown]
# ## 4. Generation Function
# %%
def generate_image(prompt, steps=15, seed=42):
    """Generate a 256x256 image from a text prompt on the CPU."""
    # Gradio passes slider/number values as floats, so cast to int
    generator = torch.Generator(device).manual_seed(int(seed))
    print(f"Generating: {prompt}")
    image = pipe(
        prompt,
        num_inference_steps=int(steps),
        guidance_scale=7.0,
        generator=generator,
        width=256,
        height=256,
    ).images[0]
    return image
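# %% [markdown]
# Optional sanity check before wiring up the UI: call the function directly. This is a
# minimal sketch; the prompt and output filename below are arbitrary, and a 256x256
# generation on a typical CPU can take a minute or more.

# %%
test_image = generate_image("a cute robot wearing a hat", steps=5, seed=42)
test_image.save("test.png")
print("Saved sanity-check image to test.png")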
# %% [markdown]
# ## 5. Gradio Interface
# %%
with gr.Blocks(title="Tiny Diffusion (CPU)", css="footer {visibility: hidden}") as demo:
    gr.Markdown("## 🎨 CPU Image Generator (0.9GB Model)")
    with gr.Row():
        prompt = gr.Textbox(
            label="Prompt",
            value="a cute robot wearing a hat",
            placeholder="Describe your image...",
        )
    with gr.Row():
        steps = gr.Slider(5, 25, value=15, label="Steps")
        seed = gr.Number(42, label="Seed")
    with gr.Row():
        generate_btn = gr.Button("Generate", variant="primary")
    with gr.Row():
        output = gr.Image(label="Output", width=256, height=256)

    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, steps, seed],
        outputs=output,
    )
# %% [markdown]
# ## 6. Launch App

# %%
print("Starting interface...")
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True,
)