# Hugging Face Space: Running on Zero (ZeroGPU)
import spaces
import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler
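# Prefer CUDA, then Apple Silicon (MPS), then CPU; use half precision only on GPU backends.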
torch_device = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
torch_dtype = torch.float16 if torch_device in ["cuda", "mps"] else torch.float32
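# Build the SD 1.5 pipeline once at import time and keep it on the chosen device.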
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    torch_dtype=torch_dtype,
    use_safetensors=True,
    safety_checker=None,
).to(torch_device)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# pipe.enable_model_cpu_offload()  # <-- disabled for ZeroGPU
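# @spaces.GPU asks ZeroGPU to attach a GPU for the duration of each call;
# the function below runs on that GPU and releases it when it returns.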
@spaces.GPU
def StableDiffusion(negative_prompt, prompt, height, width, num_inference_steps, guidance_scale, seed):
    """Generate a single image; prompt and negative_prompt are plain text strings."""
    # Use a dedicated generator so the global RNG state is left untouched;
    # `is not None` keeps seed=0 reproducible (a bare `if seed:` would skip it).
    generator = None
    if seed is not None:
        generator = torch.Generator().manual_seed(seed)
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    return output
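
# A minimal local usage sketch (not part of the original Space). It assumes
# @spaces.GPU is a no-op outside ZeroGPU; the prompt and seed values below
# are illustrative only.
if __name__ == "__main__":
    image = StableDiffusion(
        negative_prompt="blurry, low quality",
        prompt="an astronaut riding a horse on mars, photorealistic",
        height=512,
        width=512,
        num_inference_steps=25,
        guidance_scale=7.5,
        seed=0,
    )
    image.save("sample.png")  # the pipeline returns a PIL.Image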