alexander1i committed
Commit e6ea976 · 1 Parent(s): 8a3facd

fix handler

Files changed (2)
  1. .DS_Store +0 -0
  2. handler.py +41 -18
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
handler.py CHANGED
@@ -1,3 +1,4 @@
+# handler.py (replace your __init__)
 import io, base64
 from PIL import Image
 import torch
@@ -5,31 +6,53 @@ from diffusers import StableDiffusionXLInpaintPipeline
 
 class EndpointHandler:
     def __init__(self, path="."):
-        self.pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
-            "andro-flock/LUSTIFY-SDXL-NSFW-checkpoint-v2-0-INPAINTING",
-            torch_dtype=torch.float16,
-            variant="fp16"
-        ).to("cuda")
+        model_id = "andro-flock/LUSTIFY-SDXL-NSFW-checkpoint-v2-0-INPAINTING"
+
+        # Try pure fp16 first (no variant); fall back if the GPU/dtype mismatches
+        try:
+            self.pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
+                model_id,
+                torch_dtype=torch.float16,  # no variant arg
+                use_safetensors=True
+            ).to("cuda")
+        except Exception:
+            try:
+                self.pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
+                    model_id,
+                    torch_dtype=torch.bfloat16,
+                    use_safetensors=True
+                ).to("cuda")
+            except Exception:
+                self.pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
+                    model_id,
+                    torch_dtype=torch.float32,
+                    use_safetensors=True
+                ).to("cuda")
 
-    def _to_pil(self, data):
-        if isinstance(data, str):
-            data = base64.b64decode(data)
-        return Image.open(io.BytesIO(data))
+    def _to_pil(self, b64_or_bytes, mode):
+        if isinstance(b64_or_bytes, str):
+            b = base64.b64decode(b64_or_bytes)
+        else:
+            b = b64_or_bytes
+        return Image.open(io.BytesIO(b)).convert(mode)
 
     def __call__(self, data):
-        prompt = data.get("prompt", "")
-        init_img = self._to_pil(data["image"]).convert("RGB")
-        mask_img = self._to_pil(data["mask"]).convert("L")
+        prompt = data.get("prompt", "")
+        init_img = self._to_pil(data["image"], "RGB")
+        mask_img = self._to_pil(data["mask"], "L")  # white=repaint, black=keep
+
+        steps = int(data.get("num_inference_steps", 30))
+        guidance = float(data.get("guidance_scale", 7.0))
+        strength = float(data.get("strength", 0.85))
 
-        result = self.pipe(
+        out = self.pipe(
             prompt=prompt,
             image=init_img,
             mask_image=mask_img,
-            num_inference_steps=int(data.get("num_inference_steps", 30)),
-            guidance_scale=float(data.get("guidance_scale", 7.0)),
-            strength=float(data.get("strength", 0.85))
+            num_inference_steps=steps,
+            guidance_scale=guidance,
+            strength=strength
         ).images[0]
 
-        buf = io.BytesIO()
-        result.save(buf, format="PNG")
+        buf = io.BytesIO(); out.save(buf, format="PNG")
         return {"image_base64": base64.b64encode(buf.getvalue()).decode()}
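For reference, below is a minimal client-side sketch of the request/response shape this handler expects. It is an illustration only: the endpoint URL, token, and local file names (input.png, mask.png, result.png) are placeholders and not part of this commit; the payload keys simply mirror the data[...] lookups in __call__ above.

# Hypothetical client sketch; URL, token, and file names are placeholders.
import base64, io
import requests
from PIL import Image

def encode_file(path):
    # Base64-encode a local file, matching what _to_pil() decodes on the server.
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode()

payload = {
    "prompt": "a red leather jacket",
    "image": encode_file("input.png"),  # RGB source image
    "mask": encode_file("mask.png"),    # white = repaint, black = keep
    "num_inference_steps": 30,
    "guidance_scale": 7.0,
    "strength": 0.85,
}

resp = requests.post(
    "https://<your-endpoint>.endpoints.huggingface.cloud",  # placeholder URL
    headers={"Authorization": "Bearer <HF_TOKEN>"},         # placeholder token
    json=payload,
)
resp.raise_for_status()

# The handler returns {"image_base64": "..."}; decode it back into a PIL image.
img = Image.open(io.BytesIO(base64.b64decode(resp.json()["image_base64"])))
img.save("result.png")

For a local smoke test on a CUDA machine, the same payload dict can be passed straight to EndpointHandler()(payload) instead of going through HTTP.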