Upload folder using huggingface_hub
handler.py CHANGED (+2 -2)
@@ -56,7 +56,6 @@ class EndpointHandler:
         """
         inputs = data.pop("inputs", data)
         request = ImageRequest.FromDict(inputs)
-        self.LoadModel(request)
         response = self.__runProcess__(request)
         return response
 
@@ -75,9 +74,10 @@ class EndpointHandler:
         import torch
 
 
+        self.LoadModel(request)
 
 
         # Ensure using the same inference steps as the loaded model and CFG set to 0.
-        images = pipe(request.prompt, negative_prompt = request.negative_prompt, num_inference_steps=request.steps, guidance_scale=0).images
+        images = self.pipe(request.prompt, negative_prompt = request.negative_prompt, num_inference_steps=request.steps, guidance_scale=0).images
 
         return {"media":[self.ImageToBase64(img) for img in images]}
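For context, a minimal sketch of how the pieces touched by this diff could fit together after the change, with LoadModel now called lazily inside __runProcess__ and the pipeline referenced as self.pipe. Only the method and attribute names that appear in the diff (LoadModel, __runProcess__, ImageRequest.FromDict, ImageToBase64, self.pipe, prompt/negative_prompt/steps) are taken from the handler itself; the ImageRequest fields, the default model id, and the AutoPipelineForText2Image/base64 helpers below are assumptions for illustration, not the repository's actual implementation.

import base64
from dataclasses import dataclass
from io import BytesIO

import torch
from diffusers import AutoPipelineForText2Image


@dataclass
class ImageRequest:
    # Hypothetical request object; field names mirror the attributes used in the diff.
    prompt: str
    negative_prompt: str = ""
    steps: int = 4
    model: str = "stabilityai/sdxl-turbo"  # assumed default, not taken from the diff

    @classmethod
    def FromDict(cls, d):
        return cls(**{k: v for k, v in d.items() if k in cls.__dataclass_fields__})


class EndpointHandler:
    def __init__(self, path=""):
        self.pipe = None

    def __call__(self, data):
        # Entry point: after the first hunk, LoadModel is no longer called here.
        inputs = data.pop("inputs", data)
        request = ImageRequest.FromDict(inputs)
        response = self.__runProcess__(request)
        return response

    def LoadModel(self, request):
        # Assumed behaviour: lazily create the pipeline the first time a request arrives.
        if self.pipe is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            dtype = torch.float16 if device == "cuda" else torch.float32
            self.pipe = AutoPipelineForText2Image.from_pretrained(
                request.model, torch_dtype=dtype
            ).to(device)

    def ImageToBase64(self, img):
        # Encode a PIL image as a base64 PNG string.
        buf = BytesIO()
        img.save(buf, format="PNG")
        return base64.b64encode(buf.getvalue()).decode("utf-8")

    def __runProcess__(self, request):
        self.LoadModel(request)  # moved here by the second hunk

        # Ensure using the same inference steps as the loaded model and CFG set to 0.
        images = self.pipe(
            request.prompt,
            negative_prompt=request.negative_prompt,
            num_inference_steps=request.steps,
            guidance_scale=0,
        ).images
        return {"media": [self.ImageToBase64(img) for img in images]}

Under these assumptions, a payload such as {"inputs": {"prompt": "a red bicycle", "steps": 4}} would build the pipeline on first use and return base64-encoded PNGs in the "media" list.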