Hugging Face Spaces — commit "Update app.py" (Space status: Runtime error).
Changed file: app.py (diff shown below; old version first, new version second).
|
@@ -2,6 +2,9 @@ import gradio as gr
|
|
| 2 |
import numpy as np
|
| 3 |
from optimum.intel import OVStableDiffusionPipeline, OVStableDiffusionXLPipeline, OVLatentConsistencyModelPipeline
|
| 4 |
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
|
| 7 |
# model_id = "echarlaix/sdxl-turbo-openvino-int8"
|
|
@@ -12,17 +15,19 @@ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
|
|
| 12 |
model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"
|
| 13 |
#pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
|
| 14 |
pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)
|
| 15 |
-
|
| 16 |
|
| 17 |
batch_size, num_images, height, width = 1, 1, 512, 512
|
| 18 |
pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
|
| 19 |
pipeline.compile()
|
| 20 |
|
|
|
|
|
|
|
| 21 |
def infer(prompt, num_inference_steps):
|
| 22 |
|
| 23 |
image = pipeline(
|
| 24 |
prompt = prompt,
|
| 25 |
-
|
| 26 |
# guidance_scale = guidance_scale,
|
| 27 |
num_inference_steps = num_inference_steps,
|
| 28 |
width = width,
|
|
|
|
# Module-level setup for the Gradio demo: load the INT8 OpenVINO
# Latent Consistency Model pipeline, attach the "easynegative"
# embedding weights, fix the input shape, and compile for inference.
import numpy as np
from optimum.intel import OVStableDiffusionPipeline, OVStableDiffusionXLPipeline, OVLatentConsistencyModelPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers import DiffusionPipeline


# model_id = "echarlaix/sdxl-turbo-openvino-int8"
model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"
# pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
# compile=False defers compilation until after reshape() below.
pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)
# NOTE(review): "EvilEngine/easynegative" is a textual-inversion style
# negative embedding; confirm load_lora_weights is the right loader for it
# (vs. load_textual_inversion) on the OV pipeline.
pipeline.load_lora_weights("EvilEngine/easynegative")

# Static reshape: pinning batch/size lets OpenVINO compile a
# fixed-shape model, which is faster than dynamic shapes.
batch_size, num_images, height, width = 1, 1, 512, 512
pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
pipeline.compile()

# Trigger token for the negative embedding loaded above; passed as
# negative_prompt in infer().
negative_prompt = "easynegative"
| 26 |
def infer(prompt, num_inference_steps):
|
| 27 |
|
| 28 |
image = pipeline(
|
| 29 |
prompt = prompt,
|
| 30 |
+
negative_prompt = negative_prompt, #no negative_prompt keyword in LatentConsistencyPipelineMixin
|
| 31 |
# guidance_scale = guidance_scale,
|
| 32 |
num_inference_steps = num_inference_steps,
|
| 33 |
width = width,
|