Update app.py
app.py (CHANGED)
@@ -14,8 +14,8 @@ if torch.cuda.is_available():
 else:
     torch_dtype = torch.float32
 
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
+# pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+# pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
@@ -23,6 +23,7 @@ MAX_IMAGE_SIZE = 1024
 
 # @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
+    model,
     prompt,
     negative_prompt,
     seed,
@@ -37,7 +38,16 @@ def infer(
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
-
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    if torch.cuda.is_available():
+        torch_dtype = torch.float16
+    else:
+        torch_dtype = torch.float32
+
+    pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype)
+    pipe = pipe.to(device)
+
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
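With this hunk, device selection, dtype selection, and pipeline creation all happen inside infer(), so whichever repo id the new model argument carries is loaded on every generation. That keeps the change small, but it also reloads the weights and moves them to the device on each call. Below is a minimal sketch of a cached variant, not part of this commit, using only the same diffusers and torch calls already shown above; get_pipe and _PIPELINES are hypothetical names.

import torch
from diffusers import DiffusionPipeline

# Hypothetical helper (not in app.py): load each repo id at most once and
# reuse the pipeline on later calls instead of rebuilding it per generation.
_PIPELINES = {}

def get_pipe(model: str) -> DiffusionPipeline:
    if model not in _PIPELINES:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
        pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch_dtype)
        _PIPELINES[model] = pipe.to(device)
    return _PIPELINES[model]

Under that assumption, infer() would start with pipe = get_pipe(model) and the per-call from_pretrained block above could be dropped.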
@@ -68,6 +78,15 @@ with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown(" # Text-to-Image Gradio Template")
 
+
+        with gr.Row():
+            model = gr.Dropdown(
+                choices=["stabilityai/sdxl-turbo", "CompVis/stable-diffusion-v1-4"],
+                value=model_repo_id,
+                label="Model",
+                info="Choose which diffusion model to use"
+            )
+
         with gr.Row():
             prompt = gr.Text(
                 label="Prompt",
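The dropdown's current value (a repo id string) is what reaches infer() as model, because the component is listed first in inputs in the final hunk below. A self-contained sketch of that wiring pattern; echo_choice, out, and btn are illustrative names, not code from app.py.

import gradio as gr

def echo_choice(model, prompt):
    # Stand-in for infer(): just report which model would be used.
    return f"would run {model!r} on prompt {prompt!r}"

with gr.Blocks() as demo:
    model = gr.Dropdown(
        choices=["stabilityai/sdxl-turbo", "CompVis/stable-diffusion-v1-4"],
        value="stabilityai/sdxl-turbo",
        label="Model",
    )
    prompt = gr.Text(label="Prompt")
    out = gr.Text(label="Result")
    btn = gr.Button("Run")
    # Component values are passed to fn positionally, in the order listed here.
    btn.click(fn=echo_choice, inputs=[model, prompt], outputs=out)

if __name__ == "__main__":
    demo.launch()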
@@ -138,6 +157,7 @@ with gr.Blocks(css=css) as demo:
         triggers=[run_button.click, prompt.submit],
         fn=infer,
         inputs=[
+            model,
             prompt,
             negative_prompt,
             seed,
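Since gr.on() forwards the listed component values to fn positionally, model has to sit first in inputs to line up with the new first parameter of infer(); parameters beyond those visible in the diff are elided here. Roughly:

# gr.on(triggers=..., fn=infer, inputs=[model, prompt, negative_prompt, seed, ...])
# invokes, on each trigger:
#     infer(<model value>, <prompt value>, <negative_prompt value>, <seed value>, ...)
# so the order of this list must match infer()'s parameter order.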