Commit 3ce21df
Parent(s): a094439
Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import torch
 from diffusers import AutoPipelineForText2Image
 import time
 
-USE_TORCH_COMPILE =
+USE_TORCH_COMPILE = True
 
 dtype = torch.float16
 device = torch.device("cuda:0")
@@ -16,6 +16,7 @@ if USE_TORCH_COMPILE:
     pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
 
 def generate(num_images_per_prompt: int = 1):
+    print("Version", torch.__version__)
     prompt = 77 * "a"
     num_inference_steps = 40
     start_time = time.time()
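For context, a minimal sketch of how app.py might read with this commit applied. Only the lines visible in the diff come from the commit; the checkpoint id and the timed pipeline call after start_time are assumptions added for illustration, not part of the repository.

# Hedged reconstruction of app.py after commit 3ce21df.
# Anything not shown in the diff is an assumption for illustration.
import torch
from diffusers import AutoPipelineForText2Image
import time

USE_TORCH_COMPILE = True

dtype = torch.float16
device = torch.device("cuda:0")

# Assumption: the pipeline is loaded from some text-to-image checkpoint;
# the id below is a placeholder, not taken from the commit.
pipeline = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=dtype,
).to(device)

if USE_TORCH_COMPILE:
    # Compile the UNet with CUDA-graph capture, as in the diff.
    pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

def generate(num_images_per_prompt: int = 1):
    print("Version", torch.__version__)
    prompt = 77 * "a"  # dummy benchmark prompt from the diff
    num_inference_steps = 40
    start_time = time.time()
    # Assumption: a single timed pipeline call; the real body is not shown in the diff.
    images = pipeline(
        prompt,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    print("Elapsed:", round(time.time() - start_time, 2), "s")
    return images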