Update app_quant_latent.py

app_quant_latent.py  CHANGED  +37 -31
@@ -553,42 +553,46 @@ def safe_get_latents(pipe, height, width, generator, device, LOGS):
 @spaces.GPU
 def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
     LOGS = []
-
-
-    gallery = []
+    device = "cuda"
+    generator = torch.Generator(device).manual_seed(int(seed))
 
-    #
+    # placeholders
     placeholder = Image.new("RGB", (width, height), color=(255, 255, 255))
-
+    latent_gallery = []
+    final_gallery = []
 
     try:
-
-
-        # -------------------------------
-        # Try advanced latent extraction
-        # -------------------------------
+        # --- Try advanced latent mode ---
         try:
             latents = safe_get_latents(pipe, height, width, generator, device, LOGS)
 
-
-
-
-
-
-                guidance_scale=guidance_scale,
-                generator=generator,
-                latents=latents
-            )
+            for i, t in enumerate(pipe.scheduler.timesteps):
+                # Step-wise denoising
+                with torch.no_grad():
+                    noise_pred = pipe.unet(latents, t, encoder_hidden_states=pipe.get_text_embeddings(prompt))["sample"]
+                    latents = pipe.scheduler.step(noise_pred, t, latents)["prev_sample"]
 
-
-
-
+                # Convert latent to preview image
+                try:
+                    latent_img = latent_to_image(latents, pipe.vae)[0]
+                except Exception:
+                    latent_img = placeholder
+                latent_gallery.append(latent_img)
+
+                # Yield intermediate update: latents updated, final gallery empty
+                yield None, latent_gallery, final_gallery, LOGS
+
+            # decode final image
+            final_img = pipe.decode_latents(latents)[0]
+            final_gallery.append(final_img)
             LOGS.append("✅ Advanced latent pipeline succeeded.")
+            yield final_img, latent_gallery, final_gallery, LOGS
 
         except Exception as e:
-            LOGS.append(f"⚠️
+            LOGS.append(f"⚠️ Advanced latent mode failed: {e}")
             LOGS.append("🔄 Switching to standard pipeline...")
 
+            # Standard pipeline fallback
             try:
                 output = pipe(
                     prompt=prompt,
@@ -598,21 +602,23 @@ def generate_image(prompt, height, width, steps, seed, guidance_scale=0.0):
                     guidance_scale=guidance_scale,
                     generator=generator,
                 )
-
-
+                final_img = output.images[0]
+                final_gallery.append(final_img)
+                latent_gallery.append(final_img)  # optionally show in latent gallery as last step
                 LOGS.append("✅ Standard pipeline succeeded.")
+                yield final_img, latent_gallery, final_gallery, LOGS
 
             except Exception as e2:
                 LOGS.append(f"❌ Standard pipeline failed: {e2}")
-
-
-
-                return image, gallery, LOGS
+                final_gallery.append(placeholder)
+                latent_gallery.append(placeholder)
+                yield placeholder, latent_gallery, final_gallery, LOGS
 
     except Exception as e:
         LOGS.append(f"❌ Total failure: {e}")
-
-
+        final_gallery.append(placeholder)
+        latent_gallery.append(placeholder)
+        yield placeholder, latent_gallery, final_gallery, LOGS
 @spaces.GPU
 def generate_image_backup(prompt, height, width, steps, seed, guidance_scale=0.0, return_latents=False):
     """