rahul7star committed (verified)
Commit 02db00d · Parent(s): 8c137f7

Update app_quant_latent.py

Files changed (1):
  1. app_quant_latent.py (+20 -14)
app_quant_latent.py CHANGED
@@ -444,8 +444,9 @@ try:
         transformer=transformer,
         text_encoder=text_encoder,
         torch_dtype=torch_dtype,
-        attn_implementation="kernels-community/vllm-flash-attn3",
+
     )
+    pipe.transformer.set_attention_backend("_flash_3")
     # pipe.load_lora_weights("bdsqlsz/qinglong_DetailedEyes_Z-Image", weight_name="qinglong_detailedeye_z-imageV2(comfy).safetensors", adapter_name="lora")
     pipe.load_lora_weights("rahul7star/ZImageLora",
                            weight_name="NSFW/doggystyle_pov.safetensors", adapter_name="lora")
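This hunk drops the `attn_implementation` constructor kwarg and instead calls `set_attention_backend("_flash_3")` on the transformer after the pipeline is built, which keeps pipeline loading independent of whether the FlashAttention-3 kernels can be imported. A minimal sketch of that pattern with a guard is below; the `enable_fa3` helper name and the `"native"` fallback backend are illustrative assumptions, not part of the commit:

    # Hypothetical helper (not in the commit): route the transformer's attention
    # through FlashAttention-3, falling back if the kernels are unavailable.
    def enable_fa3(pipe) -> bool:
        try:
            pipe.transformer.set_attention_backend("_flash_3")  # FA3 via diffusers' attention dispatcher
            return True
        except Exception:
            pipe.transformer.set_attention_backend("native")  # assumed fallback: torch SDPA
            return False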
@@ -467,22 +468,27 @@ log_system_stats("AFTER PIPELINE BUILD")
 # -----------------------------
 # Monkey-patch prepare_latents
 # -----------------------------
-original_prepare_latents = pipe.prepare_latents
+# -----------------------------
+# Monkey-patch prepare_latents
+# -----------------------------
+if pipe is not None:
+    original_prepare_latents = pipe.prepare_latents
 
-def logged_prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
-    # Call original method
-    result_latents = original_prepare_latents(
-        batch_size, num_channels_latents, height, width, dtype, device, generator, latents
-    )
+    def logged_prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
+        result_latents = original_prepare_latents(
+            batch_size, num_channels_latents, height, width, dtype, device, generator, latents
+        )
+        log_msg = f"🔹 prepare_latents called | shape={result_latents.shape}, dtype={result_latents.dtype}, device={result_latents.device}"
+        if hasattr(self, "_latents_log"):
+            self._latents_log.append(log_msg)
+        else:
+            self._latents_log = [log_msg]
+        return result_latents
 
-    # Save info for logging
-    log_msg = f"🔹 prepare_latents called | shape={result_latents.shape}, dtype={result_latents.dtype}, device={result_latents.device}"
-    if hasattr(self, "_latents_log"):
-        self._latents_log.append(log_msg)
-    else:
-        self._latents_log = [log_msg]
+    pipe.prepare_latents = logged_prepare_latents.__get__(pipe)
+else:
+    log("❌ WARNING: Pipe not initialized; skipping prepare_latents patch")
 
-    return result_latents
 
 # Apply patch
 pipe.prepare_latents = logged_prepare_latents.__get__(pipe)
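This hunk wraps the existing prepare_latents monkey-patch in an `if pipe is not None:` guard (note that the unchanged trailing context lines still rebind `pipe.prepare_latents` unconditionally after the `else` branch, so the guard does not fully protect against an uninitialized pipe). The patch relies on the descriptor protocol: calling `__get__` on a plain function binds it to an instance, so `self` is filled in automatically. A self-contained illustration of that binding trick, in plain Python with hypothetical names:

    # Sketch (not from the commit): the function.__get__(instance) binding trick.
    class Greeter:
        def hello(self):
            return "hello"

    g = Greeter()
    original_hello = g.hello  # keep the original bound method

    def logged_hello(self):
        result = original_hello()
        print(f"hello() called on {self!r}")  # extra logging around the original
        return result

    # __get__ returns a bound method, so `self` inside logged_hello is `g`.
    g.hello = logged_hello.__get__(g)
    g.hello()  # prints the log line and returns "hello"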
 