rahul7star commited on
Commit
696c058
·
verified ·
1 Parent(s): bae8c03

Update app_quant_latent.py

Browse files
Files changed (1) hide show
  1. app_quant_latent.py +59 -6
app_quant_latent.py CHANGED
@@ -291,6 +291,62 @@ def generate_image(prompt, height, width, steps, seed):
291
  log(f"❌ Inference error: {e}")
292
  return None, None, LOGS
293
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
294
 
295
  # ============================================================
296
  # UI
@@ -310,14 +366,11 @@ with gr.Blocks(title="Z-Image-Turbo Generator") as demo:
310
 
311
  with gr.Column(scale=1):
312
  final_image = gr.Image(label="Final Image")
313
- latent_gallery = gr.Gallery(label="Latent Steps").style(grid=[4], height="256px")
314
  logs_box = gr.Textbox(label="Logs", lines=15)
315
 
316
- run_btn.click(
317
- generate_image,
318
- inputs=[prompt, height, width, steps, seed],
319
- outputs=[final_image, latent_gallery, logs_box]
320
- )
321
 
322
 
323
  demo.launch()
 
291
  log(f"❌ Inference error: {e}")
292
  return None, None, LOGS
293
 
294
@spaces.GPU
def generate_image(prompt, height, width, steps, seed):
    """Run the diffusion pipeline once, capturing per-step latents.

    Args:
        prompt: text prompt for the pipeline.
        height, width: output image dimensions in pixels.
        steps: number of inference steps.
        seed: RNG seed for reproducible generation.

    Returns:
        (final PIL image, list of per-step latent preview images, LOGS);
        (None, None, LOGS) on any inference error.
    """
    try:
        generator = torch.Generator(device).manual_seed(seed)
        latent_history = []

        # Callback invoked by the pipeline at every step (callback_steps=1):
        # store a detached copy of the latents and log GPU memory usage.
        def save_latents(step, timestep, latents):
            latent_history.append(latents.detach().clone())
            if torch.cuda.is_available():  # guard: memory_allocated raises without CUDA
                gpu_mem = torch.cuda.memory_allocated(0) / 1e9
                log(f"Step {step} - GPU Memory Used: {gpu_mem:.2f} GB")

        # Single pipeline call produces BOTH the final image and the latent
        # trace. (The previous version called the pipeline twice and used a
        # nonexistent `.iter()` API, which crashed and also would have left
        # the generator in a different state for the second run.)
        output = pipe(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=steps,
            guidance_scale=0.0,
            generator=generator,
            callback=save_latents,
            callback_steps=1,
        )

        # Decode each captured latent to a PIL image for the gallery.
        latent_images = []
        for latent in latent_history:
            try:
                # Latents are stored scaled; undo the VAE scaling factor
                # before decoding (default to 1.0 if the config lacks it).
                scale = getattr(pipe.vae.config, "scaling_factor", 1.0)
                decoded = pipe.vae.decode(latent / scale)
                # AutoencoderKL.decode returns a DecoderOutput; the tensor
                # lives in `.sample`.
                img_tensor = decoded.sample if hasattr(decoded, "sample") else decoded
                img_tensor = (img_tensor / 2 + 0.5).clamp(0, 1)
                latent_images.append(T.ToPILImage()(img_tensor[0].cpu()))
            except Exception as e:
                # Best-effort: a failed preview should not abort the run.
                log(f"⚠️ Failed to convert latent to image: {e}")

        log("✅ Inference finished.")
        log_system_stats("AFTER INFERENCE")

        return output.images[0], latent_images, LOGS

    except Exception as e:
        log(f"❌ Inference error: {e}")
        return None, None, LOGS
349
+
350
 
351
  # ============================================================
352
  # UI
 
366
 
367
  with gr.Column(scale=1):
368
  final_image = gr.Image(label="Final Image")
369
+ latent_gallery = gr.Gallery(label="Latent Steps", grid=[4], height=256)
370
  logs_box = gr.Textbox(label="Logs", lines=15)
371
 
372
+
373
+ run_btn.click( generate_image, inputs=[prompt, height, width, steps, seed], outputs=[final_image, latent_gallery, logs_box] )
 
 
 
374
 
375
 
376
  demo.launch()