sayakpaul (HF Staff) committed
Commit 408c04e · 1 Parent(s): c5db835
Files changed (1):
  1. app.py  +3 -2
app.py CHANGED
@@ -14,8 +14,6 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 # Load the model pipeline
 pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=dtype).to(device)
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
-# --- Ahead-of-time compilation ---
-compiled_transformer = compile_transformer(pipe, prompt="prompt")
 
 @spaces.GPU(duration=120)
 def push_to_hub(repo_id, filename, oauth_token: gr.OAuthToken):
@@ -25,6 +23,9 @@ def push_to_hub(repo_id, filename, oauth_token: gr.OAuthToken):
     # this will throw if token is invalid
     _ = whoami(oauth_token.token)
 
+    # --- Ahead-of-time compilation ---
+    compiled_transformer = compile_transformer(pipe, prompt="prompt")
+
     token = oauth_token.token
     out = _push_compiled_graph_to_hub(
         compiled_transformer.archive_file,
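The change moves the ahead-of-time compilation from module import time into the @spaces.GPU(duration=120) function, where ZeroGPU actually attaches a CUDA device. compile_transformer itself is defined elsewhere in the Space and is not shown in this diff; the following is only a minimal sketch of what such a helper might look like, assuming the ZeroGPU AOT utilities spaces.aoti_capture and spaces.aoti_compile, and assuming the returned compiled model exposes the archive_file consumed by _push_compiled_graph_to_hub above.

import spaces
import torch


def compile_transformer(pipe, prompt: str):
    # Sketch only: assumes the ZeroGPU AOT helpers (spaces.aoti_capture /
    # spaces.aoti_compile) and must run while a GPU is attached, i.e. inside
    # a @spaces.GPU function, which is what this commit arranges.

    # Trace one pipeline call to capture example args/kwargs for the transformer.
    with spaces.aoti_capture(pipe.transformer) as call:
        pipe(prompt)

    # Export the transformer graph with the captured inputs, then AOT-compile it.
    exported = torch.export.export(
        pipe.transformer,
        args=call.args,
        kwargs=call.kwargs,
    )
    # The compiled model's .archive_file is what gets pushed to the Hub above.
    return spaces.aoti_compile(exported)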