Upload HunyuanVideoPipeline
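This commit uploads the components of a diffusers-format HunyuanVideoPipeline (tokenizers, transformer config, sharded safetensors weights and their index). As a rough orientation, the sketch below shows how such a repository is typically loaded with diffusers; the repository id is a placeholder, not taken from this diff, and the generation settings are illustrative only.

# Minimal loading sketch, assuming this repo follows the standard diffusers layout.
# REPO_ID is a placeholder for wherever this pipeline is hosted.
import torch
from diffusers import HunyuanVideoPipeline
from diffusers.utils import export_to_video

REPO_ID = "<this-repo>"  # placeholder

pipe = HunyuanVideoPipeline.from_pretrained(REPO_ID, torch_dtype=torch.bfloat16)
pipe.vae.enable_tiling()          # reduces VAE memory use when decoding video frames
pipe.enable_model_cpu_offload()   # optional: trade speed for lower VRAM

frames = pipe(
    prompt="A cat walks on the grass, realistic style.",
    num_frames=61,
    num_inference_steps=30,
).frames[0]
export_to_video(frames, "output.mp4", fps=15)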
tokenizer_2/tokenizer_config.json
CHANGED
@@ -23,7 +23,6 @@
   "do_lower_case": true,
   "eos_token": "<|endoftext|>",
   "errors": "replace",
-  "extra_special_tokens": {},
   "max_length": 77,
   "model_max_length": 77,
   "pad_token": "<|endoftext|>",
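The only change here is dropping the "extra_special_tokens" entry; the remaining fields (lowercasing, "<|endoftext|>" eos/pad tokens, max length 77) are CLIP-tokenizer-style settings. A quick, hedged check that the config still loads cleanly (the repository id is again a placeholder):

# Hedged check: load tokenizer_2 and confirm the CLIP-style settings survive
# the removal of "extra_special_tokens". REPO_ID is a placeholder.
from transformers import AutoTokenizer

REPO_ID = "<this-repo>"  # placeholder
tok = AutoTokenizer.from_pretrained(REPO_ID, subfolder="tokenizer_2")
print(type(tok).__name__, tok.model_max_length, tok.eos_token)
# expected along the lines of: CLIPTokenizerFast 77 <|endoftext|>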
transformer/config.json
CHANGED
@@ -12,12 +12,13 @@
   "out_channels": 16,
   "patch_size": 2,
   "patch_size_t": 1,
+  "pooled_projection_dim": 768,
   "qk_norm": "rms_norm",
-  "
+  "rope_axes_dim": [
     16,
     56,
     56
   ],
-  "
-  "
+  "rope_theta": 256.0,
+  "text_embed_dim": 4096
 }
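The added keys (pooled_projection_dim, rope_axes_dim, rope_theta, text_embed_dim) correspond to constructor arguments of diffusers' HunyuanVideoTransformer3DModel; rope_axes_dim splits the rotary embedding across the temporal and two spatial axes (16 + 56 + 56 = 128, which would match a 128-dim attention head). A hedged sketch that loads just the transformer and reads these fields back (placeholder repo id):

# Sketch, assuming the standard diffusers HunyuanVideoTransformer3DModel class.
# REPO_ID is a placeholder for wherever this pipeline is hosted.
import torch
from diffusers import HunyuanVideoTransformer3DModel

REPO_ID = "<this-repo>"  # placeholder
transformer = HunyuanVideoTransformer3DModel.from_pretrained(
    REPO_ID, subfolder="transformer", torch_dtype=torch.bfloat16
)
cfg = transformer.config
print(cfg.rope_axes_dim, cfg.rope_theta)              # expected: (16, 56, 56) 256.0
print(cfg.text_embed_dim, cfg.pooled_projection_dim)  # expected: 4096 768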
transformer/diffusion_pytorch_model-00001-of-00003.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f261f32347ca7cbb88e9d143d6316608c37ddfb5262d829bce63a74679a9a644
+size 9972080536
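The LFS pointer for the first shard now carries a concrete object id and size (the other two shards are not shown in this view). A small, self-contained check that a downloaded shard matches its pointer:

# Verify a downloaded shard against the oid/size recorded in its LFS pointer.
# The local path is hypothetical; adjust it to wherever the file was downloaded.
import hashlib
import os

path = "transformer/diffusion_pytorch_model-00001-of-00003.safetensors"
expected_oid = "f261f32347ca7cbb88e9d143d6316608c37ddfb5262d829bce63a74679a9a644"
expected_size = 9972080536

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches its LFS pointer")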
transformer/diffusion_pytorch_model.safetensors.index.json
CHANGED
@@ -3,10 +3,6 @@
     "total_size": 25642025088
   },
   "weight_map": {
-    "guidance_in.mlp.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
-    "guidance_in.mlp.0.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
-    "guidance_in.mlp.2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
-    "guidance_in.mlp.2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "img_in.proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "img_in.proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "norm_out.linear.bias": "diffusion_pytorch_model-00003-of-00003.safetensors",
@@ -573,10 +569,18 @@
     "single_transformer_blocks.9.proj_mlp.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
     "single_transformer_blocks.9.proj_out.bias": "diffusion_pytorch_model-00002-of-00003.safetensors",
     "single_transformer_blocks.9.proj_out.weight": "diffusion_pytorch_model-00002-of-00003.safetensors",
-    "
-    "
-    "
-    "
+    "time_text_embed.guidance_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.guidance_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.guidance_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.guidance_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.text_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.text_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.text_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.text_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.timestep_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.timestep_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.timestep_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "time_text_embed.timestep_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "transformer_blocks.0.attn.add_k_proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "transformer_blocks.0.attn.add_k_proj.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "transformer_blocks.0.attn.add_q_proj.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
@@ -1217,18 +1221,16 @@
     "transformer_blocks.9.norm1.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "transformer_blocks.9.norm1_context.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "transformer_blocks.9.norm1_context.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
-    "txt_in.
-    "txt_in.
-    "txt_in.
-    "txt_in.
-    "txt_in.
-    "txt_in.
-    "txt_in.
-    "txt_in.
-    "txt_in.
-    "txt_in.
-    "txt_in.token_refiner.refiner_blocks.0.adaLN_modulation.1.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
-    "txt_in.token_refiner.refiner_blocks.0.adaLN_modulation.1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.proj_in.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.proj_in.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.time_text_embed.text_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.time_text_embed.text_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.time_text_embed.text_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.time_text_embed.text_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.time_text_embed.timestep_embedder.linear_1.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.time_text_embed.timestep_embedder.linear_1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.time_text_embed.timestep_embedder.linear_2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.time_text_embed.timestep_embedder.linear_2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.0.attn.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.0.attn.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
@@ -1245,8 +1247,8 @@
     "txt_in.token_refiner.refiner_blocks.0.norm1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.0.norm2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.0.norm2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
-    "txt_in.token_refiner.refiner_blocks.
-    "txt_in.token_refiner.refiner_blocks.
+    "txt_in.token_refiner.refiner_blocks.0.norm_out.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.token_refiner.refiner_blocks.0.norm_out.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.1.attn.to_k.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.1.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.1.attn.to_out.0.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
@@ -1263,9 +1265,7 @@
     "txt_in.token_refiner.refiner_blocks.1.norm1.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.1.norm2.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
     "txt_in.token_refiner.refiner_blocks.1.norm2.weight": "diffusion_pytorch_model-00001-of-00003.safetensors",
-    "
-    "
-    "vector_in.out_layer.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
-    "vector_in.out_layer.weight": "diffusion_pytorch_model-00001-of-00003.safetensors"
+    "txt_in.token_refiner.refiner_blocks.1.norm_out.linear.bias": "diffusion_pytorch_model-00001-of-00003.safetensors",
+    "txt_in.token_refiner.refiner_blocks.1.norm_out.linear.weight": "diffusion_pytorch_model-00001-of-00003.safetensors"
   }
 }
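Taken together, the index changes appear to rename the weight map from the original HunyuanVideo layout (guidance_in.*, vector_in.*, txt_in.*.adaLN_modulation.*) to the diffusers layout (time_text_embed.*, txt_in.proj_in, txt_in.time_text_embed.*, *.norm_out.linear.*), while the recorded total size (25,642,025,088 bytes across three shards) is unchanged. The sketch below inspects the updated index; paths assume the repository has been downloaded locally.

# Inspect the updated index: which shard holds a given parameter, how many
# tensors each shard carries, and whether all referenced shard files exist.
import json
import os
from collections import Counter

with open("transformer/diffusion_pytorch_model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]
print(weight_map["time_text_embed.guidance_embedder.linear_1.weight"])
# expected: diffusion_pytorch_model-00001-of-00003.safetensors

# Count how many tensors each shard is expected to carry.
print(Counter(weight_map.values()))

# Check that the shard files referenced by the map are actually present.
missing = {s for s in weight_map.values() if not os.path.exists(os.path.join("transformer", s))}
print("missing shards:", missing or "none")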